summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2004-10-18 20:50:22 -0700
committerLinus Torvalds <torvalds@ppc970.osdl.org>2004-10-18 20:50:22 -0700
commit098fc560ef2bbd1bde80845c898fa95db616eb6c (patch)
treeca722c6fdbdffe9b7cfd31d61e8f4aae906a319c
parentbffe01870598b7a0a77073e25ee94e026bc98e6b (diff)
parent2a136606fe21b603a0ce484fc578f862f8e8384d (diff)
Trivial Makefile merge
-rw-r--r--CREDITS2
-rw-r--r--Documentation/cdrom/00-INDEX2
-rw-r--r--Documentation/cdrom/packet-writing.txt86
-rw-r--r--Documentation/fb/matroxfb.txt7
-rw-r--r--Documentation/fb/vesafb.txt7
-rw-r--r--Documentation/filesystems/ntfs.txt177
-rw-r--r--Documentation/filesystems/vfs.txt197
-rw-r--r--Documentation/i386/zero-page.txt3
-rw-r--r--Documentation/kernel-parameters.txt4
-rw-r--r--Documentation/keys.txt836
-rw-r--r--Documentation/usb/sn9c102.txt109
-rw-r--r--Documentation/x86_64/boot-options.txt16
-rw-r--r--MAINTAINERS12
-rw-r--r--arch/alpha/kernel/time.c3
-rw-r--r--arch/arm/kernel/time.c3
-rw-r--r--arch/arm/mach-pxa/pxa27x.c44
-rw-r--r--arch/arm26/kernel/time.c3
-rw-r--r--arch/arm26/machine/dma.c2
-rw-r--r--arch/cris/arch-v10/kernel/fasttimer.c31
-rw-r--r--arch/cris/arch-v10/kernel/time.c3
-rw-r--r--arch/h8300/kernel/time.c3
-rw-r--r--arch/i386/boot/video.S4
-rw-r--r--arch/i386/kernel/acpi/boot.c5
-rw-r--r--arch/i386/kernel/cpu/mcheck/k7.c1
-rw-r--r--arch/i386/kernel/cpu/mcheck/non-fatal.c1
-rw-r--r--arch/i386/kernel/cpu/mcheck/p4.c3
-rw-r--r--arch/i386/kernel/cpu/mcheck/p5.c1
-rw-r--r--arch/i386/kernel/cpu/mcheck/p6.c1
-rw-r--r--arch/i386/kernel/cpu/mcheck/winchip.c1
-rw-r--r--arch/i386/kernel/entry.S5
-rw-r--r--arch/i386/kernel/microcode.c31
-rw-r--r--arch/i386/kernel/mpparse.c11
-rw-r--r--arch/i386/kernel/numaq.c1
-rw-r--r--arch/i386/kernel/srat.c1
-rw-r--r--arch/i386/kernel/timers/common.c1
-rw-r--r--arch/i386/mach-default/topology.c1
-rw-r--r--arch/i386/mm/discontig.c1
-rw-r--r--arch/i386/mm/hugetlbpage.c1
-rw-r--r--arch/ia64/kernel/acpi.c1
-rw-r--r--arch/ia64/kernel/time.c8
-rw-r--r--arch/ia64/mm/discontig.c1
-rw-r--r--arch/m68k/kernel/time.c3
-rw-r--r--arch/m68k/sun3/sun3ints.c3
-rw-r--r--arch/m68knommu/kernel/time.c3
-rw-r--r--arch/mips/au1000/common/time.c9
-rw-r--r--arch/mips/au1000/db1x00/mirage_ts.c2
-rw-r--r--arch/mips/baget/time.c3
-rw-r--r--arch/mips/galileo-boards/ev96100/time.c3
-rw-r--r--arch/mips/gt64120/common/time.c3
-rw-r--r--arch/mips/kernel/time.c3
-rw-r--r--arch/mips/momentum/ocelot_g/gt-irq.c3
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c2
-rw-r--r--arch/parisc/kernel/time.c2
-rw-r--r--arch/ppc/kernel/time.c3
-rw-r--r--arch/ppc64/kernel/sysfs.c2
-rw-r--r--arch/ppc64/kernel/time.c3
-rw-r--r--arch/ppc64/mm/hugetlbpage.c1
-rw-r--r--arch/ppc64/mm/numa.c1
-rw-r--r--arch/s390/kernel/time.c4
-rw-r--r--arch/sh/kernel/time.c3
-rw-r--r--arch/sh64/kernel/time.c3
-rw-r--r--arch/sparc/kernel/pcic.c3
-rw-r--r--arch/sparc/kernel/time.c4
-rw-r--r--arch/sparc64/kernel/time.c12
-rw-r--r--arch/um/kernel/time_kern.c2
-rw-r--r--arch/v850/kernel/time.c3
-rw-r--r--arch/x86_64/ia32/ia32_binfmt.c10
-rw-r--r--arch/x86_64/ia32/sys_ia32.c8
-rw-r--r--arch/x86_64/kernel/mce.c2
-rw-r--r--arch/x86_64/kernel/mpparse.c13
-rw-r--r--arch/x86_64/kernel/setup64.c61
-rw-r--r--arch/x86_64/kernel/time.c3
-rw-r--r--arch/x86_64/kernel/vsyscall.c1
-rw-r--r--arch/x86_64/mm/k8topology.c1
-rw-r--r--arch/x86_64/mm/numa.c6
-rw-r--r--drivers/block/Kconfig33
-rw-r--r--drivers/block/Kconfig.iosched8
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/as-iosched.c122
-rw-r--r--drivers/block/cfq-iosched.c1574
-rw-r--r--drivers/block/cpqarray.c14
-rw-r--r--drivers/block/deadline-iosched.c136
-rw-r--r--drivers/block/elevator.c318
-rw-r--r--drivers/block/ll_rw_blk.c253
-rw-r--r--drivers/block/noop-iosched.c33
-rw-r--r--drivers/block/pktcdvd.c2679
-rw-r--r--drivers/block/ub.c165
-rw-r--r--drivers/cdrom/Makefile1
-rw-r--r--drivers/cdrom/cdrom.c86
-rw-r--r--drivers/char/agp/intel-agp.c10
-rw-r--r--drivers/char/agp/sis-agp.c4
-rw-r--r--drivers/char/amiserial.c7
-rw-r--r--drivers/char/cyclades.c11
-rw-r--r--drivers/char/drm/radeon_mem.c8
-rw-r--r--drivers/char/dtlk.c19
-rw-r--r--drivers/char/epca.c3
-rw-r--r--drivers/char/esp.c56
-rw-r--r--drivers/char/ftape/lowlevel/fdc-io.c2
-rw-r--r--drivers/char/ftape/lowlevel/ftape-io.c20
-rw-r--r--drivers/char/ftape/zftape/zftape-buffers.c4
-rw-r--r--drivers/char/generic_serial.c7
-rw-r--r--drivers/char/hvc_console.c5
-rw-r--r--drivers/char/ip2main.c3
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c1
-rw-r--r--drivers/char/isicom.c3
-rw-r--r--drivers/char/istallion.c24
-rw-r--r--drivers/char/lcd.c4
-rw-r--r--drivers/char/moxa.c3
-rw-r--r--drivers/char/mxser.c4
-rw-r--r--drivers/char/pcmcia/synclink_cs.c12
-rw-r--r--drivers/char/pcxx.c3
-rw-r--r--drivers/char/rio/rio_linux.c6
-rw-r--r--drivers/char/riscom8.c7
-rw-r--r--drivers/char/rocket.c6
-rw-r--r--drivers/char/serial167.c3
-rw-r--r--drivers/char/specialix.c6
-rw-r--r--drivers/char/stallion.c25
-rw-r--r--drivers/char/sx.c2
-rw-r--r--drivers/char/synclink.c13
-rw-r--r--drivers/char/synclinkmp.c15
-rw-r--r--drivers/char/tpqic02.c3
-rw-r--r--drivers/char/tty_io.c4
-rw-r--r--drivers/char/vt.c4
-rw-r--r--drivers/ide/ide-cd.c8
-rw-r--r--drivers/ieee1394/nodemgr.c5
-rw-r--r--drivers/ieee1394/sbp2.c11
-rw-r--r--drivers/ieee1394/sbp2.h18
-rw-r--r--drivers/isdn/act2000/act2000_isa.c11
-rw-r--r--drivers/isdn/capi/kcapi.c7
-rw-r--r--drivers/isdn/hisax/config.c3
-rw-r--r--drivers/isdn/hisax/elsa.c3
-rw-r--r--drivers/isdn/hisax/hfc_pci.c3
-rw-r--r--drivers/isdn/hisax/hfc_sx.c6
-rw-r--r--drivers/isdn/hisax/hfcscard.c3
-rw-r--r--drivers/isdn/hysdn/boardergo.c7
-rw-r--r--drivers/isdn/hysdn/hysdn_sched.c7
-rw-r--r--drivers/isdn/i4l/isdn_tty.c4
-rw-r--r--drivers/isdn/icn/icn.c15
-rw-r--r--drivers/isdn/icn/icn.h3
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c3
-rw-r--r--drivers/isdn/sc/card.h7
-rw-r--r--drivers/isdn/sc/hardware.h3
-rw-r--r--drivers/isdn/sc/init.c10
-rw-r--r--drivers/isdn/tpam/tpam.h11
-rw-r--r--drivers/isdn/tpam/tpam_commands.c50
-rw-r--r--drivers/isdn/tpam/tpam_nco.c34
-rw-r--r--drivers/isdn/tpam/tpam_queues.c26
-rw-r--r--drivers/md/md.c3
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/md/raid10.c2
-rw-r--r--drivers/media/video/bw-qcam.c12
-rw-r--r--drivers/media/video/c-qcam.c6
-rw-r--r--drivers/media/video/cpia.c17
-rw-r--r--drivers/media/video/ovcamchip/ovcamchip_core.c7
-rw-r--r--drivers/media/video/planb.c3
-rw-r--r--drivers/media/video/saa5249.c3
-rw-r--r--drivers/media/video/tda9887.c4
-rw-r--r--drivers/media/video/videocodec.c5
-rw-r--r--drivers/media/video/zoran_driver.c6
-rw-r--r--drivers/media/video/zr36120.c6
-rw-r--r--drivers/message/fusion/mptbase.c39
-rw-r--r--drivers/message/i2o/debug.c196
-rw-r--r--drivers/message/i2o/device.c4
-rw-r--r--drivers/message/i2o/driver.c31
-rw-r--r--drivers/message/i2o/exec-osm.c13
-rw-r--r--drivers/message/i2o/i2o_block.c204
-rw-r--r--drivers/message/i2o/i2o_config.c94
-rw-r--r--drivers/message/i2o/i2o_proc.c9
-rw-r--r--drivers/message/i2o/i2o_scsi.c98
-rw-r--r--drivers/message/i2o/iop.c76
-rw-r--r--drivers/message/i2o/pci.c19
-rw-r--r--drivers/net/bsd_comp.c4
-rw-r--r--drivers/net/hamradio/dmascc.c1
-rw-r--r--drivers/net/irda/stir4200.c10
-rw-r--r--drivers/net/mac89x0.c4
-rw-r--r--drivers/net/ppp_deflate.c4
-rw-r--r--drivers/pci/quirks.c230
-rw-r--r--drivers/s390/block/dasd.c4
-rw-r--r--drivers/s390/char/tape_block.c4
-rw-r--r--drivers/scsi/sr.c7
-rw-r--r--drivers/usb/Kconfig4
-rw-r--r--drivers/usb/Makefile4
-rw-r--r--drivers/usb/atm/Kconfig30
-rw-r--r--drivers/usb/atm/Makefile7
-rw-r--r--drivers/usb/atm/speedtch.c866
-rw-r--r--drivers/usb/atm/usb_atm.c1205
-rw-r--r--drivers/usb/atm/usb_atm.h159
-rw-r--r--drivers/usb/class/Kconfig3
-rw-r--r--drivers/usb/class/audio.c37
-rw-r--r--drivers/usb/class/bluetty.c12
-rw-r--r--drivers/usb/class/cdc-acm.c33
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/class/usb-midi.c19
-rw-r--r--drivers/usb/class/usblp.c4
-rw-r--r--drivers/usb/core/devices.c18
-rw-r--r--drivers/usb/core/devio.c57
-rw-r--r--drivers/usb/core/hcd-pci.c6
-rw-r--r--drivers/usb/core/hcd.c33
-rw-r--r--drivers/usb/core/hcd.h4
-rw-r--r--drivers/usb/core/hub.c397
-rw-r--r--drivers/usb/core/inode.c121
-rw-r--r--drivers/usb/core/message.c50
-rw-r--r--drivers/usb/core/sysfs.c64
-rw-r--r--drivers/usb/core/urb.c5
-rw-r--r--drivers/usb/core/usb.c200
-rw-r--r--drivers/usb/core/usb.h10
-rw-r--r--drivers/usb/gadget/Kconfig15
-rw-r--r--drivers/usb/gadget/dummy_hcd.c39
-rw-r--r--drivers/usb/gadget/ether.c46
-rw-r--r--drivers/usb/gadget/file_storage.c43
-rw-r--r--drivers/usb/gadget/gadget_chips.h6
-rw-r--r--drivers/usb/gadget/goku_udc.c14
-rw-r--r--drivers/usb/gadget/lh7a40x_udc.c7
-rw-r--r--drivers/usb/gadget/net2280.c10
-rw-r--r--drivers/usb/gadget/omap_udc.c146
-rw-r--r--drivers/usb/gadget/omap_udc.h11
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.c17
-rw-r--r--drivers/usb/gadget/rndis.c51
-rw-r--r--drivers/usb/gadget/zero.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c5
-rw-r--r--drivers/usb/host/ehci-hub.c2
-rw-r--r--drivers/usb/host/hc_sl811.c8
-rw-r--r--drivers/usb/host/ohci-dbg.c4
-rw-r--r--drivers/usb/host/ohci-hcd.c254
-rw-r--r--drivers/usb/host/ohci-hub.c63
-rw-r--r--drivers/usb/host/ohci-lh7a404.c30
-rw-r--r--drivers/usb/host/ohci-omap.c30
-rw-r--r--drivers/usb/host/ohci-pci.c37
-rw-r--r--drivers/usb/host/ohci-pxa27x.c460
-rw-r--r--drivers/usb/host/ohci-sa1111.c25
-rw-r--r--drivers/usb/host/ohci.h34
-rw-r--r--drivers/usb/host/uhci-hcd.c253
-rw-r--r--drivers/usb/host/uhci-hcd.h18
-rw-r--r--drivers/usb/host/uhci-hub.c134
-rw-r--r--drivers/usb/image/Kconfig7
-rw-r--r--drivers/usb/image/hpusbscsi.c12
-rw-r--r--drivers/usb/image/mdc800.c18
-rw-r--r--drivers/usb/image/microtek.c17
-rw-r--r--drivers/usb/image/microtek.h1
-rw-r--r--drivers/usb/input/aiptek.c4
-rw-r--r--drivers/usb/input/ati_remote.c7
-rw-r--r--drivers/usb/input/hid-core.c22
-rw-r--r--drivers/usb/input/kbtab.c4
-rw-r--r--drivers/usb/input/mtouchusb.c4
-rw-r--r--drivers/usb/input/pid.c2
-rw-r--r--drivers/usb/input/powermate.c2
-rw-r--r--drivers/usb/input/touchkitusb.c4
-rw-r--r--drivers/usb/input/usbkbd.c4
-rw-r--r--drivers/usb/input/usbmouse.c4
-rw-r--r--drivers/usb/input/wacom.c4
-rw-r--r--drivers/usb/input/xpad.c4
-rw-r--r--drivers/usb/media/Kconfig6
-rw-r--r--drivers/usb/media/dabusb.c6
-rw-r--r--drivers/usb/media/konicawc.c9
-rw-r--r--drivers/usb/media/ov511.c2
-rw-r--r--drivers/usb/media/se401.c6
-rw-r--r--drivers/usb/media/sn9c102.h19
-rw-r--r--drivers/usb/media/sn9c102_core.c148
-rw-r--r--drivers/usb/media/sn9c102_pas106b.c14
-rw-r--r--drivers/usb/media/sn9c102_pas202bcb.c21
-rw-r--r--drivers/usb/media/sn9c102_sensor.h61
-rw-r--r--drivers/usb/media/sn9c102_tas5110c1b.c45
-rw-r--r--drivers/usb/media/sn9c102_tas5130d1b.c60
-rw-r--r--drivers/usb/media/stv680.c4
-rw-r--r--drivers/usb/media/usbvideo.c4
-rw-r--r--drivers/usb/misc/Kconfig12
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/auerswald.c14
-rw-r--r--drivers/usb/misc/legousbtower.c4
-rw-r--r--drivers/usb/misc/speedtch.c1373
-rw-r--r--drivers/usb/misc/tiglusb.c1
-rw-r--r--drivers/usb/misc/uss720.c4
-rw-r--r--drivers/usb/net/catc.c8
-rw-r--r--drivers/usb/net/kaweth.c22
-rw-r--r--drivers/usb/net/pegasus.c12
-rw-r--r--drivers/usb/net/rtl8150.c10
-rw-r--r--drivers/usb/net/usbnet.c21
-rw-r--r--drivers/usb/serial/Kconfig10
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/belkin_sa.c8
-rw-r--r--drivers/usb/serial/cyberjack.c6
-rw-r--r--drivers/usb/serial/digi_acceleport.c6
-rw-r--r--drivers/usb/serial/empeg.c8
-rw-r--r--drivers/usb/serial/ftdi_sio.c24
-rw-r--r--drivers/usb/serial/ftdi_sio.h15
-rw-r--r--drivers/usb/serial/generic.c4
-rw-r--r--drivers/usb/serial/io_edgeport.c6
-rw-r--r--drivers/usb/serial/io_ti.c6
-rw-r--r--drivers/usb/serial/ipaq.c23
-rw-r--r--drivers/usb/serial/ipw.c496
-rw-r--r--drivers/usb/serial/ir-usb.c2
-rw-r--r--drivers/usb/serial/keyspan_pda.c6
-rw-r--r--drivers/usb/serial/kl5kusb105.c12
-rw-r--r--drivers/usb/serial/kobil_sct.c14
-rw-r--r--drivers/usb/serial/mct_u232.c6
-rw-r--r--drivers/usb/serial/omninet.c4
-rw-r--r--drivers/usb/serial/pl2303.c430
-rw-r--r--drivers/usb/serial/usb-serial.c125
-rw-r--r--drivers/usb/serial/usb-serial.h1
-rw-r--r--drivers/usb/serial/visor.c6
-rw-r--r--drivers/usb/serial/whiteheat.c8
-rw-r--r--drivers/usb/storage/isd200.c6
-rw-r--r--drivers/usb/storage/protocol.c49
-rw-r--r--drivers/usb/storage/scsiglue.c31
-rw-r--r--drivers/usb/storage/transport.c25
-rw-r--r--drivers/usb/storage/unusual_devs.h136
-rw-r--r--drivers/usb/storage/usb.c70
-rw-r--r--drivers/usb/storage/usb.h3
-rw-r--r--drivers/video/Kconfig18
-rw-r--r--drivers/video/aty/atyfb_base.c2
-rw-r--r--drivers/video/aty/radeon_base.c307
-rw-r--r--drivers/video/aty/radeon_monitor.c57
-rw-r--r--drivers/video/aty/radeon_pm.c4
-rw-r--r--drivers/video/aty/radeonfb.h2
-rw-r--r--drivers/video/bw2.c2
-rw-r--r--drivers/video/chipsfb.c2
-rw-r--r--drivers/video/console/Makefile3
-rw-r--r--drivers/video/console/bitblit.c370
-rw-r--r--drivers/video/console/fbcon.c891
-rw-r--r--drivers/video/console/fbcon.h26
-rw-r--r--drivers/video/console/tileblit.c146
-rw-r--r--drivers/video/console/vgacon.c4
-rw-r--r--drivers/video/cyber2000fb.c4
-rw-r--r--drivers/video/fbmem.c63
-rw-r--r--drivers/video/fbsysfs.c2
-rw-r--r--drivers/video/fm2fb.c23
-rw-r--r--drivers/video/i810/i810.h4
-rw-r--r--drivers/video/i810/i810_accel.c14
-rw-r--r--drivers/video/i810/i810_gtf.c3
-rw-r--r--drivers/video/i810/i810_main.c148
-rw-r--r--drivers/video/igafb.c1
-rw-r--r--drivers/video/imsttfb.c1
-rw-r--r--drivers/video/kyro/fbdev.c1
-rw-r--r--drivers/video/matrox/matroxfb_accel.c22
-rw-r--r--drivers/video/matrox/matroxfb_base.c8
-rw-r--r--drivers/video/matrox/matroxfb_base.h140
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c2
-rw-r--r--drivers/video/pvr2fb.c1
-rw-r--r--drivers/video/radeonfb.c2
-rw-r--r--drivers/video/riva/fbdev.c22
-rw-r--r--drivers/video/sstfb.c1
-rw-r--r--drivers/video/tdfxfb.c9
-rw-r--r--drivers/video/tgafb.c1
-rw-r--r--drivers/video/tridentfb.c1
-rw-r--r--drivers/video/vesafb.c67
-rw-r--r--drivers/video/vga16fb.c29
-rw-r--r--fs/Kconfig21
-rw-r--r--fs/afs/main.c6
-rw-r--r--fs/autofs4/root.c24
-rw-r--r--fs/block_dev.c6
-rw-r--r--fs/buffer.c100
-rw-r--r--fs/cifs/CHANGES4
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/compat_ioctl.c1
-rw-r--r--fs/dcache.c50
-rw-r--r--fs/devfs/base.c1
-rw-r--r--fs/devpts/Makefile1
-rw-r--r--fs/devpts/inode.c34
-rw-r--r--fs/devpts/xattr.c214
-rw-r--r--fs/devpts/xattr.h59
-rw-r--r--fs/devpts/xattr_security.c29
-rw-r--r--fs/exec.c14
-rw-r--r--fs/ext2/acl.c111
-rw-r--r--fs/ext2/acl.h3
-rw-r--r--fs/ext2/file.c8
-rw-r--r--fs/ext2/namei.c16
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext2/symlink.c16
-rw-r--r--fs/ext2/xattr.c269
-rw-r--r--fs/ext2/xattr.h32
-rw-r--r--fs/ext2/xattr_security.c11
-rw-r--r--fs/ext2/xattr_trusted.c11
-rw-r--r--fs/ext2/xattr_user.c27
-rw-r--r--fs/ext3/acl.c111
-rw-r--r--fs/ext3/file.c8
-rw-r--r--fs/ext3/inode.c11
-rw-r--r--fs/ext3/namei.c16
-rw-r--r--fs/ext3/super.c1
-rw-r--r--fs/ext3/symlink.c16
-rw-r--r--fs/ext3/xattr.c266
-rw-r--r--fs/ext3/xattr.h32
-rw-r--r--fs/ext3/xattr_security.c14
-rw-r--r--fs/ext3/xattr_trusted.c13
-rw-r--r--fs/ext3/xattr_user.c13
-rw-r--r--fs/fcntl.c2
-rw-r--r--fs/file_table.c6
-rw-r--r--fs/fs-writeback.c20
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfsplus/inode.c2
-rw-r--r--fs/hostfs/hostfs_kern.c2
-rw-r--r--fs/inode.c69
-rw-r--r--fs/jbd/commit.c2
-rw-r--r--fs/jbd/transaction.c9
-rw-r--r--fs/jfs/acl.c81
-rw-r--r--fs/jfs/super.c4
-rw-r--r--fs/mbcache.c33
-rw-r--r--fs/namei.c34
-rw-r--r--fs/namespace.c4
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/ntfs/ChangeLog124
-rw-r--r--fs/ntfs/Makefile6
-rw-r--r--fs/ntfs/aops.c407
-rw-r--r--fs/ntfs/aops.h102
-rw-r--r--fs/ntfs/attrib.c1173
-rw-r--r--fs/ntfs/attrib.h26
-rw-r--r--fs/ntfs/bitmap.c1
-rw-r--r--fs/ntfs/collate.c3
-rw-r--r--fs/ntfs/compress.c9
-rw-r--r--fs/ntfs/debug.h6
-rw-r--r--fs/ntfs/dir.c14
-rw-r--r--fs/ntfs/dir.h2
-rw-r--r--fs/ntfs/file.c5
-rw-r--r--fs/ntfs/index.c59
-rw-r--r--fs/ntfs/index.h6
-rw-r--r--fs/ntfs/inode.c144
-rw-r--r--fs/ntfs/inode.h19
-rw-r--r--fs/ntfs/layout.h83
-rw-r--r--fs/ntfs/lcnalloc.c9
-rw-r--r--fs/ntfs/lcnalloc.h29
-rw-r--r--fs/ntfs/logfile.c65
-rw-r--r--fs/ntfs/malloc.h1
-rw-r--r--fs/ntfs/mft.c2445
-rw-r--r--fs/ntfs/mft.h15
-rw-r--r--fs/ntfs/namei.c5
-rw-r--r--fs/ntfs/ntfs.h78
-rw-r--r--fs/ntfs/quota.c3
-rw-r--r--fs/ntfs/runlist.c1462
-rw-r--r--fs/ntfs/runlist.h89
-rw-r--r--fs/ntfs/super.c26
-rw-r--r--fs/ntfs/types.h28
-rw-r--r--fs/ntfs/unistr.c2
-rw-r--r--fs/ntfs/upcase.c1
-rw-r--r--fs/ntfs/volume.h4
-rw-r--r--fs/posix_acl.c42
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/proc/root.c3
-rw-r--r--fs/proc/task_mmu.c11
-rw-r--r--fs/reiserfs/bitmap.c4
-rw-r--r--fs/reiserfs/dir.c4
-rw-r--r--fs/reiserfs/fix_node.c4
-rw-r--r--fs/reiserfs/ibalance.c8
-rw-r--r--fs/reiserfs/item_ops.c10
-rw-r--r--fs/reiserfs/namei.c2
-rw-r--r--fs/reiserfs/prints.c10
-rw-r--r--fs/reiserfs/stree.c28
-rw-r--r--fs/reiserfs/super.c8
-rw-r--r--fs/select.c2
-rw-r--r--fs/smbfs/inode.c23
-rw-r--r--fs/smbfs/proc.c30
-rw-r--r--fs/smbfs/proto.h2
-rw-r--r--fs/xattr.c130
-rw-r--r--include/asm-alpha/errno.h4
-rw-r--r--include/asm-arm/arch-clps711x/time.h3
-rw-r--r--include/asm-arm/arch-integrator/time.h3
-rw-r--r--include/asm-arm/arch-l7200/time.h3
-rw-r--r--include/asm-arm/arch-lh7a40x/timex.h1
-rw-r--r--include/asm-arm/arch-sa1100/timex.h1
-rw-r--r--include/asm-generic/errno.h4
-rw-r--r--include/asm-h8300/timex.h4
-rw-r--r--include/asm-i386/cpu.h1
-rw-r--r--include/asm-i386/mach-default/do_timer.h3
-rw-r--r--include/asm-i386/mach-summit/mach_mpparse.h3
-rw-r--r--include/asm-i386/mach-visws/do_timer.h3
-rw-r--r--include/asm-i386/mach-voyager/do_timer.h3
-rw-r--r--include/asm-i386/mpspec.h2
-rw-r--r--include/asm-i386/node.h1
-rw-r--r--include/asm-i386/setup.h2
-rw-r--r--include/asm-i386/system.h4
-rw-r--r--include/asm-i386/timex.h4
-rw-r--r--include/asm-i386/unistd.h13
-rw-r--r--include/asm-m68k/timex.h4
-rw-r--r--include/asm-mips/errno.h4
-rw-r--r--include/asm-parisc/errno.h4
-rw-r--r--include/asm-ppc/timex.h4
-rw-r--r--include/asm-ppc64/timex.h4
-rw-r--r--include/asm-s390/timex.h4
-rw-r--r--include/asm-sh/timex.h4
-rw-r--r--include/asm-sparc/errno.h4
-rw-r--r--include/asm-sparc/timex.h4
-rw-r--r--include/asm-sparc64/errno.h4
-rw-r--r--include/asm-sparc64/timex.h4
-rw-r--r--include/asm-v850/timex.h4
-rw-r--r--include/asm-x86_64/elf.h5
-rw-r--r--include/asm-x86_64/mpspec.h2
-rw-r--r--include/asm-x86_64/numa.h2
-rw-r--r--include/asm-x86_64/page.h12
-rw-r--r--include/asm-x86_64/system.h4
-rw-r--r--include/asm-x86_64/timex.h4
-rw-r--r--include/linux/blkdev.h28
-rw-r--r--include/linux/buffer_head.h1
-rw-r--r--include/linux/cdrom.h3
-rw-r--r--include/linux/compat_ioctl.h2
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/elevator.h64
-rw-r--r--include/linux/fb.h84
-rw-r--r--include/linux/fs.h9
-rw-r--r--include/linux/i2o.h14
-rw-r--r--include/linux/jbd.h1
-rw-r--r--include/linux/jiffies.h384
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/key-ui.h97
-rw-r--r--include/linux/key.h284
-rw-r--r--include/linux/keyctl.h39
-rw-r--r--include/linux/kfifo.h157
-rw-r--r--include/linux/mbcache.h2
-rw-r--r--include/linux/mmzone.h29
-rw-r--r--include/linux/module.h34
-rw-r--r--include/linux/nodemask.h326
-rw-r--r--include/linux/pagevec.h6
-rw-r--r--include/linux/pktcdvd.h275
-rw-r--r--include/linux/pm.h5
-rw-r--r--include/linux/posix_acl.h1
-rw-r--r--include/linux/prctl.h1
-rw-r--r--include/linux/reiserfs_fs.h48
-rw-r--r--include/linux/sched.h19
-rw-r--r--include/linux/security.h42
-rw-r--r--include/linux/smb_mount.h5
-rw-r--r--include/linux/syscalls.h15
-rw-r--r--include/linux/threads.h2
-rw-r--r--include/linux/time.h274
-rw-r--r--include/linux/times.h74
-rw-r--r--include/linux/timex.h62
-rw-r--r--include/linux/types.h14
-rw-r--r--include/linux/usb.h29
-rw-r--r--include/linux/usbdevice_fs.h10
-rw-r--r--include/linux/wait.h80
-rw-r--r--include/linux/writeback.h6
-rw-r--r--include/linux/xattr.h16
-rw-r--r--include/video/radeon.h3
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/exit.c4
-rw-r--r--kernel/fork.c139
-rw-r--r--kernel/kallsyms.c194
-rw-r--r--kernel/kfifo.c170
-rw-r--r--kernel/panic.c16
-rw-r--r--kernel/pid.c117
-rw-r--r--kernel/power/pm.c31
-rw-r--r--kernel/printk.c8
-rw-r--r--kernel/profile.c258
-rw-r--r--kernel/signal.c47
-rw-r--r--kernel/sys.c20
-rw-r--r--kernel/sysctl.c6
-rw-r--r--kernel/timer.c5
-rw-r--r--kernel/user.c16
-rw-r--r--kernel/wait.c246
-rw-r--r--lib/idr.c10
-rw-r--r--mm/filemap.c85
-rw-r--r--mm/memory.c3
-rw-r--r--mm/mempolicy.c10
-rw-r--r--mm/page_alloc.c5
-rw-r--r--mm/shmem.c92
-rw-r--r--mm/tiny-shmem.c2
-rw-r--r--scripts/kallsyms.c553
-rw-r--r--scripts/mod/sumversion.c8
-rw-r--r--security/Kconfig29
-rw-r--r--security/Makefile1
-rw-r--r--security/dummy.c6
-rw-r--r--security/keys/Makefile13
-rw-r--r--security/keys/internal.h109
-rw-r--r--security/keys/key.c1039
-rw-r--r--security/keys/keyctl.c991
-rw-r--r--security/keys/keyring.c895
-rw-r--r--security/keys/proc.c251
-rw-r--r--security/keys/process_keys.c640
-rw-r--r--security/keys/request_key.c337
-rw-r--r--security/keys/user_defined.c191
-rw-r--r--security/selinux/hooks.c18
567 files changed, 27601 insertions, 10277 deletions
diff --git a/CREDITS b/CREDITS
index 22252bdf58c4..66fbacc1006f 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2764,7 +2764,7 @@ N: Luca Risolia
E: luca.risolia@studio.unibo.it
P: 1024D/FCE635A4 88E8 F32F 7244 68BA 3958 5D40 99DA 5D2A FCE6 35A4
D: V4L driver for W996[87]CF JPEG USB Dual Mode Camera Chips
-D: V4L2 driver for SN9C10[12] PC Camera Controllers
+D: V4L2 driver for SN9C10x PC Camera Controllers
S: Via Liberta' 41/A
S: Osio Sotto, 24046, Bergamo
S: Italy
diff --git a/Documentation/cdrom/00-INDEX b/Documentation/cdrom/00-INDEX
index eae6896676f2..916dafe29d3f 100644
--- a/Documentation/cdrom/00-INDEX
+++ b/Documentation/cdrom/00-INDEX
@@ -22,6 +22,8 @@ mcdx
- info on improved Mitsumi CD-ROM driver.
optcd
- info on the Optics Storage 8000 AT CD-ROM driver
+packet-writing.txt
+ - Info on the CDRW packet writing module
sbpcd
- info on the SoundBlaster/Panasonic CD-ROM interface driver.
sjcd
diff --git a/Documentation/cdrom/packet-writing.txt b/Documentation/cdrom/packet-writing.txt
new file mode 100644
index 000000000000..d34fcbca9f27
--- /dev/null
+++ b/Documentation/cdrom/packet-writing.txt
@@ -0,0 +1,86 @@
+Getting started quick
+---------------------
+
+- Select packet support in the block device section and UDF support in
+ the file system section.
+
+- Compile and install kernel and modules, reboot.
+
+- You need the udftools package (pktsetup, mkudffs, cdrwtool).
+ Download from http://sourceforge.net/projects/linux-udf/
+
+- Grab a new CD-RW disc and format it (assuming CD-RW is hdc, substitute
+ as appropriate):
+ # cdrwtool -d /dev/hdc -q
+
+- Setup your writer
+ # pktsetup dev_name /dev/hdc
+
+- Now you can mount /dev/pktcdvd/dev_name and copy files to it. Enjoy!
+ # mount /dev/pktcdvd/dev_name /cdrom -t udf -o rw,noatime
+
+
+Packet writing for DVD-RW media
+-------------------------------
+
+DVD-RW discs can be written to much like CD-RW discs if they are in
+the so called "restricted overwrite" mode. To put a disc in restricted
+overwrite mode, run:
+
+ # dvd+rw-format /dev/hdc
+
+You can then use the disc the same way you would use a CD-RW disc:
+
+ # pktsetup dev_name /dev/hdc
+ # mount /dev/pktcdvd/dev_name /cdrom -t udf -o rw,noatime
+
+
+Packet writing for DVD+RW media
+-------------------------------
+
+According to the DVD+RW specification, a drive supporting DVD+RW discs
+shall implement "true random writes with 2KB granularity", which means
+that it should be possible to put any filesystem with a block size >=
+2KB on such a disc. For example, it should be possible to do:
+
+ # mkudffs /dev/hdc
+ # mount /dev/hdc /cdrom -t udf -o rw,noatime
+
+However, some drives don't follow the specification and expect the
+host to perform aligned writes at 32KB boundaries. Other drives do
+follow the specification, but suffer bad performance problems if the
+writes are not 32KB aligned.
+
+Both problems can be solved by using the pktcdvd driver, which always
+generates aligned writes.
+
+ # pktsetup dev_name /dev/hdc
+ # mkudffs /dev/pktcdvd/dev_name
+ # mount /dev/pktcdvd/dev_name /cdrom -t udf -o rw,noatime
+
+
+Notes
+-----
+
+- CD-RW media can usually not be overwritten more than about 1000
+ times, so to avoid unnecessary wear on the media, you should always
+ use the noatime mount option.
+
+- Defect management (ie automatic remapping of bad sectors) has not
+ been implemented yet, so you are likely to get at least some
+ filesystem corruption if the disc wears out.
+
+- Since the pktcdvd driver makes the disc appear as a regular block
+ device with a 2KB block size, you can put any filesystem you like on
+ the disc. For example, run:
+
+ # /sbin/mke2fs /dev/pktcdvd/dev_name
+
+ to create an ext2 filesystem on the disc.
+
+
+Links
+-----
+
+See http://fy.chalmers.se/~appro/linux/DVD+RW/ for more information
+about DVD writing.
diff --git a/Documentation/fb/matroxfb.txt b/Documentation/fb/matroxfb.txt
index 621e3b3e40df..ad7a67707d62 100644
--- a/Documentation/fb/matroxfb.txt
+++ b/Documentation/fb/matroxfb.txt
@@ -223,6 +223,13 @@ dfp:X - use settings X for digital flat panel interface. X is number from
selects who is source of display clocks, whether G400, or panel.
Default value is now read back from hardware - so you should specify
this value only if you are also using `init' parameter.
+outputs:XYZ - set mapping between CRTC and outputs. Each letter can have value
+ of 0 (for no CRTC), 1 (CRTC1) or 2 (CRTC2), and first letter corresponds
+ to primary analog output, second letter to the secondary analog output
+ and third letter to the DVI output. Default setting is 100 for
+ cards below G400 or G400 without DFP, 101 for G400 with DFP, and
+ 111 for G450 and G550. You can set mapping only on first card,
+ use matroxset for setting up other devices.
vesa:X - selects startup videomode. X is number from 0 to 0x1FF, see table
above for detailed explanation. Default is 640x480x8bpp if driver
has 8bpp support. Otherwise first available of 640x350x4bpp,
diff --git a/Documentation/fb/vesafb.txt b/Documentation/fb/vesafb.txt
index 36beb54f1a57..814e2f56a6ad 100644
--- a/Documentation/fb/vesafb.txt
+++ b/Documentation/fb/vesafb.txt
@@ -146,11 +146,16 @@ pmipal Use the protected mode interface for palette changes.
mtrr setup memory type range registers for the vesafb framebuffer.
-vram:n remap 'n' MiB of video RAM. If 0 or not specified, remap memory
+vremap:n
+ remap 'n' MiB of video RAM. If 0 or not specified, remap memory
according to video mode. (2.5.66 patch/idea by Antonino Daplas
reversed to give override possibility (allocate more fb memory
than the kernel would) to 2.4 by tmb@iki.fi)
+vtotal:n
+ if the video BIOS of your card incorrectly determines the total
+ amount of video RAM, use this option to override the BIOS (in MiB).
+
Have fun!
Gerd
diff --git a/Documentation/filesystems/ntfs.txt b/Documentation/filesystems/ntfs.txt
index 7d600c0f4c86..ad2e32227476 100644
--- a/Documentation/filesystems/ntfs.txt
+++ b/Documentation/filesystems/ntfs.txt
@@ -10,8 +10,10 @@ Table of contents
- Features
- Supported mount options
- Known bugs and (mis-)features
-- Using Software RAID with NTFS
-- Limitiations when using the MD driver
+- Using NTFS volume and stripe sets
+ - The Device-Mapper driver
+ - The Software RAID / MD driver
+ - Limitiations when using the MD driver
- ChangeLog
@@ -199,11 +201,161 @@ Please send bug reports/comments/feedback/abuse to the Linux-NTFS development
list at sourceforge: linux-ntfs-dev@lists.sourceforge.net
-Using Software RAID with NTFS
-=============================
+Using NTFS volume and stripe sets
+=================================
+
+For support of volume and stripe sets, you can either use the kernel's
+Device-Mapper driver or the kernel's Software RAID / MD driver. The former is
+the recommended one to use for linear raid. But the latter is required for
+raid level 5. For striping and mirroring, either driver should work fine.
+
+
+The Device-Mapper driver
+------------------------
+
+You will need to create a table of the components of the volume/stripe set and
+how they fit together and load this into the kernel using the dmsetup utility
+(see man 8 dmsetup).
+
+Linear volume sets, i.e. linear raid, has been tested and works fine. Even
+though untested, there is no reason why stripe sets, i.e. raid level 0, and
+mirrors, i.e. raid level 1 should not work, too. Stripes with parity, i.e.
+raid level 5, unfortunately cannot work yet because the current version of the
+Device-Mapper driver does not support raid level 5. You may be able to use the
+Software RAID / MD driver for raid level 5, see the next section for details.
+
+To create the table describing your volume you will need to know each of its
+components and their sizes in sectors, i.e. multiples of 512-byte blocks.
+
+For NT4 fault tolerant volumes you can obtain the sizes using fdisk. So for
+example if one of your partitions is /dev/hda2 you would do:
+
+$ fdisk -ul /dev/hda
+
+Disk /dev/hda: 81.9 GB, 81964302336 bytes
+255 heads, 63 sectors/track, 9964 cylinders, total 160086528 sectors
+Units = sectors of 1 * 512 = 512 bytes
+
+ Device Boot Start End Blocks Id System
+ /dev/hda1 * 63 4209029 2104483+ 83 Linux
+ /dev/hda2 4209030 37768814 16779892+ 86 NTFS
+ /dev/hda3 37768815 46170809 4200997+ 83 Linux
+
+And you would know that /dev/hda2 has a size of 37768814 - 4209030 + 1 =
+33559785 sectors.
+
+For Win2k and later dynamic disks, you can for example use the ldminfo utility
+which is part of the Linux LDM tools (the latest version at the time of
+writing is linux-ldm-0.0.8.tar.bz2). You can download it from:
+ http://linux-ntfs.sourceforge.net/downloads.html
+Simply extract the downloaded archive (tar xvjf linux-ldm-0.0.8.tar.bz2), go
+into it (cd linux-ldm-0.0.8) and change to the test directory (cd test). You
+will find the precompiled (i386) ldminfo utility there. NOTE: You will not be
+able to compile this yourself easily so use the binary version!
+
+Then you would use ldminfo in dump mode to obtain the necessary information:
+
+$ ./ldminfo --dump /dev/hda
+
+This would dump the LDM database found on /dev/hda which describes all of your
+dynamic disks and all the volumes on them. At the bottom you will see the
+VOLUME DEFINITIONS section which is all you really need. You may need to look
+further above to determine which of the disks in the volume definitions is
+which device in Linux. Hint: Run ldminfo on each of your dynamic disks and
+look at the Disk Id close to the top of the output for each (the PRIVATE HEADER
+section). You can then find these Disk Ids in the VBLK DATABASE section in the
+<Disk> components where you will get the LDM Name for the disk that is found in
+the VOLUME DEFINITIONS section.
+
+Note you will also need to enable the LDM driver in the Linux kernel. If your
+distribution did not enable it, you will need to recompile the kernel with it
+enabled. This will create the LDM partitions on each device at boot time. You
+would then use those devices (for /dev/hda they would be /dev/hda1, 2, 3, etc)
+in the Device-Mapper table.
+
+You can also bypass using the LDM driver by using the main device (e.g.
+/dev/hda) and then using the offsets of the LDM partitions into this device as
+the "Start sector of device" when creating the table. Once again ldminfo would
+give you the correct information to do this.
+
+Assuming you know all your devices and their sizes things are easy.
+
+For a linear raid the table would look like this (note all values are in
+512-byte sectors):
+
+--- cut here ---
+# Offset into Size of this Raid type Device Start sector
+# volume device of device
+0 1028161 linear /dev/hda1 0
+1028161 3903762 linear /dev/hdb2 0
+4931923 2103211 linear /dev/hdc1 0
+--- cut here ---
-For support of volume and stripe sets, use the kernel's Software RAID / MD
-driver and set up your /etc/raidtab appropriately (see man 5 raidtab).
+For a striped volume, i.e. raid level 0, you will need to know the chunk size
+you used when creating the volume. Windows uses 64kiB as the default, so it
+will probably be this unless you changed the defaults when creating the array.
+
+For a raid level 0 the table would look like this (note all values are in
+512-byte sectors):
+
+--- cut here ---
+# Offset Size Raid Number Chunk 1st Start 2nd Start
+# into of the type of size Device in Device in
+# volume volume stripes device device
+0 2056320 striped 2 128 /dev/hda1 0 /dev/hdb1 0
+--- cut here ---
+
+If there are more than two devices, just add each of them to the end of the
+line.
+
+Finally, for a mirrored volume, i.e. raid level 1, the table would look like
+this (note all values are in 512-byte sectors):
+
+--- cut here ---
+# Ofs Size Raid Log Number Region Should Number Source Start Target Start
+# in of the type type of log size sync? of Device in Device in
+# vol volume params mirrors Device Device
+0 2056320 mirror core 2 16 nosync 2 /dev/hda1 0 /dev/hdb1 0
+--- cut here ---
+
+If you are mirroring to multiple devices you can specify further targets at the
+end of the line.
+
+Note the "Should sync?" parameter "nosync" means that the two mirrors are
+already in sync which will be the case on a clean shutdown of Windows. If the
+mirrors are not clean, you can specify the "sync" option instead of "nosync"
+and the Device-Mapper driver will then copy the entirety of the "Source Device"
+to the "Target Device" or if you specified multiple target devices to all of
+them.
+
+Once you have your table, save it in a file somewhere (e.g. /etc/ntfsvolume1),
+and hand it over to dmsetup to work with, like so:
+
+$ dmsetup create myvolume1 /etc/ntfsvolume1
+
+You can obviously replace "myvolume1" with whatever name you like.
+
+If it all worked, you will now have the device /dev/device-mapper/myvolume1
+which you can then just use as an argument to the mount command as usual to
+mount the ntfs volume. For example:
+
+$ mount -t ntfs -o ro /dev/device-mapper/myvolume1 /mnt/myvol1
+
+(You need to create the directory /mnt/myvol1 first and of course you can use
+anything you like instead of /mnt/myvol1 as long as it is an existing
+directory.)
+
+It is advisable to do the mount read-only to see if the volume has been setup
+correctly to avoid the possibility of causing damage to the data on the ntfs
+volume.
+
+
+The Software RAID / MD driver
+-----------------------------
+
+An alternative to using the Device-Mapper driver is to use the kernel's
+Software RAID / MD driver. For which you need to set up your /etc/raidtab
+appropriately (see man 5 raidtab).
Linear volume sets, i.e. linear raid, as well as stripe sets, i.e. raid level
0, have been tested and work fine (though see section "Limitiations when using
@@ -258,8 +410,8 @@ setup correctly to avoid the possibility of causing damage to the data on the
ntfs volume.
-Limitiations when using the MD driver
-=====================================
+Limitiations when using the Software RAID / MD driver
+-----------------------------------------------------
Using the md driver will not work properly if any of your NTFS partitions have
an odd number of sectors. This is especially important for linear raid as all
@@ -271,12 +423,21 @@ apparent when you try to use the volume again under Windows.
So when using linear raid, make sure that all your partitions have an even
number of sectors BEFORE attempting to use it. You have been warned!
+Even better is to simply use the Device-Mapper for linear raid and then you do
+not have this problem with odd numbers of sectors.
+
ChangeLog
=========
Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
+2.1.21:
+ - Fix several race conditions and various other bugs.
+ - Many internal cleanups, code reorganization, optimizations, and mft
+ and index record writing code rewritten to fit in with the changes.
+ - Update Documentation/filesystems/ntfs.txt with instructions on how to
+ use the Device-Mapper driver with NTFS ftdisk/LDM raid.
2.1.20:
- Fix two stupid bugs introduced in 2.1.18 release.
2.1.19:
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 5be10c915614..3f318dd44c77 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -44,7 +44,7 @@ Opening a File <subsection>
The VFS implements the open(2), stat(2), chmod(2) and similar system
calls. The pathname argument is used by the VFS to search through the
directory entry cache (dentry cache or "dcache"). This provides a very
-fast lookup mechanism to translate a pathname (filename) into a
+fast look-up mechanism to translate a pathname (filename) into a
specific dentry.
An individual dentry usually has a pointer to an inode. Inodes are the
@@ -64,7 +64,7 @@ resolve your pathname into a dentry, the VFS may have to resort to
creating dentries along the way, and then loading the inode. This is
done by looking up the inode.
-To lookup an inode (usually read from disc) requires that the VFS
+To look up an inode (usually read from disc) requires that the VFS
calls the lookup() method of the parent directory inode. This method
is installed by the specific filesystem implementation that the inode
lives in. There will be more on this later.
@@ -286,7 +286,7 @@ otherwise noted.
dentry). Here you will probably call d_instantiate() with the
dentry and the newly created inode
- lookup: called when the VFS needs to lookup an inode in a parent
+ lookup: called when the VFS needs to look up an inode in a parent
directory. The name to look for is found in the dentry. This
method must call d_add() to insert the found inode into the
dentry. The "i_count" field in the inode structure should be
@@ -405,7 +405,10 @@ from device node to device driver (this is an unofficial kernel
patch).
-struct dentry_operations <section>
+Directory Entry Cache (dcache) <section>
+------------------------------
+
+struct dentry_operations
========================
This describes how a filesystem can overload the standard dentry
@@ -425,7 +428,7 @@ struct dentry_operations {
};
d_revalidate: called when the VFS needs to revalidate a dentry. This
- is called whenever a name lookup finds a dentry in the
+ is called whenever a name look-up finds a dentry in the
dcache. Most filesystems leave this as NULL, because all their
dentries in the dcache are valid
@@ -448,6 +451,9 @@ Each dentry has a pointer to its parent dentry, as well as a hash list
of child dentries. Child dentries are basically like files in a
directory.
+Directory Entry Cache APIs
+--------------------------
+
There are a number of functions defined which permit a filesystem to
manipulate dentries:
@@ -482,3 +488,184 @@ manipulate dentries:
pointer is NULL, the dentry is called a "negative
dentry". This function is commonly called when an inode is
created for an existing negative dentry
+
+ d_lookup: look up a dentry given its parent and path name component
+ It looks up the child of that given name from the dcache
+ hash table. If it is found, the reference count is incremented
+ and the dentry is returned. The caller must use dput()
+ to free the dentry when it finishes using it.
+
+
+RCU-based dcache locking model
+------------------------------
+
+On many workloads, the most common operation on dcache is
+to look up a dentry, given a parent dentry and the name
+of the child. Typically, for every open(), stat() etc.,
+the dentry corresponding to the pathname will be looked
+up by walking the tree starting with the first component
+of the pathname and using that dentry along with the next
+component to look up the next level and so on. Since it
+is a frequent operation for workloads like multiuser
+environments and webservers, it is important to optimize
+this path.
+
+Prior to 2.5.10, dcache_lock was acquired in d_lookup and thus
+in every component during path look-up. Since 2.5.10 onwards,
+fastwalk algorithm changed this by holding the dcache_lock
+at the beginning and walking as many cached path component
+dentries as possible. This significantly decreases the number
+of acquisition of dcache_lock. However it also increases the
+lock hold time significantly and affects performance in large
+SMP machines. Since 2.5.62 kernel, dcache has been using
+a new locking model that uses RCU to make dcache look-up
+lock-free.
+
+The current dcache locking model is not very different from the existing
+dcache locking model. Prior to 2.5.62 kernel, dcache_lock
+protected the hash chain, d_child, d_alias, d_lru lists as well
+as d_inode and several other things like mount look-up. RCU-based
+changes affect only the way the hash chain is protected. For everything
+else the dcache_lock must be taken for both traversing as well as
+updating. The hash chain updates also take the dcache_lock.
+The significant change is the way d_lookup traverses the hash chain,
+it doesn't acquire the dcache_lock for this and relies on RCU to
+ensure that the dentry has not been *freed*.
+
+
+Dcache locking details
+----------------------
+For many multi-user workloads, open() and stat() on files are
+very frequently occurring operations. Both involve walking
+of path names to find the dentry corresponding to the
+concerned file. In 2.4 kernel, dcache_lock was held
+during look-up of each path component. Contention and
+cacheline bouncing of this global lock caused significant
+scalability problems. With the introduction of RCU
+in linux kernel, this was worked around by making
+the look-up of path components during path walking lock-free.
+
+
+Safe lock-free look-up of dcache hash table
+===========================================
+
+Dcache is a complex data structure with the hash table entries
+also linked together in other lists. In 2.4 kernel, dcache_lock
+protected all the lists. We applied RCU only on hash chain
+walking. The rest of the lists are still protected by dcache_lock.
+Some of the important changes are :
+
+1. The deletion from hash chain is done using hlist_del_rcu() macro which
+ doesn't initialize next pointer of the deleted dentry and this
+ allows us to walk safely lock-free while a deletion is happening.
+
+2. Insertion of a dentry into the hash table is done using
+ hlist_add_head_rcu() which take care of ordering the writes -
+ the writes to the dentry must be visible before the dentry
+ is inserted. This works in conjunction with hlist_for_each_rcu()
+ while walking the hash chain. The only requirement is that
+ all initialization to the dentry must be done before hlist_add_head_rcu()
+ since we don't have dcache_lock protection while traversing
+ the hash chain. This isn't different from the existing code.
+
+3. The dentry looked up without holding dcache_lock cannot be
+ returned for walking if it is unhashed. It then may have a NULL
+ d_inode or other bogosity since RCU doesn't protect the other
+ fields in the dentry. We therefore use a flag DCACHE_UNHASHED to
+ indicate unhashed dentries and use this in conjunction with a
+ per-dentry lock (d_lock). Once looked up without the dcache_lock,
+ we acquire the per-dentry lock (d_lock) and check if the
+ dentry is unhashed. If so, the look-up is failed. If not, the
+ reference count of the dentry is increased and the dentry is returned.
+
+4. Once a dentry is looked up, it must be ensured during the path
+ walk for that component it doesn't go away. In pre-2.5.10 code,
+ this was done holding a reference to the dentry. dcache_rcu does
+ the same. In some sense, dcache_rcu path walking looks like
+ the pre-2.5.10 version.
+
+5. All dentry hash chain updates must take the dcache_lock as well as
+ the per-dentry lock in that order. dput() does this to ensure
+ that a dentry that has just been looked up in another CPU
+ doesn't get deleted before dget() can be done on it.
+
+6. There are several ways to do reference counting of RCU protected
+ objects. One such example is in ipv4 route cache where
+ deferred freeing (using call_rcu()) is done as soon as
+ the reference count goes to zero. This cannot be done in
+ the case of dentries because tearing down of dentries
+ require blocking (dentry_iput()) which isn't supported from
+ RCU callbacks. Instead, tearing down of dentries happen
+ synchronously in dput(), but actual freeing happens later
+ when RCU grace period is over. This allows safe lock-free
+ walking of the hash chains, but a matched dentry may have
+ been partially torn down. The checking of DCACHE_UNHASHED
+ flag with d_lock held detects such dentries and prevents
+ them from being returned from look-up.
+
+
+Maintaining POSIX rename semantics
+==================================
+
+Since look-up of dentries is lock-free, it can race against
+a concurrent rename operation. For example, during rename
+of file A to B, look-up of either A or B must succeed.
+So, if look-up of B happens after A has been removed from the
+hash chain but not added to the new hash chain, it may fail.
+Also, a comparison while the name is being written concurrently
+by a rename may result in false positive matches violating
+rename semantics. Issues related to race with rename are
+handled as described below :
+
+1. Look-up can be done in two ways - d_lookup() which is safe
+ from simultaneous renames and __d_lookup() which is not.
+ If __d_lookup() fails, it must be followed up by a d_lookup()
+ to correctly determine whether a dentry is in the hash table
+ or not. d_lookup() protects look-ups using a sequence
+ lock (rename_lock).
+
+2. The name associated with a dentry (d_name) may be changed if
+ a rename is allowed to happen simultaneously. To avoid memcmp()
+ in __d_lookup() go out of bounds due to a rename and false
+ positive comparison, the name comparison is done while holding the
+ per-dentry lock. This prevents concurrent renames during this
+ operation.
+
+3. Hash table walking during look-up may move to a different bucket as
+ the current dentry is moved to a different bucket due to rename.
+ But we use hlists in dcache hash table and they are null-terminated.
+ So, even if a dentry moves to a different bucket, hash chain
+ walk will terminate. [with a list_head list, it may not since
+ termination is when the list_head in the original bucket is reached].
+ Since we redo the d_parent check and compare name while holding
+ d_lock, lock-free look-up will not race against d_move().
+
+4. There can be a theoretical race when a dentry keeps coming back
+ to original bucket due to double moves. Due to this look-up may
+ consider that it has never moved and can end up in an infinite loop.
+ But this is not any worse than theoretical livelocks we already
+ have in the kernel.
+
+
+Important guidelines for filesystem developers related to dcache_rcu
+====================================================================
+
+1. Existing dcache interfaces (pre-2.5.62) exported to filesystem
+ don't change. Only dcache internal implementation changes. However
+ filesystems *must not* delete from the dentry hash chains directly
+ using the list macros like allowed earlier. They must use dcache
+ APIs like d_drop() or __d_drop() depending on the situation.
+
+2. d_flags is now protected by a per-dentry lock (d_lock). All
+ access to d_flags must be protected by it.
+
+3. For a hashed dentry, checking of d_count needs to be protected
+ by d_lock.
+
+
+Papers and other documentation on dcache locking
+================================================
+
+1. Scaling dcache with RCU (http://linuxjournal.com/article.php?sid=7124).
+
+2. http://lse.sourceforge.net/locking/dcache/dcache.html
diff --git a/Documentation/i386/zero-page.txt b/Documentation/i386/zero-page.txt
index bbdf7261e7a9..30badb4e39f5 100644
--- a/Documentation/i386/zero-page.txt
+++ b/Documentation/i386/zero-page.txt
@@ -28,7 +28,8 @@ Offset Type Description
0xa0 16 bytes System description table truncated to 16 bytes.
( struct sys_desc_table_struct )
- 0xb0 - 0x1c3 Free. Add more parameters here if you really need them.
+ 0xb0 - 0x13f Free. Add more parameters here if you really need them.
+ 0x140- 0x1be EDID_INFO Video mode setup
0x1c4 unsigned long EFI system table pointer
0x1c8 unsigned long EFI memory descriptor size
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e7ea0b7a6496..482b31cd3028 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -769,7 +769,7 @@ running once the system is up.
noexec [IA-64]
- noexec [i386]
+ noexec [i386, x86_64]
noexec=on: enable non-executable mappings (default)
noexec=off: disable nn-executable mappings
@@ -1311,6 +1311,8 @@ running once the system is up.
uart6850= [HW,OSS]
Format: <io>,<irq>
+
+ usb-handoff [HW] Enable early USB BIOS -> OS handoff
video= [FB] Frame buffer configuration
See Documentation/fb/modedb.txt.
diff --git a/Documentation/keys.txt b/Documentation/keys.txt
new file mode 100644
index 000000000000..36cbb0dd0f32
--- /dev/null
+++ b/Documentation/keys.txt
@@ -0,0 +1,836 @@
+ ============================
+ KERNEL KEY RETENTION SERVICE
+ ============================
+
+This service allows cryptographic keys, authentication tokens, cross-domain
+user mappings, and similar to be cached in the kernel for the use of
+filesystems and other kernel services.
+
+Keyrings are permitted; these are a special type of key that can hold links to
+other keys. Processes each have three standard keyring subscriptions that a
+kernel service can search for relevant keys.
+
+The key service can be configured on by enabling:
+
+ "Security options"/"Enable access key retention support" (CONFIG_KEYS)
+
+This document has the following sections:
+
+ - Key overview
+ - Key service overview
+ - Key access permissions
+ - New procfs files
+ - Userspace system call interface
+ - Kernel services
+ - Defining a key type
+ - Request-key callback service
+ - Key access filesystem
+
+
+============
+KEY OVERVIEW
+============
+
+In this context, keys represent units of cryptographic data, authentication
+tokens, keyrings, etc.. These are represented in the kernel by struct key.
+
+Each key has a number of attributes:
+
+ - A serial number.
+ - A type.
+ - A description (for matching a key in a search).
+ - Access control information.
+ - An expiry time.
+ - A payload.
+ - State.
+
+
+ (*) Each key is issued a serial number of type key_serial_t that is unique
+ for the lifetime of that key. All serial numbers are positive non-zero
+ 32-bit integers.
+
+ Userspace programs can use a key's serial numbers as a way to gain access
+ to it, subject to permission checking.
+
+ (*) Each key is of a defined "type". Types must be registered inside the
+ kernel by a kernel service (such as a filesystem) before keys of that
+ type can be added or used. Userspace programs cannot define new types
+ directly.
+
+ Key types are represented in the kernel by struct key_type. This defines
+ a number of operations that can be performed on a key of that type.
+
+ Should a type be removed from the system, all the keys of that type will
+ be invalidated.
+
+ (*) Each key has a description. This should be a printable string. The key
+ type provides an operation to perform a match between the description on
+ a key and a criterion string.
+
+ (*) Each key has an owner user ID, a group ID and a permissions mask. These
+ are used to control what a process may do to a key from userspace, and
+ whether a kernel service will be able to find the key.
+
+ (*) Each key can be set to expire at a specific time by the key type's
+ instantiation function. Keys can also be immortal.
+
+ (*) Each key can have a payload. This is a quantity of data that represent
+ the actual "key". In the case of a keyring, this is a list of keys to
+ which the keyring links; in the case of a user-defined key, it's an
+ arbitrary blob of data.
+
+ Having a payload is not required; and the payload can, in fact, just be a
+ value stored in the struct key itself.
+
+ When a key is instantiated, the key type's instantiation function is
+ called with a blob of data, and that then creates the key's payload in
+ some way.
+
+ Similarly, when userspace wants to read back the contents of the key, if
+ permitted, another key type operation will be called to convert the key's
+ attached payload back into a blob of data.
+
+ (*) Each key can be in one of a number of basic states:
+
+ (*) Uninstantiated. The key exists, but does not have any data
+ attached. Keys being requested from userspace will be in this state.
+
+ (*) Instantiated. This is the normal state. The key is fully formed, and
+ has data attached.
+
+ (*) Negative. This is a relatively short-lived state. The key acts as a
+ note saying that a previous call out to userspace failed, and acts as
+ a throttle on key lookups. A negative key can be updated to a normal
+ state.
+
+ (*) Expired. Keys can have lifetimes set. If their lifetime is exceeded,
+ they traverse to this state. An expired key can be updated back to a
+ normal state.
+
+ (*) Revoked. A key is put in this state by userspace action. It can't be
+ found or operated upon (apart from by unlinking it).
+
+ (*) Dead. The key's type was unregistered, and so the key is now useless.
+
+
+====================
+KEY SERVICE OVERVIEW
+====================
+
+The key service provides a number of features besides keys:
+
+ (*) The key service defines two special key types:
+
+ (+) "keyring"
+
+ Keyrings are special keys that contain a list of other keys. Keyring
+ lists can be modified using various system calls. Keyrings should not
+ be given a payload when created.
+
+ (+) "user"
+
+ A key of this type has a description and a payload that are arbitrary
+ blobs of data. These can be created, updated and read by userspace,
+ and aren't intended for use by kernel services.
+
+ (*) Each process subscribes to three keyrings: a thread-specific keyring, a
+ process-specific keyring, and a session-specific keyring.
+
+ The thread-specific keyring is discarded from the child when any sort of
+ clone, fork, vfork or execve occurs. A new keyring is created only when
+ required.
+
+ The process-specific keyring is replaced with an empty one in the child
+ on clone, fork, vfork unless CLONE_THREAD is supplied, in which case it
+ is shared. execve also discards the process's process keyring and creates
+ a new one.
+
+ The session-specific keyring is persistent across clone, fork, vfork and
+ execve, even when the latter executes a set-UID or set-GID binary. A
+ process can, however, replace its current session keyring with a new one
+ by using PR_JOIN_SESSION_KEYRING. It is permitted to request an anonymous
+ new one, or to attempt to create or join one of a specific name.
+
+ The ownership of the thread and process-specific keyrings changes when
+ the real UID and GID of the thread changes.
+
+ (*) Each user ID resident in the system holds two special keyrings: a user
+ specific keyring and a default user session keyring. The default session
+ keyring is initialised with a link to the user-specific keyring.
+
+ When a process changes its real UID, if it used to have no session key, it
+ will be subscribed to the default session key for the new UID.
+
+ If a process attempts to access its session key when it doesn't have one,
+ it will be subscribed to the default for its current UID.
+
+ (*) Each user has two quotas against which the keys they own are tracked. One
+ limits the total number of keys and keyrings, the other limits the total
+ amount of description and payload space that can be consumed.
+
+ The user can view information on this and other statistics through procfs
+ files.
+
+ Process-specific and thread-specific keyrings are not counted towards a
+ user's quota.
+
+ If a system call that modifies a key or keyring in some way would put the
+ user over quota, the operation is refused and error EDQUOT is returned.
+
+ (*) There's a system call interface by which userspace programs can create
+ and manipulate keys and keyrings.
+
+ (*) There's a kernel interface by which services can register types and
+ search for keys.
+
+ (*) There's a way for a search done from the kernel to call back to
+ userspace to request a key that can't be found in a process's keyrings.
+
+ (*) An optional filesystem is available through which the key database can be
+ viewed and manipulated.
+
+
+======================
+KEY ACCESS PERMISSIONS
+======================
+
+Keys have an owner user ID, a group access ID, and a permissions mask. The
+mask has up to eight bits each for user, group and other access. Only five of
+each set of eight bits are defined. The permissions granted are:
+
+ (*) View
+
+ This permits a key or keyring's attributes to be viewed - including key
+ type and description.
+
+ (*) Read
+
+ This permits a key's payload to be viewed or a keyring's list of linked
+ keys.
+
+ (*) Write
+
+ This permits a key's payload to be instantiated or updated, or it allows
+ a link to be added to or removed from a keyring.
+
+ (*) Search
+
+ This permits keyrings to be searched and keys to be found. Searches can
+ only recurse into nested keyrings that have search permission set.
+
+ (*) Link
+
+ This permits a key or keyring to be linked to. To create a link from a
+ keyring to a key, a process must have Write permission on the keyring and
+ Link permission on the key.
+
+For changing the ownership, group ID or permissions mask, being the owner of
+the key or having the sysadmin capability is sufficient.
+
+
+================
+NEW PROCFS FILES
+================
+
+Two files have been added to procfs by which an administrator can find out
+about the status of the key service:
+
+ (*) /proc/keys
+
+ This lists all the keys on the system, giving information about their
+ type, description and permissions. The payload of the key is not
+ available this way:
+
+ SERIAL FLAGS USAGE EXPY PERM UID GID TYPE DESCRIPTION: SUMMARY
+ 00000001 I----- 39 perm 1f0000 0 0 keyring _uid_ses.0: 1/4
+ 00000002 I----- 2 perm 1f0000 0 0 keyring _uid.0: empty
+ 00000007 I----- 1 perm 1f0000 0 0 keyring _pid.1: empty
+ 0000018d I----- 1 perm 1f0000 0 0 keyring _pid.412: empty
+ 000004d2 I--Q-- 1 perm 1f0000 32 -1 keyring _uid.32: 1/4
+ 000004d3 I--Q-- 3 perm 1f0000 32 -1 keyring _uid_ses.32: empty
+ 00000892 I--QU- 1 perm 1f0000 0 0 user metal:copper: 0
+ 00000893 I--Q-N 1 35s 1f0000 0 0 user metal:silver: 0
+ 00000894 I--Q-- 1 10h 1f0000 0 0 user metal:gold: 0
+
+ The flags are:
+
+ I Instantiated
+ R Revoked
+ D Dead
+ Q Contributes to user's quota
+ U Under construction by callback to userspace
+ N Negative key
+
+ This file must be enabled at kernel configuration time as it allows anyone
+ to list the keys database.
+
+ (*) /proc/key-users
+
+ This file lists the tracking data for each user that has at least one key
+ on the system. Such data includes quota information and statistics:
+
+ [root@andromeda root]# cat /proc/key-users
+ 0: 46 45/45 1/100 13/10000
+ 29: 2 2/2 2/100 40/10000
+ 32: 2 2/2 2/100 40/10000
+ 38: 2 2/2 2/100 40/10000
+
+ The format of each line is
+ <UID>: User ID to which this applies
+ <usage> Structure refcount
+ <inst>/<keys> Total number of keys and number instantiated
+ <keys>/<max> Key count quota
+ <bytes>/<max> Key size quota
+
+
+===============================
+USERSPACE SYSTEM CALL INTERFACE
+===============================
+
+Userspace can manipulate keys directly through three new syscalls: add_key,
+request_key and keyctl. The latter provides a number of functions for
+manipulating keys.
+
+When referring to a key directly, userspace programs should use the key's
+serial number (a positive 32-bit integer). However, there are some special
+values available for referring to special keys and keyrings that relate to the
+process making the call:
+
+ CONSTANT VALUE KEY REFERENCED
+ ============================== ====== ===========================
+ KEY_SPEC_THREAD_KEYRING -1 thread-specific keyring
+ KEY_SPEC_PROCESS_KEYRING -2 process-specific keyring
+ KEY_SPEC_SESSION_KEYRING -3 session-specific keyring
+ KEY_SPEC_USER_KEYRING -4 UID-specific keyring
+ KEY_SPEC_USER_SESSION_KEYRING -5 UID-session keyring
+ KEY_SPEC_GROUP_KEYRING -6 GID-specific keyring
+
+
+The main syscalls are:
+
+ (*) Create a new key of given type, description and payload and add it to the
+ nominated keyring:
+
+ key_serial_t add_key(const char *type, const char *desc,
+ const void *payload, size_t plen,
+ key_serial_t keyring);
+
+ If a key of the same type and description as that proposed already exists
+ in the keyring, this will try to update it with the given payload, or it
+ will return error EEXIST if that function is not supported by the key
+ type. The process must also have permission to write to the key to be
+ able to update it. The new key will have all user permissions granted and
+ no group or third party permissions.
+
+ Otherwise, this will attempt to create a new key of the specified type
+ and description, and to instantiate it with the supplied payload and
+ attach it to the keyring. In this case, an error will be generated if the
+ process does not have permission to write to the keyring.
+
+ The payload is optional, and the pointer can be NULL if not required by
+ the type. The payload is plen in size, and plen can be zero for an empty
+ payload.
+
+ A new keyring can be generated by setting type "keyring", the keyring
+ name as the description (or NULL) and setting the payload to NULL.
+
+ User defined keys can be created by specifying type "user". It is
+ recommended that a user defined key's description be prefixed with a type
+ ID and a colon, such as "krb5tgt:" for a Kerberos 5 ticket granting
+ ticket.
+
+ Any other type must have been registered with the kernel in advance by a
+ kernel service such as a filesystem.
+
+ The ID of the new or updated key is returned if successful.
+
+
+ (*) Search the process's keyrings for a key, potentially calling out to
+ userspace to create it.
+
+ key_serial_t request_key(const char *type, const char *description,
+ const char *callout_info,
+ key_serial_t dest_keyring);
+
+ This function searches all the process's keyrings in the order thread,
+ process, session for a matching key. This works very much like
+ KEYCTL_SEARCH, including the optional attachment of the discovered key to
+ a keyring.
+
+ If a key cannot be found, and if callout_info is not NULL, then
+ /sbin/request-key will be invoked in an attempt to obtain a key. The
+ callout_info string will be passed as an argument to the program.
+
+
+The keyctl syscall functions are:
+
+ (*) Map a special key ID to a real key ID for this process:
+
+ key_serial_t keyctl(KEYCTL_GET_KEYRING_ID, key_serial_t id,
+ int create);
+
+ The special key specified by "id" is looked up (with the key being
+ created if necessary) and the ID of the key or keyring thus found is
+ returned if it exists.
+
+ If the key does not yet exist, the key will be created if "create" is
+ non-zero; and the error ENOKEY will be returned if "create" is zero.
+
+
+ (*) Replace the session keyring this process subscribes to with a new one:
+
+ key_serial_t keyctl(KEYCTL_JOIN_SESSION_KEYRING, const char *name);
+
+ If name is NULL, an anonymous keyring is created attached to the process
+ as its session keyring, displacing the old session keyring.
+
+ If name is not NULL, if a keyring of that name exists, the process
+ attempts to attach it as the session keyring, returning an error if that
+ is not permitted; otherwise a new keyring of that name is created and
+ attached as the session keyring.
+
+ To attach to a named keyring, the keyring must have search permission for
+ the process's ownership.
+
+ The ID of the new session keyring is returned if successful.
+
+
+ (*) Update the specified key:
+
+ long keyctl(KEYCTL_UPDATE, key_serial_t key, const void *payload,
+ size_t plen);
+
+ This will try to update the specified key with the given payload, or it
+ will return error EOPNOTSUPP if that function is not supported by the key
+ type. The process must also have permission to write to the key to be
+ able to update it.
+
+ The payload is of length plen, and may be absent or empty as for
+ add_key().
+
+
+ (*) Revoke a key:
+
+ long keyctl(KEYCTL_REVOKE, key_serial_t key);
+
+ This makes a key unavailable for further operations. Further attempts to
+ use the key will be met with error EKEYREVOKED, and the key will no longer
+ be findable.
+
+
+ (*) Change the ownership of a key:
+
+ long keyctl(KEYCTL_CHOWN, key_serial_t key, uid_t uid, gid_t gid);
+
+ This function permits a key's owner and group ID to be changed. Either
+ one of uid or gid can be set to -1 to suppress that change.
+
+ Only the superuser can change a key's owner to something other than the
+ key's current owner. Similarly, only the superuser can change a key's
+ group ID to something other than the calling process's group ID or one of
+ its group list members.
+
+
+ (*) Change the permissions mask on a key:
+
+ long keyctl(KEYCTL_SETPERM, key_serial_t key, key_perm_t perm);
+
+ This function permits the owner of a key or the superuser to change the
+ permissions mask on a key.
+
+ Only the available bits are permitted; if any other bits are set,
+ error EINVAL will be returned.
+
+
+ (*) Describe a key:
+
+ long keyctl(KEYCTL_DESCRIBE, key_serial_t key, char *buffer,
+ size_t buflen);
+
+ This function returns a summary of the key's attributes (but not its
+ payload data) as a string in the buffer provided.
+
+ Unless there's an error, it always returns the amount of data it could
+ produce, even if that's too big for the buffer, but it won't copy more
+ than requested to userspace. If the buffer pointer is NULL then no copy
+ will take place.
+
+ A process must have view permission on the key for this function to be
+ successful.
+
+ If successful, a string is placed in the buffer in the following format:
+
+ <type>;<uid>;<gid>;<perm>;<description>
+
+ Where type and description are strings, uid and gid are decimal, and perm
+ is hexadecimal. A NUL character is included at the end of the string if
+ the buffer is sufficiently big.
+
+ This can be parsed with
+
+ sscanf(buffer, "%[^;];%d;%d;%o;%s", type, &uid, &gid, &mode, desc);
+
+
+ (*) Clear out a keyring:
+
+ long keyctl(KEYCTL_CLEAR, key_serial_t keyring);
+
+ This function clears the list of keys attached to a keyring. The calling
+ process must have write permission on the keyring, and it must be a
+ keyring (or else error ENOTDIR will result).
+
+
+ (*) Link a key into a keyring:
+
+ long keyctl(KEYCTL_LINK, key_serial_t keyring, key_serial_t key);
+
+ This function creates a link from the keyring to the key. The process
+ must have write permission on the keyring and must have link permission
+ on the key.
+
+ Should the keyring not be a keyring, error ENOTDIR will result; and if
+ the keyring is full, error ENFILE will result.
+
+ The link procedure checks the nesting of the keyrings, returning ELOOP if
+ it appears too deep or EDEADLK if the link would introduce a cycle.
+
+
+ (*) Unlink a key or keyring from another keyring:
+
+ long keyctl(KEYCTL_UNLINK, key_serial_t keyring, key_serial_t key);
+
+ This function looks through the keyring for the first link to the
+ specified key, and removes it if found. Subsequent links to that key are
+ ignored. The process must have write permission on the keyring.
+
+ If the keyring is not a keyring, error ENOTDIR will result; and if the
+ key is not present, error ENOENT will be the result.
+
+
+ (*) Search a keyring tree for a key:
+
+ key_serial_t keyctl(KEYCTL_SEARCH, key_serial_t keyring,
+ const char *type, const char *description,
+ key_serial_t dest_keyring);
+
+ This searches the keyring tree headed by the specified keyring until a
+ key is found that matches the type and description criteria. Each keyring
+ is checked for keys before recursion into its children occurs.
+
+ The process must have search permission on the top level keyring, or else
+ error EACCES will result. Only keyrings that the process has search
+ permission on will be recursed into, and only keys and keyrings for which
+ a process has search permission can be matched. If the specified keyring
+ is not a keyring, ENOTDIR will result.
+
+ If the search succeeds, the function will attempt to link the found key
+ into the destination keyring if one is supplied (non-zero ID). All the
+ constraints applicable to KEYCTL_LINK apply in this case too.
+
+ Error ENOKEY, EKEYREVOKED or EKEYEXPIRED will be returned if the search
+ fails. On success, the resulting key ID will be returned.
+
+
+ (*) Read the payload data from a key:
+
+ key_serial_t keyctl(KEYCTL_READ, key_serial_t keyring, char *buffer,
+ size_t buflen);
+
+ This function attempts to read the payload data from the specified key
+ into the buffer. The process must have read permission on the key to
+ succeed.
+
+ The returned data will be processed for presentation by the key type. For
+ instance, a keyring will return an array of key_serial_t entries
+ representing the IDs of all the keys to which it is subscribed. The user
+ defined key type will return its data as is. If a key type does not
+ implement this function, error EOPNOTSUPP will result.
+
+ As much of the data as can be fitted into the buffer will be copied to
+ userspace if the buffer pointer is not NULL.
+
+ On a successful return, the function will always return the amount of
+ data available rather than the amount copied.
+
+
+ (*) Instantiate a partially constructed key.
+
+ key_serial_t keyctl(KEYCTL_INSTANTIATE, key_serial_t key,
+ const void *payload, size_t plen,
+ key_serial_t keyring);
+
+ If the kernel calls back to userspace to complete the instantiation of a
+ key, userspace should use this call to supply data for the key before the
+ invoked process returns, or else the key will be marked negative
+ automatically.
+
+ The process must have write access on the key to be able to instantiate
+ it, and the key must be uninstantiated.
+
+ If a keyring is specified (non-zero), the key will also be linked into
+ that keyring, however all the constraints applying in KEYCTL_LINK apply
+ in this case too.
+
+ The payload and plen arguments describe the payload data as for add_key().
+
+
+ (*) Negatively instantiate a partially constructed key.
+
+ key_serial_t keyctl(KEYCTL_NEGATE, key_serial_t key,
+ unsigned timeout, key_serial_t keyring);
+
+ If the kernel calls back to userspace to complete the instantiation of a
+ key, userspace should use this call to mark the key as negative before the
+ invoked process returns if it is unable to fulfil the request.
+
+ The process must have write access on the key to be able to instantiate
+ it, and the key must be uninstantiated.
+
+ If a keyring is specified (non-zero), the key will also be linked into
+ that keyring, however all the constraints applying in KEYCTL_LINK apply
+ in this case too.
+
+
+===============
+KERNEL SERVICES
+===============
+
+The kernel services for key management are fairly simple to deal with. They can
+be broken down into two areas: keys and key types.
+
+Dealing with keys is fairly straightforward. Firstly, the kernel service
+registers its type, then it searches for a key of that type. It should retain
+the key as long as it has need of it, and then it should release it. For a
+filesystem or device file, a search would probably be performed during the
+open call, and the key released upon close. How to deal with conflicting keys
+due to two different users opening the same file is left to the filesystem
+author to solve.
+
+When accessing a key's payload data, the key->lock should be at least read
+locked, or else the data may be changed by update during the access.
+
+(*) To search for a key, call:
+
+ struct key *request_key(const struct key_type *type,
+ const char *description,
+ const char *callout_string);
+
+ This is used to request a key or keyring with a description that matches
+ the description specified according to the key type's match function. This
+ permits approximate matching to occur. If callout_string is not NULL, then
+ /sbin/request-key will be invoked in an attempt to obtain the key from
+ userspace. In that case, callout_string will be passed as an argument to
+ the program.
+
+ Should the function fail error ENOKEY, EKEYEXPIRED or EKEYREVOKED will be
+ returned.
+
+
+(*) When it is no longer required, the key should be released using:
+
+ void key_put(struct key *key);
+
+ This can be called from interrupt context. If CONFIG_KEYS is not set then
+ the argument will not be parsed.
+
+
+(*) Extra references can be made to a key by calling the following function:
+
+ struct key *key_get(struct key *key);
+
+ These need to be disposed of by calling key_put() when they've been
+ finished with. The key pointer passed in will be returned. If the pointer
+ is NULL or CONFIG_KEYS is not set then the key will not be dereferenced and
+ no increment will take place.
+
+
+(*) A key's serial number can be obtained by calling:
+
+ key_serial_t key_serial(struct key *key);
+
+ If key is NULL or if CONFIG_KEYS is not set then 0 will be returned (in the
+ latter case without parsing the argument).
+
+
+(*) If a keyring was found in the search, this can be further searched by:
+
+ struct key *keyring_search(struct key *keyring,
+ const struct key_type *type,
+ const char *description)
+
+ This searches the keyring tree specified for a matching key. Error ENOKEY
+ is returned upon failure. If successful, the returned key will need to be
+ released.
+
+
+(*) To check the validity of a key, this function can be called:
+
+ int validate_key(struct key *key);
+
+ This checks that the key in question hasn't expired and hasn't been
+ revoked. Should the key be invalid, error EKEYEXPIRED or EKEYREVOKED will
+ be returned. If the key is NULL or if CONFIG_KEYS is not set then 0 will be
+ returned (in the latter case without parsing the argument).
+
+
+(*) To register a key type, the following function should be called:
+
+ int register_key_type(struct key_type *type);
+
+ This will return error EEXIST if a type of the same name is already
+ present.
+
+
+(*) To unregister a key type, call:
+
+ void unregister_key_type(struct key_type *type);
+
+
+===================
+DEFINING A KEY TYPE
+===================
+
+A kernel service may want to define its own key type. For instance, an AFS
+filesystem might want to define a Kerberos 5 ticket key type. To do this, the
+author fills in a struct key_type and registers it with the system.
+
+The structure has a number of fields, some of which are mandatory:
+
+ (*) const char *name
+
+ The name of the key type. This is used to translate a key type name
+ supplied by userspace into a pointer to the structure.
+
+
+ (*) size_t def_datalen
+
+ This is optional - it supplies the default payload data length as
+ contributed to the quota. If the key type's payload is always or almost
+ always the same size, then this is a more efficient way to do things.
+
+ The data length (and quota) on a particular key can always be changed
+ during instantiation or update by calling:
+
+ int key_payload_reserve(struct key *key, size_t datalen);
+
+ With the revised data length. Error EDQUOT will be returned if this is
+ not viable.
+
+
+ (*) int (*instantiate)(struct key *key, const void *data, size_t datalen);
+
+ This method is called to attach a payload to a key during
+ construction. The payload attached need not bear any relation to the data
+ passed to this function.
+
+ If the amount of data attached to the key differs from the size in
+ keytype->def_datalen, then key_payload_reserve() should be called.
+
+
+ (*) int (*duplicate)(struct key *key, const struct key *source);
+
+ If this type of key can be duplicated, then this method should be
+ provided. It is called to copy the payload attached to the source into
+ the new key. The data length on the new key will have been updated and
+ the quota adjusted already.
+
+ The source key will be locked against change on the source->sem, so it is
+ safe to sleep here.
+
+
+ (*) int (*update)(struct key *key, const void *data, size_t datalen);
+
+ If this type of key can be updated, then this method should be
+ provided. It is called to update a key's payload from the blob of data
+ provided.
+
+ key_payload_reserve() should be called if the data length might change
+ before any changes are actually made. Note that if this succeeds, the
+ type is committed to changing the key because it's already been altered,
+ so all memory allocation must be done first.
+
+ The key will be locked against other changers on key->sem, so it is safe
+ to sleep here.
+
+ key_payload_reserve() should be called with the key->lock write locked,
+ and the changes to the key's attached payload should be made before the
+ key is locked.
+
+
+ (*) int (*match)(const struct key *key, const void *desc);
+
+ This method is called to match a key against a description. It should
+ return non-zero if the two match, zero if they don't.
+
+
+ (*) void (*destroy)(struct key *key);
+
+ This method is optional. It is called to discard the payload data on a
+ key when it is being destroyed.
+
+
+ (*) void (*describe)(const struct key *key, struct seq_file *p);
+
+ This method is optional. It is called during /proc/keys reading to
+ summarise a key in text form.
+
+
+ (*) long (*read)(const struct key *key, char __user *buffer, size_t buflen);
+
+ This method is optional. It is called by KEYCTL_READ to translate the
+ key's payload into a blob of data for userspace to deal
+ with. Ideally, the blob should be in the same format as that passed in to
+ the instantiate and update methods.
+
+ If successful, the blob size that could be produced should be returned
+ rather than the size copied.
+
+
+============================
+REQUEST-KEY CALLBACK SERVICE
+============================
+
+To create a new key, the kernel will attempt to execute the following command
+line:
+
+ /sbin/request-key create <key> <uid> <gid> \
+ <threadring> <processring> <sessionring> <callout_info>
+
+<key> is the key being constructed, and the three keyrings are the process
+keyrings from the process that caused the search to be issued. These are
+included for two reasons:
+
+ (1) There may be an authentication token in one of the keyrings that is
+ required to obtain the key, eg: a Kerberos Ticket-Granting Ticket.
+
+ (2) The new key should probably be cached in one of these rings.
+
+This program should set its UID and GID to those specified before attempting to
+access any more keys. It may then look around for a user specific process to
+hand the request off to (perhaps a path held in another key by, for
+example, the KDE desktop manager).
+
+The program (or whatever it calls) should finish construction of the key by
+calling KEYCTL_INSTANTIATE, which also permits it to cache the key in one of
+the keyrings (probably the session ring) before returning. Alternatively, the
+key can be marked as negative with KEYCTL_NEGATE; this also permits the key to
+be cached in one of the keyrings.
+
+If it returns with the key remaining in the unconstructed state, the key will
+be marked as being negative, it will be added to the session keyring, and an
+error will be returned to the key requestor.
+
+Supplementary information may be provided from whoever or whatever invoked
+this service. This will be passed as the <callout_info> parameter. If no such
+information was made available, then "-" will be passed as this parameter
+instead.
+
+
+Similarly, the kernel may attempt to update an expired or a soon to expire key
+by executing:
+
+ /sbin/request-key update <key> <uid> <gid> \
+ <threadring> <processring> <sessionring>
+
+In this case, the program isn't required to actually attach the key to a ring;
+the rings are provided for reference.
diff --git a/Documentation/usb/sn9c102.txt b/Documentation/usb/sn9c102.txt
index 3999069b94bc..40c2f73a707f 100644
--- a/Documentation/usb/sn9c102.txt
+++ b/Documentation/usb/sn9c102.txt
@@ -1,7 +1,7 @@
- SN9C10[12] PC Camera Controllers
+ SN9C10x PC Camera Controllers
Driver for Linux
- ================================
+ =============================
- Documentation -
@@ -49,22 +49,23 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
3. Overview
===========
-This driver attempts to support the video streaming capabilities of the devices
-mounting the SONiX SN9C101 or SONiX SN9C102 PC Camera Controllers.
+This driver attempts to support the video and audio streaming capabilities of
+the devices mounting the SONiX SN9C101, SN9C102 and SN9C103 (or SUI-102) PC
+Camera Controllers.
- It's worth to note that SONiX has never collaborated with me during the
-development of this project, despite of several requests for enough detailed
+development of this project, despite several requests for enough detailed
specifications of the register tables, compression engine and video data format
of the above chips -
Up to 64 cameras can be handled at the same time. They can be connected and
disconnected from the host many times without turning off the computer, if
-your system supports the hotplug facility.
+your system supports hotplugging.
The driver relies on the Video4Linux2 and USB core modules. It has been
designed to run properly on SMP systems as well.
-The latest version of the SN9C10[12] driver can be found at the following URL:
+The latest version of the SN9C10x driver can be found at the following URL:
http://go.lamarinapunto.com/
@@ -122,12 +123,12 @@ analyze kernel messages and verify that the loading process has gone well:
Module parameters are listed below:
-------------------------------------------------------------------------------
Name: video_nr
-Type: int array (min = 0, max = 32)
+Type: int array (min = 0, max = 64)
Syntax: <-1|n[,...]>
Description: Specify V4L2 minor mode number:
-1 = use next available
n = use minor number n
- You can specify up to 32 cameras this way.
+ You can specify up to 64 cameras this way.
For example:
video_nr=-1,2,-1 would assign minor number 2 to the second
recognized camera and use auto for the first one and for every
@@ -150,17 +151,20 @@ Default: 2
7. Optional device control through "sysfs"
==========================================
-It is possible to read and write both the SN9C10[12] and the image sensor
+It is possible to read and write both the SN9C10x and the image sensor
registers by using the "sysfs" filesystem interface.
-Every time a supported device is recognized, a read-only file named "green" is
+Every time a supported device is recognized, a write-only file named "green" is
created in the /sys/class/video4linux/videoX directory. You can set the green
channel's gain by writing the desired value to it. The value may range from 0
-to 15.
+to 15 for SN9C101 or SN9C102 bridges, from 0 to 127 for SN9C103 bridges.
+Similarly, only for SN9C103 controllers, blue and red gain control files are
+available in the same directory, for which accepted values may range from 0 to
+127.
There are other four entries in the directory above for each registered camera:
"reg", "val", "i2c_reg" and "i2c_val". The first two files control the
-SN9C10[12] bridge, while the other two control the sensor chip. "reg" and
+SN9C10x bridge, while the other two control the sensor chip. "reg" and
"i2c_reg" hold the values of the current register index where the following
reading/writing operations are addressed at through "val" and "i2c_val". Their
use is not intended for end-users, unless you know what you are doing. Note
@@ -169,19 +173,21 @@ support the standard I2C protocol. Also, remember that you must be logged in as
root before writing to them.
As an example, suppose we were to want to read the value contained in the
-register number 1 of the sensor register table - which usually is the product
+register number 1 of the sensor register table - which is usually the product
identifier - of the camera registered as "/dev/video0":
[root@localhost #] cd /sys/class/video4linux/video0
[root@localhost #] echo 1 > i2c_reg
[root@localhost #] cat i2c_val
-Now let's set the green gain's register of the SN9C10[12] chip to 2:
+Note that "cat" will fail if sensor registers cannot be read.
+
+Now let's set the green gain's register of the SN9C101 or SN9C102 chips to 2:
[root@localhost #] echo 0x11 > reg
[root@localhost #] echo 2 > val
-Note that the SN9C10[12] always returns 0 when some of its registers are read.
+Note that the SN9C10x always returns 0 when some of its registers are read.
To avoid race conditions, all the I/O accesses to the files are serialized.
@@ -192,25 +198,52 @@ here. They have never collaborated with me, so no advertising -
From the point of view of a driver, what unambiguously identify a device are
its vendor and product USB identifiers. Below is a list of known identifiers of
-devices mounting the SN9C10[12] PC camera controllers:
+devices mounting the SN9C10x PC camera controllers:
Vendor ID Product ID
--------- ----------
-0xc45 0x6001
-0xc45 0x6005
-0xc45 0x6009
-0xc45 0x600d
-0xc45 0x6024
-0xc45 0x6025
-0xc45 0x6028
-0xc45 0x6029
-0xc45 0x602a
-0xc45 0x602c
-0xc45 0x6030
+0x0c45 0x6001
+0x0c45 0x6005
+0x0c45 0x6009
+0x0c45 0x600d
+0x0c45 0x6024
+0x0c45 0x6025
+0x0c45 0x6028
+0x0c45 0x6029
+0x0c45 0x602a
+0x0c45 0x602b
+0x0c45 0x602c
+0x0c45 0x6030
+0x0c45 0x6080
+0x0c45 0x6082
+0x0c45 0x6083
+0x0c45 0x6088
+0x0c45 0x608a
+0x0c45 0x608b
+0x0c45 0x608c
+0x0c45 0x608e
+0x0c45 0x608f
+0x0c45 0x60a0
+0x0c45 0x60a2
+0x0c45 0x60a3
+0x0c45 0x60a8
+0x0c45 0x60aa
+0x0c45 0x60ab
+0x0c45 0x60ac
+0x0c45 0x60ae
+0x0c45 0x60af
+0x0c45 0x60b0
+0x0c45 0x60b2
+0x0c45 0x60b3
+0x0c45 0x60b8
+0x0c45 0x60ba
+0x0c45 0x60bb
+0x0c45 0x60bc
+0x0c45 0x60be
The list above does NOT imply that all those devices work with this driver: up
-until now only the ones that mount the following image sensors are supported.
-Kernel messages will always tell you whether this is the case:
+until now only the ones that mount the following image sensors are supported;
+kernel messages will always tell you whether this is the case:
Model Manufacturer
----- ------------
@@ -219,12 +252,15 @@ PAS202BCB PixArt Imaging Inc.
TAS5110C1B Taiwan Advanced Sensor Corporation
TAS5130D1B Taiwan Advanced Sensor Corporation
+All the available control settings of each image sensor are supported through
+the V4L2 interface.
+
If you think your camera is based on the above hardware and is not actually
listed in the above table, you may try to add the specific USB VendorID and
ProductID identifiers to the sn9c102_id_table[] in the file "sn9c102_sensor.h";
then compile, load the module again and look at the kernel output.
If this works, please send an email to me reporting the kernel messages, so
-that I will add a new entry in the list of supported devices.
+that I can add a new entry in the list of supported devices.
Donations of new models for further testing and support would be much
appreciated. I won't add official support for hardware that I don't actually
@@ -238,8 +274,8 @@ have created for this purpose, which is present in "sn9c102_sensor.h"
(documentation is included there). As an example, have a look at the code in
"sn9c102_pas106b.c", which uses the mentioned interface.
-At the moment, not yet supported image sensors are: HV7131[D|E1] (VGA),
-MI03 (VGA), OV7620 (VGA).
+At the moment, possible unsupported image sensors are: HV7131x series (VGA),
+MI03x series (VGA), OV7620 (VGA), OV7630 (VGA), CIS-VF10 (VGA).
10. Notes for V4L2 application developers
@@ -254,12 +290,13 @@ device to switch to the other I/O method;
- previously mapped buffer memory must always be unmapped before calling any
of the "VIDIOC_S_CROP", "VIDIOC_TRY_FMT" and "VIDIOC_S_FMT" ioctl's. The same
number of buffers as before will be allocated again to match the size of the
-new video frames, so you have to map them again before any I/O attempts.
+new video frames, so you have to map the buffers again before any I/O attempts
+on them.
Consistently with the hardware limits, this driver also supports image
downscaling with arbitrary scaling factors from 1, 2 and 4 in both directions.
-However the V4L2 API specifications don't correctly define how the scaling
-factor can be choosen arbitrarily by the "negotiation" of the "source" and
+However, the V4L2 API specifications don't correctly define how the scaling
+factor can be chosen arbitrarily by the "negotiation" of the "source" and
"target" rectangles. To work around this flaw, we have added the convention
that, during the negotiation, whenever the "VIDIOC_S_CROP" ioctl is issued, the
scaling factor is restored to 1.
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index 7f8570a7bdba..d12607071491 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -87,22 +87,8 @@ Non Executable Mappings
noexec=on|off
- on Enable
+ on Enable(default)
off Disable
- noforce (default) Don't enable by default for heap/stack/data,
- but allow PROT_EXEC to be effective
-
- noexec32=opt{,opt}
-
- Control the no exec default for 32bit processes.
- Requires noexec=on or noexec=noforce to be effective.
-
- Valid options:
- all,on Heap,stack,data is non executable.
- off (default) Heap,stack,data is executable
- stack Stack is non executable, heap/data is.
- force Don't imply PROT_EXEC for PROT_READ
- compat (default) Imply PROT_EXEC for PROT_READ
SMP
diff --git a/MAINTAINERS b/MAINTAINERS
index b2b5643bc88c..1d9a0960e67e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -947,7 +947,7 @@ S: Maintained
HPUSBSCSI
P: Oliver Neukum
-M: drivers@neukum.org
+M: oliver@neukum.name
S: Maintained
I2C AND SENSORS DRIVERS
@@ -1437,7 +1437,7 @@ S: Maintained
MICROTEK X6 SCANNER
P: Oliver Neukum
-M: drivers@neukum.org
+M: oliver@neukum.name
S: Maintained
MIPS
@@ -2206,8 +2206,8 @@ W: http://www.kernel.dk
S: Maintained
USB ACM DRIVER
-P: Vojtech Pavlik
-M: vojtech@suse.cz
+P: Oliver Neukum
+M: oliver@neukum.name
L: linux-usb-users@lists.sourceforge.net
L: linux-usb-devel@lists.sourceforge.net
S: Maintained
@@ -2250,7 +2250,7 @@ S: Maintained
USB KAWASAKI LSI DRIVER
P: Oliver Neukum
-M: drivers@neukum.org
+M: oliver@neukum.name
L: linux-usb-users@lists.sourceforge.net
L: linux-usb-devel@lists.sourceforge.net
S: Maintained
@@ -2367,7 +2367,7 @@ L: linux-usb-devel@lists.sourceforge.net
W: http://www.connecttech.com
S: Supported
-USB SN9C10[12] DRIVER
+USB SN9C10x DRIVER
P: Luca Risolia
M: luca.risolia@studio.unibo.it
L: linux-usb-devel@lists.sourceforge.net
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 5ff439b04bae..e74677115e2b 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -138,6 +138,9 @@ irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs)
while (nticks > 0) {
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
nticks--;
}
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 3043e6de75d9..2e16689ec4c6 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -343,6 +343,9 @@ void timer_tick(struct pt_regs *regs)
do_leds();
do_set_rtc();
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
}
void (*init_arch_time)(void);
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index e5e97fef86ea..a35ade06e0c9 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -17,8 +17,10 @@
#include <asm/arch/pxa-regs.h>
#include <linux/init.h>
#include <linux/pm.h>
+#include <linux/device.h>
#include <asm/hardware.h>
+#include <asm/irq.h>
#include "generic.h"
@@ -117,3 +119,45 @@ unsigned int get_lcdclk_frequency_10khz(void)
EXPORT_SYMBOL(get_clk_frequency_khz);
EXPORT_SYMBOL(get_memclk_frequency_10khz);
EXPORT_SYMBOL(get_lcdclk_frequency_10khz);
+
+
+/*
+ * device registration specific to PXA27x.
+ */
+
+static u64 pxa27x_dmamask = 0xffffffffUL;
+
+static struct resource pxa27x_ohci_resources[] = {
+ [0] = {
+ .start = 0x4C000000,
+ .end = 0x4C00ff6f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_USBH1,
+ .end = IRQ_USBH1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device ohci_device = {
+ .name = "pxa27x-ohci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &pxa27x_dmamask,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(pxa27x_ohci_resources),
+ .resource = pxa27x_ohci_resources,
+};
+
+static struct platform_device *devices[] __initdata = {
+ &ohci_device,
+};
+
+static int __init pxa27x_init(void)
+{
+ return platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+subsys_initcall(pxa27x_init);
diff --git a/arch/arm26/kernel/time.c b/arch/arm26/kernel/time.c
index dbbc21404727..a7a18c4b21ac 100644
--- a/arch/arm26/kernel/time.c
+++ b/arch/arm26/kernel/time.c
@@ -166,6 +166,9 @@ EXPORT_SYMBOL(do_settimeofday);
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
do_set_rtc(); //FIME - EVERY timer IRQ?
profile_tick(CPU_PROFILING, regs);
return IRQ_HANDLED; //FIXME - is this right?
diff --git a/arch/arm26/machine/dma.c b/arch/arm26/machine/dma.c
index 44090c40cab6..0d8ea1992baa 100644
--- a/arch/arm26/machine/dma.c
+++ b/arch/arm26/machine/dma.c
@@ -47,7 +47,7 @@ static void arc_floppy_data_enable_dma(dmach_t channel, dma_t *dma)
&fdc1772_dma_read_end - &fdc1772_dma_read);
fdc1772_setupdma(dma->buf.length, dma->buf.__address); /* Sets data pointer up */
enable_fiq(FIQ_FLOPPYDATA);
- loacl_irq_restore(flags);
+ local_irq_restore(flags);
}
break;
diff --git a/arch/cris/arch-v10/kernel/fasttimer.c b/arch/cris/arch-v10/kernel/fasttimer.c
index 53b94eb3e1af..7dac98958805 100644
--- a/arch/cris/arch-v10/kernel/fasttimer.c
+++ b/arch/cris/arch-v10/kernel/fasttimer.c
@@ -102,7 +102,6 @@
#include <asm/rtc.h>
#include <linux/config.h>
-#include <linux/version.h>
#include <asm/arch/svinto.h>
#include <asm/fasttimer.h>
@@ -599,23 +598,8 @@ void schedule_usleep(unsigned long us)
#ifdef CONFIG_PROC_FS
static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
- ,int *eof, void *data_unused
-#else
- ,int unused
-#endif
- );
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
+ ,int *eof, void *data_unused);
static struct proc_dir_entry *fasttimer_proc_entry;
-#else
-static struct proc_dir_entry fasttimer_proc_entry =
-{
- 0, 9, "fasttimer",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL /* ops -- default to array */,
- &proc_fasttimer_read /* get_info */,
-};
-#endif
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_PROC_FS
@@ -624,12 +608,7 @@ static struct proc_dir_entry fasttimer_proc_entry =
#define BIG_BUF_SIZE (500 + NUM_TIMER_STATS * 300)
static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
- ,int *eof, void *data_unused
-#else
- ,int unused
-#endif
- )
+ ,int *eof, void *data_unused)
{
unsigned long flags;
int i = 0;
@@ -805,9 +784,7 @@ static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len
memcpy(buf, bigbuf + offset, len);
*start = buf;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
*eof = 1;
-#endif
return len;
}
@@ -982,12 +959,8 @@ void fast_timer_init(void)
}
#endif
#ifdef CONFIG_PROC_FS
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
if ((fasttimer_proc_entry = create_proc_entry( "fasttimer", 0, 0 )))
fasttimer_proc_entry->read_proc = proc_fasttimer_read;
-#else
- proc_register_dynamic(&proc_root, &fasttimer_proc_entry);
-#endif
#endif /* PROC_FS */
if(request_irq(TIMER1_IRQ_NBR, timer1_handler, SA_SHIRQ,
"fast timer int", NULL))
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c
index 298e86a01c01..89e4efb9984a 100644
--- a/arch/cris/arch-v10/kernel/time.c
+++ b/arch/cris/arch-v10/kernel/time.c
@@ -227,6 +227,9 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/* call the real timer interrupt handler */
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
/*
* If we have an externally synchronized Linux clock, then update
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index 0b293d652bf4..8a600218334d 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -46,6 +46,9 @@ static void timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
platform_timer_eoi();
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
profile_tick(CPU_PROFILING, regs);
}
diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S
index a6e043c46e66..925d3f5a3824 100644
--- a/arch/i386/boot/video.S
+++ b/arch/i386/boot/video.S
@@ -1936,7 +1936,7 @@ store_edid:
movl $0x13131313, %eax # memset block with 0x13
movw $32, %cx
- movw $0x440, %di
+ movw $0x140, %di
cld
rep
stosl
@@ -1945,7 +1945,7 @@ store_edid:
movw $0x01, %bx
movw $0x00, %cx
movw $0x01, %dx
- movw $0x440, %di
+ movw $0x140, %di
int $0x10
popw %di # restore all registers
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 8a0536250dd3..4d60d9b9c3ac 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -450,6 +450,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
unsigned int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
{
unsigned int irq;
+ unsigned int plat_gsi = gsi;
#ifdef CONFIG_PCI
/*
@@ -471,10 +472,10 @@ unsigned int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
#ifdef CONFIG_X86_IO_APIC
if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
- mp_register_gsi(gsi, edge_level, active_high_low);
+ plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
}
#endif
- acpi_gsi_to_irq(gsi, &irq);
+ acpi_gsi_to_irq(plat_gsi, &irq);
return irq;
}
EXPORT_SYMBOL(acpi_register_gsi);
diff --git a/arch/i386/kernel/cpu/mcheck/k7.c b/arch/i386/kernel/cpu/mcheck/k7.c
index 304bf420f0f1..dcf9f068a5ed 100644
--- a/arch/i386/kernel/cpu/mcheck/k7.c
+++ b/arch/i386/kernel/cpu/mcheck/k7.c
@@ -54,6 +54,7 @@ static asmlinkage void k7_machine_check(struct pt_regs * regs, long error_code)
wrmsr (MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
/* Serialize */
wmb();
+ add_taint(TAINT_MACHINE_CHECK);
}
}
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index a1664bb1577e..7864ddfccf07 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -48,6 +48,7 @@ static void mce_checkregs (void *info)
/* Serialize */
wmb();
+ add_taint(TAINT_MACHINE_CHECK);
}
}
}
diff --git a/arch/i386/kernel/cpu/mcheck/p4.c b/arch/i386/kernel/cpu/mcheck/p4.c
index b31fc006f1bf..51175846dd29 100644
--- a/arch/i386/kernel/cpu/mcheck/p4.c
+++ b/arch/i386/kernel/cpu/mcheck/p4.c
@@ -40,6 +40,7 @@ static void unexpected_thermal_interrupt(struct pt_regs *regs)
{
printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
smp_processor_id());
+ add_taint(TAINT_MACHINE_CHECK);
}
/* P4/Xeon Thermal transition interrupt handler */
@@ -60,6 +61,7 @@ static void intel_thermal_interrupt(struct pt_regs *regs)
printk(KERN_EMERG "CPU%d: Temperature above threshold\n", cpu);
printk(KERN_EMERG "CPU%d: Running in modulated clock mode\n",
cpu);
+ add_taint(TAINT_MACHINE_CHECK);
} else {
printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
}
@@ -222,6 +224,7 @@ static asmlinkage void intel_machine_check(struct pt_regs * regs, long error_cod
wrmsr(msr, 0UL, 0UL);
/* Serialize */
wmb();
+ add_taint(TAINT_MACHINE_CHECK);
}
}
mcgstl &= ~(1<<2);
diff --git a/arch/i386/kernel/cpu/mcheck/p5.c b/arch/i386/kernel/cpu/mcheck/p5.c
index c92cd661c5ef..e62b1f888a4e 100644
--- a/arch/i386/kernel/cpu/mcheck/p5.c
+++ b/arch/i386/kernel/cpu/mcheck/p5.c
@@ -25,6 +25,7 @@ static asmlinkage void pentium_machine_check(struct pt_regs * regs, long error_c
printk(KERN_EMERG "CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n", smp_processor_id(), loaddr, lotype);
if(lotype&(1<<5))
printk(KERN_EMERG "CPU#%d: Possible thermal failure (CPU on fire ?).\n", smp_processor_id());
+ add_taint(TAINT_MACHINE_CHECK);
}
/* Set up machine check reporting for processors with Intel style MCE */
diff --git a/arch/i386/kernel/cpu/mcheck/p6.c b/arch/i386/kernel/cpu/mcheck/p6.c
index d63e9578c96c..c8778643b666 100644
--- a/arch/i386/kernel/cpu/mcheck/p6.c
+++ b/arch/i386/kernel/cpu/mcheck/p6.c
@@ -72,6 +72,7 @@ static asmlinkage void intel_machine_check(struct pt_regs * regs, long error_cod
wrmsr (msr, 0UL, 0UL);
/* Serialize */
wmb();
+ add_taint(TAINT_MACHINE_CHECK);
}
}
mcgstl &= ~(1<<2);
diff --git a/arch/i386/kernel/cpu/mcheck/winchip.c b/arch/i386/kernel/cpu/mcheck/winchip.c
index ddb579da43dd..64090175f4f5 100644
--- a/arch/i386/kernel/cpu/mcheck/winchip.c
+++ b/arch/i386/kernel/cpu/mcheck/winchip.c
@@ -19,6 +19,7 @@
static asmlinkage void winchip_machine_check(struct pt_regs * regs, long error_code)
{
printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
+ add_taint(TAINT_MACHINE_CHECK);
}
/* Set up machine check reporting on the Winchip C6 series */
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index b158ceb8b126..c6a2e244a073 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -867,6 +867,9 @@ ENTRY(sys_call_table)
.long sys_mq_getsetattr
.long sys_ni_syscall /* reserved for kexec */
.long sys_waitid
- .long sys_setaltroot
+ .long sys_setaltroot /* 285 */
+ .long sys_add_key
+ .long sys_request_key
+ .long sys_keyctl
syscall_table_size=(.-sys_call_table)
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index aca20edc5ea1..4de7538043f5 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -69,7 +69,8 @@
* Thanks to Stuart Swales for pointing out this bug.
*/
-
+//#define DEBUG /* pr_debug */
+#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/module.h>
@@ -88,12 +89,6 @@ MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>");
MODULE_LICENSE("GPL");
#define MICROCODE_VERSION "1.14"
-#define MICRO_DEBUG 0
-#if MICRO_DEBUG
-#define dprintk(x...) printk(KERN_INFO x)
-#else
-#define dprintk(x...)
-#endif
#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
@@ -172,7 +167,7 @@ static void collect_cpu_info (void *unused)
__asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
/* get the current revision from MSR 0x8B */
rdmsr(MSR_IA32_UCODE_REV, val[0], uci->rev);
- dprintk("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
+ pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
uci->sig, uci->pf, uci->rev);
}
@@ -180,22 +175,22 @@ static inline void mark_microcode_update (int cpu_num, microcode_header_t *mc_he
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
- dprintk("Microcode Found.\n");
- dprintk(" Header Revision 0x%x\n", mc_header->hdrver);
- dprintk(" Loader Revision 0x%x\n", mc_header->ldrver);
- dprintk(" Revision 0x%x \n", mc_header->rev);
- dprintk(" Date %x/%x/%x\n",
+ pr_debug("Microcode Found.\n");
+ pr_debug(" Header Revision 0x%x\n", mc_header->hdrver);
+ pr_debug(" Loader Revision 0x%x\n", mc_header->ldrver);
+ pr_debug(" Revision 0x%x \n", mc_header->rev);
+ pr_debug(" Date %x/%x/%x\n",
((mc_header->date >> 24 ) & 0xff),
((mc_header->date >> 16 ) & 0xff),
(mc_header->date & 0xFFFF));
- dprintk(" Signature 0x%x\n", sig);
- dprintk(" Type 0x%x Family 0x%x Model 0x%x Stepping 0x%x\n",
+ pr_debug(" Signature 0x%x\n", sig);
+ pr_debug(" Type 0x%x Family 0x%x Model 0x%x Stepping 0x%x\n",
((sig >> 12) & 0x3),
((sig >> 8) & 0xf),
((sig >> 4) & 0xf),
((sig & 0xf)));
- dprintk(" Processor Flags 0x%x\n", pf);
- dprintk(" Checksum 0x%x\n", cksum);
+ pr_debug(" Processor Flags 0x%x\n", pf);
+ pr_debug(" Checksum 0x%x\n", cksum);
if (mc_header->rev < uci->rev) {
printk(KERN_ERR "microcode: CPU%d not 'upgrading' to earlier revision"
@@ -209,7 +204,7 @@ static inline void mark_microcode_update (int cpu_num, microcode_header_t *mc_he
goto out;
}
- dprintk("microcode: CPU%d found a matching microcode update with "
+ pr_debug("microcode: CPU%d found a matching microcode update with "
" revision 0x%x (current=0x%x)\n", cpu_num, mc_header->rev, uci->rev);
uci->cksum = cksum;
uci->pf = pf; /* keep the original mc pf for cksum calculation */
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 41cb82604f7f..8b6c7e8187af 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -1051,7 +1051,7 @@ void __init mp_config_acpi_legacy_irqs (void)
int (*platform_rename_gsi)(int ioapic, int gsi);
-void mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
{
int ioapic = -1;
int ioapic_pin = 0;
@@ -1060,13 +1060,13 @@ void mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
#ifdef CONFIG_ACPI_BUS
/* Don't set up the ACPI SCI because it's already set up */
if (acpi_fadt.sci_int == gsi)
- return;
+ return gsi;
#endif
ioapic = mp_find_ioapic(gsi);
if (ioapic < 0) {
printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
- return;
+ return gsi;
}
ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
@@ -1085,12 +1085,12 @@ void mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
printk(KERN_ERR "Invalid reference to IOAPIC pin "
"%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
ioapic_pin);
- return;
+ return gsi;
}
if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
- return;
+ return gsi;
}
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
@@ -1098,6 +1098,7 @@ void mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+ return gsi;
}
#endif /*CONFIG_X86_IO_APIC && (CONFIG_ACPI_INTERPRETER || CONFIG_ACPI_BOOT)*/
diff --git a/arch/i386/kernel/numaq.c b/arch/i386/kernel/numaq.c
index ed41eebf09e8..38c762daba5a 100644
--- a/arch/i386/kernel/numaq.c
+++ b/arch/i386/kernel/numaq.c
@@ -28,6 +28,7 @@
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/module.h>
+#include <linux/nodemask.h>
#include <asm/numaq.h>
/* These are needed before the pgdat's are created */
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
index e80cd8e417e0..bb55e0d5187e 100644
--- a/arch/i386/kernel/srat.c
+++ b/arch/i386/kernel/srat.c
@@ -28,6 +28,7 @@
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/acpi.h>
+#include <linux/nodemask.h>
#include <asm/srat.h>
/*
diff --git a/arch/i386/kernel/timers/common.c b/arch/i386/kernel/timers/common.c
index 7271b061556a..f7f90005e22e 100644
--- a/arch/i386/kernel/timers/common.c
+++ b/arch/i386/kernel/timers/common.c
@@ -5,6 +5,7 @@
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/errno.h>
+#include <linux/jiffies.h>
#include <asm/io.h>
#include <asm/timer.h>
diff --git a/arch/i386/mach-default/topology.c b/arch/i386/mach-default/topology.c
index c70547a7e81e..37bfa9144c8b 100644
--- a/arch/i386/mach-default/topology.c
+++ b/arch/i386/mach-default/topology.c
@@ -27,6 +27,7 @@
*/
#include <linux/init.h>
#include <linux/smp.h>
+#include <linux/nodemask.h>
#include <asm/cpu.h>
struct i386_cpu cpu_devices[NR_CPUS];
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index efdcb0da9ffd..7dace975bae6 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -28,6 +28,7 @@
#include <linux/mmzone.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
+#include <linux/nodemask.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/mmzone.h>
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index 7e4b12121df2..5f6e3e4b66c9 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -247,6 +247,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
page = pmd_page(*pmd);
pmd_clear(pmd);
+ mm->nr_ptes--;
dec_page_state(nr_page_table_pages);
page_cache_release(page);
}
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 6e012ba55879..fb010358ab28 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -43,6 +43,7 @@
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/mmzone.h>
+#include <linux/nodemask.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/machvec.h>
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 853d8dae7c3d..992854fa4b4c 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -67,14 +67,8 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
profile_tick(CPU_PROFILING, regs);
while (1) {
-#ifdef CONFIG_SMP
- /*
- * For UP, this is done in do_timer(). Weird, but
- * fixing that would require updates to all
- * platforms.
- */
update_process_times(user_mode(regs));
-#endif
+
new_itm += local_cpu_data->itm_delta;
if (smp_processor_id() == TIME_KEEPER_ID) {
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index eb464cbb9319..72e9c16f34ed 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -16,6 +16,7 @@
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
+#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 11d122980d3b..e47e19588525 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -45,6 +45,9 @@ static inline int set_rtc_mmss(unsigned long nowtime)
static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
{
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
profile_tick(CPU_PROFILING, regs);
#ifdef CONFIG_HEARTBEAT
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index bca65bd06b61..3cf200dc7e7d 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -85,6 +85,9 @@ static irqreturn_t sun3_int5(int irq, void *dev_id, struct pt_regs *fp)
intersil_clear();
#endif
do_timer(fp);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(fp));
+#endif
if(!(kstat_cpu(0).irqs[SYS_IRQS + irq] % 20))
sun3_leds(led_pattern[(kstat_cpu(0).irqs[SYS_IRQS+irq]%160)
/20]);
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index e328f2810b17..5c3ca671627c 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -57,6 +57,9 @@ static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
write_seqlock(&xtime_lock);
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
if (current->pid)
profile_tick(CPU_PROFILING, regs);
diff --git a/arch/mips/au1000/common/time.c b/arch/mips/au1000/common/time.c
index a72de97275f0..d9f4c1bb6ecb 100644
--- a/arch/mips/au1000/common/time.c
+++ b/arch/mips/au1000/common/time.c
@@ -99,6 +99,9 @@ void mips_timer_interrupt(struct pt_regs *regs)
kstat_this_cpu.irqs[irq]++;
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
r4k_cur += r4k_offset;
ack_r4ktimer(r4k_cur);
@@ -137,6 +140,9 @@ void counter0_irq(int irq, void *dev_id, struct pt_regs *regs)
while (time_elapsed > 0) {
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
time_elapsed -= MATCH20_INC;
last_match20 += MATCH20_INC;
jiffie_drift++;
@@ -153,6 +159,9 @@ void counter0_irq(int irq, void *dev_id, struct pt_regs *regs)
if (jiffie_drift >= 999) {
jiffie_drift -= 999;
do_timer(regs); /* increment jiffies by one */
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
}
}
diff --git a/arch/mips/au1000/db1x00/mirage_ts.c b/arch/mips/au1000/db1x00/mirage_ts.c
index 585861e80b64..58d41c29395e 100644
--- a/arch/mips/au1000/db1x00/mirage_ts.c
+++ b/arch/mips/au1000/db1x00/mirage_ts.c
@@ -68,7 +68,7 @@ int wm97xx_comodule_present = 1;
#define err(format, arg...) printk(KERN_ERR TS_NAME ": " format "\n" , ## arg)
#define info(format, arg...) printk(KERN_INFO TS_NAME ": " format "\n" , ## arg)
#define warn(format, arg...) printk(KERN_WARNING TS_NAME ": " format "\n" , ## arg)
-#define DPRINTK(format, arg...) printk(__FUNCTION__ ": " format "\n" , ## arg)
+#define DPRINTK(format, arg...) printk("%s: " format "\n", __FUNCTION__ , ## arg)
#define PEN_DOWN_IRQ AU1000_GPIO_7
diff --git a/arch/mips/baget/time.c b/arch/mips/baget/time.c
index ed82c62df3d6..e715cb6bc0fb 100644
--- a/arch/mips/baget/time.c
+++ b/arch/mips/baget/time.c
@@ -52,6 +52,9 @@ void static timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
if (timer_intr_valid()) {
sti();
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
}
}
diff --git a/arch/mips/galileo-boards/ev96100/time.c b/arch/mips/galileo-boards/ev96100/time.c
index 10189b95d56b..8cbe8426491a 100644
--- a/arch/mips/galileo-boards/ev96100/time.c
+++ b/arch/mips/galileo-boards/ev96100/time.c
@@ -73,6 +73,9 @@ void mips_timer_interrupt(struct pt_regs *regs)
do {
kstat_this_cpu.irqs[irq]++;
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
r4k_cur += r4k_offset;
ack_r4ktimer(r4k_cur);
diff --git a/arch/mips/gt64120/common/time.c b/arch/mips/gt64120/common/time.c
index 44f33caf298e..edcea9fbeda5 100644
--- a/arch/mips/gt64120/common/time.c
+++ b/arch/mips/gt64120/common/time.c
@@ -36,6 +36,9 @@ static void gt64120_irq(int irq, void *dev_id, struct pt_regs *regs)
handled = 1;
irq_src &= ~0x00000800;
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
}
GT_WRITE(GT_INTRCAUSE_OFS, 0);
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 915e5539e7f2..0a4bf6b3256d 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -418,10 +418,7 @@ void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
if (current->pid)
profile_tick(CPU_PROFILING, regs);
-#ifdef CONFIG_SMP
- /* in UP mode, update_process_times() is invoked by do_timer() */
update_process_times(user_mode(regs));
-#endif
}
/*
diff --git a/arch/mips/momentum/ocelot_g/gt-irq.c b/arch/mips/momentum/ocelot_g/gt-irq.c
index 93708965be11..cede65a40632 100644
--- a/arch/mips/momentum/ocelot_g/gt-irq.c
+++ b/arch/mips/momentum/ocelot_g/gt-irq.c
@@ -134,6 +134,9 @@ static irqreturn_t gt64240_p0int_irq(int irq, void *dev, struct pt_regs *regs)
/* handle the timer call */
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
}
if (irq_src) {
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index cc0419ad7407..9d384d13132d 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -112,9 +112,7 @@ again:
if (cpu == 0)
do_timer(regs);
-#ifdef CONFIG_SMP
update_process_times(user_mode(regs));
-#endif /* CONFIG_SMP */
/*
* If we have an externally synchronized Linux clock, then update
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 8f4ad0cf228d..6cf7407344ba 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -79,6 +79,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
while (nticks--) {
#ifdef CONFIG_SMP
smp_do_timer(regs);
+#else
+ update_process_times(user_mode(regs));
#endif
if (cpu == 0) {
write_seqlock(&xtime_lock);
diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
index 60e7ef5ad9ab..8839f6f07dab 100644
--- a/arch/ppc/kernel/time.c
+++ b/arch/ppc/kernel/time.c
@@ -150,6 +150,9 @@ void timer_interrupt(struct pt_regs * regs)
write_seqlock(&xtime_lock);
tb_last_stamp = jiffy_stamp;
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
/*
* update the rtc when needed, this should be performed on the
diff --git a/arch/ppc64/kernel/sysfs.c b/arch/ppc64/kernel/sysfs.c
index cfc3c0b7b1c2..e7b94ada0e2d 100644
--- a/arch/ppc64/kernel/sysfs.c
+++ b/arch/ppc64/kernel/sysfs.c
@@ -6,6 +6,8 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/module.h>
+#include <linux/nodemask.h>
+
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index ef44a3ec40dc..66fe6f6711b5 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -277,6 +277,9 @@ int timer_interrupt(struct pt_regs * regs)
write_seqlock(&xtime_lock);
tb_last_stamp = lpaca->next_jiffy_update_tb;
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
timer_sync_xtime( cur_tb );
timer_check_rtc();
write_sequnlock(&xtime_lock);
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index f9d47ac8f6d5..bf7022c94852 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -213,6 +213,7 @@ static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
}
page = pmd_page(*pmd);
pmd_clear(pmd);
+ mm->nr_ptes--;
dec_page_state(nr_page_table_pages);
pte_free_tlb(tlb, page);
}
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
index 977140f78a37..43e8bc084996 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/ppc64/mm/numa.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
+#include <linux/nodemask.h>
#include <asm/lmb.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 8f279421646f..543e0d8817c6 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -233,8 +233,10 @@ void account_ticks(struct pt_regs *regs)
while (ticks--)
update_process_times(user_mode(regs));
#else
- while (ticks--)
+ while (ticks--) {
do_timer(regs);
+ update_process_times(user_mode(regs));
+ }
#endif
s390_do_profile(regs);
}
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 7bb5da460707..149803d26436 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -266,6 +266,9 @@ static long last_rtc_update;
static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
profile_tick(CPU_PROFILING, regs);
#ifdef CONFIG_HEARTBEAT
diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
index b3738ed371d8..9d73104f6817 100644
--- a/arch/sh64/kernel/time.c
+++ b/arch/sh64/kernel/time.c
@@ -309,6 +309,9 @@ static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *reg
ctc_last_interrupt = (unsigned long) current_ctc;
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
profile_tick(CPU_PROFILING, regs);
#ifdef CONFIG_HEARTBEAT
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index c982473d1be0..918d3f23bdc8 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -716,6 +716,9 @@ static irqreturn_t pcic_timer_handler (int irq, void *h, struct pt_regs *regs)
write_seqlock(&xtime_lock); /* Dummy, to show that we remember */
pcic_clear_clock_irq();
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
write_sequnlock(&xtime_lock);
return IRQ_HANDLED;
}
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 60507d394868..6af5019658ba 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -134,6 +134,10 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
clear_clock_irq();
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
+
/* Determine when to update the Mostek clock. */
if ((time_status & STA_UNSYNC) == 0 &&
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index b50e16d49227..96d7cba29679 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -64,7 +64,16 @@ static unsigned long mstk48t59_regs = 0UL;
static int set_rtc_mmss(unsigned long);
-struct sparc64_tick_ops *tick_ops;
+static __init unsigned long dummy_get_tick(void)
+{
+ return 0;
+}
+
+static __initdata struct sparc64_tick_ops dummy_tick_ops = {
+ .get_tick = dummy_get_tick,
+};
+
+struct sparc64_tick_ops *tick_ops = &dummy_tick_ops;
#define TICK_PRIV_BIT (1UL << 63)
@@ -462,6 +471,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
do {
#ifndef CONFIG_SMP
profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(regs));
#endif
do_timer(regs);
diff --git a/arch/um/kernel/time_kern.c b/arch/um/kernel/time_kern.c
index 5ce5668825fc..aee0f7f1aecc 100644
--- a/arch/um/kernel/time_kern.c
+++ b/arch/um/kernel/time_kern.c
@@ -169,11 +169,9 @@ void __const_udelay(um_udelay_t usecs)
void timer_handler(int sig, union uml_pt_regs *regs)
{
-#ifdef CONFIG_SMP
local_irq_disable();
update_process_times(user_context(UPT_SP(regs)));
local_irq_enable();
-#endif
if(current_thread->cpu == 0)
timer_irq(regs);
}
diff --git a/arch/v850/kernel/time.c b/arch/v850/kernel/time.c
index d0266de065dc..f722a268238a 100644
--- a/arch/v850/kernel/time.c
+++ b/arch/v850/kernel/time.c
@@ -56,6 +56,9 @@ static irqreturn_t timer_interrupt (int irq, void *dummy, struct pt_regs *regs)
mach_tick ();
do_timer (regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
profile_tick(CPU_PROFILING, regs);
#if 0
/*
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index b802b5f08aaa..b2ef6299812a 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -182,6 +182,7 @@ struct elf_prpsinfo
#define user user32
#define __ASM_X86_64_ELF_H 1
+#define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
//#include <asm/ia32.h>
#include <linux/elf.h>
@@ -360,11 +361,11 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
mpnt->vm_end = IA32_STACK_TOP;
if (executable_stack == EXSTACK_ENABLE_X)
- mpnt->vm_flags = vm_stack_flags32 | VM_EXEC;
+ mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
else if (executable_stack == EXSTACK_DISABLE_X)
- mpnt->vm_flags = vm_stack_flags32 & ~VM_EXEC;
+ mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
else
- mpnt->vm_flags = vm_stack_flags32;
+ mpnt->vm_flags = VM_STACK_FLAGS;
mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC) ?
PAGE_COPY_EXEC : PAGE_COPY;
insert_vm_struct(mm, mpnt);
@@ -390,9 +391,6 @@ elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int p
unsigned long map_addr;
struct task_struct *me = current;
- if (prot & PROT_READ)
- prot |= vm_force_exec32;
-
down_write(&me->mm->mmap_sem);
map_addr = do_mmap(filep, ELF_PAGESTART(addr),
eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot,
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index a854fb32963a..892555082bf9 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -218,9 +218,6 @@ sys32_mmap(struct mmap_arg_struct __user *arg)
return -EBADF;
}
- if (a.prot & PROT_READ)
- a.prot |= vm_force_exec32;
-
mm = current->mm;
down_write(&mm->mmap_sem);
retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, a.offset>>PAGE_SHIFT);
@@ -235,8 +232,6 @@ sys32_mmap(struct mmap_arg_struct __user *arg)
asmlinkage long
sys32_mprotect(unsigned long start, size_t len, unsigned long prot)
{
- if (prot & PROT_READ)
- prot |= vm_force_exec32;
return sys_mprotect(start,len,prot);
}
@@ -1044,9 +1039,6 @@ asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
return -EBADF;
}
- if (prot & PROT_READ)
- prot |= vm_force_exec32;
-
down_write(&mm->mmap_sem);
error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
up_write(&mm->mmap_sem);
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index cf1fe8b7fa5b..c771dc8e3f60 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -191,6 +191,8 @@ void do_machine_check(struct pt_regs * regs, long error_code)
panicm = m;
panicm_found = 1;
}
+
+ tainted |= TAINT_MACHINE_CHECK;
}
/* Never do anything final in the polling timer */
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 646b3c0a9171..55e11e6b1f03 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -895,25 +895,25 @@ void __init mp_config_acpi_legacy_irqs (void)
return;
}
-void mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
{
int ioapic = -1;
int ioapic_pin = 0;
int idx, bit = 0;
if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
- return;
+ return gsi;
#ifdef CONFIG_ACPI_BUS
/* Don't set up the ACPI SCI because it's already set up */
if (acpi_fadt.sci_int == gsi)
- return;
+ return gsi;
#endif
ioapic = mp_find_ioapic(gsi);
if (ioapic < 0) {
printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
- return;
+ return gsi;
}
ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
@@ -929,12 +929,12 @@ void mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
printk(KERN_ERR "Invalid reference to IOAPIC pin "
"%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
ioapic_pin);
- return;
+ return gsi;
}
if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
- return;
+ return gsi;
}
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
@@ -942,6 +942,7 @@ void mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+ return gsi;
}
#endif /*CONFIG_X86_IO_APIC*/
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index b83680afe01a..b7f2bfa57c55 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -43,80 +43,27 @@ char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
unsigned long __supported_pte_mask = ~0UL;
static int do_not_nx __initdata = 0;
-unsigned long vm_stack_flags = __VM_STACK_FLAGS;
-unsigned long vm_stack_flags32 = __VM_STACK_FLAGS;
-unsigned long vm_data_default_flags = __VM_DATA_DEFAULT_FLAGS;
-unsigned long vm_data_default_flags32 = __VM_DATA_DEFAULT_FLAGS;
-unsigned long vm_force_exec32 = PROT_EXEC;
/* noexec=on|off
Control non executable mappings for 64bit processes.
-on Enable
+on Enable(default)
off Disable
-noforce (default) Don't enable by default for heap/stack/data,
- but allow PROT_EXEC to be effective
-
*/
static int __init nonx_setup(char *str)
{
if (!strcmp(str, "on")) {
__supported_pte_mask |= _PAGE_NX;
do_not_nx = 0;
- vm_data_default_flags &= ~VM_EXEC;
- vm_stack_flags &= ~VM_EXEC;
- } else if (!strcmp(str, "noforce") || !strcmp(str, "off")) {
- do_not_nx = (str[0] == 'o');
- if (do_not_nx)
- __supported_pte_mask &= ~_PAGE_NX;
- vm_data_default_flags |= VM_EXEC;
- vm_stack_flags |= VM_EXEC;
+ } else if (!strcmp(str, "off")) {
+ do_not_nx = 1;
+ __supported_pte_mask &= ~_PAGE_NX;
}
return 1;
}
__setup("noexec=", nonx_setup);
-/* noexec32=opt{,opt}
-
-Control the no exec default for 32bit processes. Can be also overwritten
-per executable using ELF header flags (e.g. needed for the X server)
-Requires noexec=on or noexec=noforce to be effective.
-
-Valid options:
- all,on Heap,stack,data is non executable.
- off (default) Heap,stack,data is executable
- stack Stack is non executable, heap/data is.
- force Don't imply PROT_EXEC for PROT_READ
- compat (default) Imply PROT_EXEC for PROT_READ
-
-*/
- static int __init nonx32_setup(char *s)
- {
- while (*s) {
- if (!strncmp(s, "all", 3) || !strncmp(s,"on",2)) {
- vm_data_default_flags32 &= ~VM_EXEC;
- vm_stack_flags32 &= ~VM_EXEC;
- } else if (!strncmp(s, "off",3)) {
- vm_data_default_flags32 |= VM_EXEC;
- vm_stack_flags32 |= VM_EXEC;
- } else if (!strncmp(s, "stack", 5)) {
- vm_data_default_flags32 |= VM_EXEC;
- vm_stack_flags32 &= ~VM_EXEC;
- } else if (!strncmp(s, "force",5)) {
- vm_force_exec32 = 0;
- } else if (!strncmp(s, "compat",5)) {
- vm_force_exec32 = PROT_EXEC;
- }
- s += strcspn(s, ",");
- if (*s == ',')
- ++s;
- }
- return 1;
-}
-
-__setup("noexec32=", nonx32_setup);
-
/*
* Great future plan:
* Declare PDA itself and support (irqstack,tss,pml4) as per cpu data.
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index ee941842d710..289b81dae486 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -427,6 +427,9 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
*/
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
/*
* In the SMP case we use the local APIC timer interrupt to do the profiling,
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 13ec453e2756..330b0cd6438c 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -40,6 +40,7 @@
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
+#include <linux/jiffies.h>
#include <asm/vsyscall.h>
#include <asm/pgtable.h>
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index 0825e37fd9d8..96cf21236cf2 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/module.h>
+#include <linux/nodemask.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
#include <asm/types.h>
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index df85a94d3ba6..f136fc23f9cf 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -10,6 +10,8 @@
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
+#include <linux/nodemask.h>
+
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
@@ -151,9 +153,9 @@ void __init numa_init_array(void)
for (i = 0; i < MAXNODE; i++) {
if (node_online(i))
continue;
- rr = find_next_bit(node_online_map, MAX_NUMNODES, rr);
+ rr = next_node(rr, node_online_map);
if (rr == MAX_NUMNODES)
- rr = find_first_bit(node_online_map, MAX_NUMNODES);
+ rr = first_node(node_online_map);
node_data[i] = node_data[rr];
cpu_to_node[i] = rr;
rr++;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index a1d50242b8cd..6a43c807497d 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -356,6 +356,39 @@ config LBD
your machine, or if you want to have a raid or loopback device
bigger than 2TB. Otherwise say N.
+config CDROM_PKTCDVD
+ tristate "Packet writing on CD/DVD media"
+ help
+ If you have a CDROM drive that supports packet writing, say Y to
+ include preliminary support. It should work with any MMC/Mt Fuji
+ compliant ATAPI or SCSI drive, which is just about any newer CD
+ writer.
+
+ Currently only writing to CD-RW, DVD-RW and DVD+RW discs is possible.
+ DVD-RW disks must be in restricted overwrite mode.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pktcdvd.
+
+config CDROM_PKTCDVD_BUFFERS
+ int "Free buffers for data gathering"
+ depends on CDROM_PKTCDVD
+ default "8"
+ help
+ This controls the maximum number of active concurrent packets. More
+ concurrent packets can increase write performance, but also require
+ more memory. Each concurrent packet will require approximately 64Kb
+ of non-swappable kernel memory, memory which will be allocated at
+ pktsetup time.
+
+config CDROM_PKTCDVD_WCACHE
+ bool "Enable write caching"
+ depends on CDROM_PKTCDVD
+ help
+ If enabled, write caching will be set for the CD-R/W device. For now
+ this option is dangerous unless the CD-RW media is known good, as we
+ don't do deferred write error handling yet.
+
source "drivers/s390/block/Kconfig"
endmenu
diff --git a/drivers/block/Kconfig.iosched b/drivers/block/Kconfig.iosched
index d938c5fd130b..e0ba6c93717e 100644
--- a/drivers/block/Kconfig.iosched
+++ b/drivers/block/Kconfig.iosched
@@ -1,5 +1,5 @@
config IOSCHED_NOOP
- bool "No-op I/O scheduler" if EMBEDDED
+ bool
default y
---help---
The no-op I/O scheduler is a minimal scheduler that does basic merging
@@ -9,7 +9,7 @@ config IOSCHED_NOOP
the kernel.
config IOSCHED_AS
- bool "Anticipatory I/O scheduler" if EMBEDDED
+ tristate "Anticipatory I/O scheduler"
default y
---help---
The anticipatory I/O scheduler is the default disk scheduler. It is
@@ -18,7 +18,7 @@ config IOSCHED_AS
slower in some cases especially some database loads.
config IOSCHED_DEADLINE
- bool "Deadline I/O scheduler" if EMBEDDED
+ tristate "Deadline I/O scheduler"
default y
---help---
The deadline I/O scheduler is simple and compact, and is often as
@@ -28,7 +28,7 @@ config IOSCHED_DEADLINE
anticipatory I/O scheduler and so is a good choice.
config IOSCHED_CFQ
- bool "CFQ I/O scheduler" if EMBEDDED
+ tristate "CFQ I/O scheduler"
default y
---help---
The CFQ I/O scheduler tries to distribute bandwidth equally
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index c8fbbf14ce94..1cf09a1c065b 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_BLK_DEV_XD) += xd.o
obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
+obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 0ef6a665d93e..0aa3ee8c309b 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -614,7 +614,7 @@ static void as_antic_stop(struct as_data *ad)
static void as_antic_timeout(unsigned long data)
{
struct request_queue *q = (struct request_queue *)data;
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
@@ -945,7 +945,7 @@ static void update_write_batch(struct as_data *ad)
*/
static void as_completed_request(request_queue_t *q, struct request *rq)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq);
WARN_ON(!list_empty(&rq->queuelist));
@@ -1030,7 +1030,7 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
{
struct as_rq *arq = RQ_DATA(rq);
const int data_dir = arq->is_sync;
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
WARN_ON(arq->state != AS_RQ_QUEUED);
@@ -1361,7 +1361,7 @@ fifo_expired:
static struct request *as_next_request(request_queue_t *q)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct request *rq = NULL;
/*
@@ -1469,7 +1469,7 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
*/
static void as_requeue_request(request_queue_t *q, struct request *rq)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq);
if (arq) {
@@ -1509,7 +1509,7 @@ static void as_account_queued_request(struct as_data *ad, struct request *rq)
static void
as_insert_request(request_queue_t *q, struct request *rq, int where)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq);
if (arq) {
@@ -1562,7 +1562,7 @@ as_insert_request(request_queue_t *q, struct request *rq, int where)
*/
static int as_queue_empty(request_queue_t *q)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
if (!list_empty(&ad->fifo_list[REQ_ASYNC])
|| !list_empty(&ad->fifo_list[REQ_SYNC])
@@ -1601,7 +1601,7 @@ as_latter_request(request_queue_t *q, struct request *rq)
static int
as_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
sector_t rb_key = bio->bi_sector + bio_sectors(bio);
struct request *__rq;
int ret;
@@ -1656,7 +1656,7 @@ out_insert:
static void as_merged_request(request_queue_t *q, struct request *req)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(req);
/*
@@ -1701,7 +1701,7 @@ static void
as_merged_requests(request_queue_t *q, struct request *req,
struct request *next)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(req);
struct as_rq *anext = RQ_DATA(next);
@@ -1788,7 +1788,7 @@ static void as_work_handler(void *data)
static void as_put_request(request_queue_t *q, struct request *rq)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq);
if (!arq) {
@@ -1807,7 +1807,7 @@ static void as_put_request(request_queue_t *q, struct request *rq)
static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
{
- struct as_data *ad = q->elevator.elevator_data;
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
if (arq) {
@@ -1828,21 +1828,21 @@ static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
static int as_may_queue(request_queue_t *q, int rw)
{
- int ret = 0;
- struct as_data *ad = q->elevator.elevator_data;
+ int ret = ELV_MQUEUE_MAY;
+ struct as_data *ad = q->elevator->elevator_data;
struct io_context *ioc;
if (ad->antic_status == ANTIC_WAIT_REQ ||
ad->antic_status == ANTIC_WAIT_NEXT) {
ioc = as_get_io_context();
if (ad->io_context == ioc)
- ret = 1;
+ ret = ELV_MQUEUE_MUST;
put_io_context(ioc);
}
return ret;
}
-static void as_exit(request_queue_t *q, elevator_t *e)
+static void as_exit_queue(elevator_t *e)
{
struct as_data *ad = e->elevator_data;
@@ -1862,7 +1862,7 @@ static void as_exit(request_queue_t *q, elevator_t *e)
* initialize elevator private data (as_data), and alloc a arq for
* each request on the free lists
*/
-static int as_init(request_queue_t *q, elevator_t *e)
+static int as_init_queue(request_queue_t *q, elevator_t *e)
{
struct as_data *ad;
int i;
@@ -1962,10 +1962,10 @@ static ssize_t as_est_show(struct as_data *ad, char *page)
return pos;
}
-#define SHOW_FUNCTION(__FUNC, __VAR) \
+#define SHOW_FUNCTION(__FUNC, __VAR) \
static ssize_t __FUNC(struct as_data *ad, char *page) \
-{ \
- return as_var_show(__VAR, (page)); \
+{ \
+ return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
}
SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
@@ -1982,6 +1982,7 @@ static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \
*(__PTR) = (MIN); \
else if (*(__PTR) > (MAX)) \
*(__PTR) = (MAX); \
+ *(__PTR) = msecs_to_jiffies(*(__PTR)); \
return ret; \
}
STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
@@ -2070,39 +2071,64 @@ static struct kobj_type as_ktype = {
.default_attrs = default_attrs,
};
-static int __init as_slab_setup(void)
+static struct elevator_type iosched_as = {
+ .ops = {
+ .elevator_merge_fn = as_merge,
+ .elevator_merged_fn = as_merged_request,
+ .elevator_merge_req_fn = as_merged_requests,
+ .elevator_next_req_fn = as_next_request,
+ .elevator_add_req_fn = as_insert_request,
+ .elevator_remove_req_fn = as_remove_request,
+ .elevator_requeue_req_fn = as_requeue_request,
+ .elevator_queue_empty_fn = as_queue_empty,
+ .elevator_completed_req_fn = as_completed_request,
+ .elevator_former_req_fn = as_former_request,
+ .elevator_latter_req_fn = as_latter_request,
+ .elevator_set_req_fn = as_set_request,
+ .elevator_put_req_fn = as_put_request,
+ .elevator_may_queue_fn = as_may_queue,
+ .elevator_init_fn = as_init_queue,
+ .elevator_exit_fn = as_exit_queue,
+ },
+
+ .elevator_ktype = &as_ktype,
+ .elevator_name = "anticipatory",
+ .elevator_owner = THIS_MODULE,
+};
+
+int as_init(void)
{
+ int ret;
+
arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
0, 0, NULL, NULL);
-
if (!arq_pool)
- panic("as: can't init slab pool\n");
+ return -ENOMEM;
- return 0;
+ ret = elv_register(&iosched_as);
+ if (!ret) {
+ /*
+ * don't allow AS to get unregistered, since we would have
+ * to browse all tasks in the system and release their
+ * as_io_context first
+ */
+ __module_get(THIS_MODULE);
+ return 0;
+ }
+
+ kmem_cache_destroy(arq_pool);
+ return ret;
}
-subsys_initcall(as_slab_setup);
-
-elevator_t iosched_as = {
- .elevator_merge_fn = as_merge,
- .elevator_merged_fn = as_merged_request,
- .elevator_merge_req_fn = as_merged_requests,
- .elevator_next_req_fn = as_next_request,
- .elevator_add_req_fn = as_insert_request,
- .elevator_remove_req_fn = as_remove_request,
- .elevator_requeue_req_fn = as_requeue_request,
- .elevator_queue_empty_fn = as_queue_empty,
- .elevator_completed_req_fn = as_completed_request,
- .elevator_former_req_fn = as_former_request,
- .elevator_latter_req_fn = as_latter_request,
- .elevator_set_req_fn = as_set_request,
- .elevator_put_req_fn = as_put_request,
- .elevator_may_queue_fn = as_may_queue,
- .elevator_init_fn = as_init,
- .elevator_exit_fn = as_exit,
-
- .elevator_ktype = &as_ktype,
- .elevator_name = "anticipatory",
-};
+void as_exit(void)
+{
+ kmem_cache_destroy(arq_pool);
+ elv_unregister(&iosched_as);
+}
+
+module_init(as_init);
+module_exit(as_exit);
-EXPORT_SYMBOL(iosched_as);
+MODULE_AUTHOR("Nick Piggin");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("anticipatory IO scheduler");
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index 068f4eae0b5c..cf7fc7609e67 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -22,96 +22,216 @@
#include <linux/rbtree.h>
#include <linux/mempool.h>
+static unsigned long max_elapsed_crq;
+static unsigned long max_elapsed_dispatch;
+
/*
* tunables
*/
-static int cfq_quantum = 4;
-static int cfq_queued = 8;
+static int cfq_quantum = 4; /* max queue in one round of service */
+static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/
+static int cfq_service = HZ; /* period over which service is avg */
+static int cfq_fifo_expire_r = HZ / 2; /* fifo timeout for sync requests */
+static int cfq_fifo_expire_w = 5 * HZ; /* fifo timeout for async requests */
+static int cfq_fifo_rate = HZ / 8; /* fifo expiry rate */
+static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
+static int cfq_back_penalty = 2; /* penalty of a backwards seek */
+/*
+ * for the hash of cfqq inside the cfqd
+ */
#define CFQ_QHASH_SHIFT 6
#define CFQ_QHASH_ENTRIES (1 << CFQ_QHASH_SHIFT)
-#define list_entry_qhash(entry) list_entry((entry), struct cfq_queue, cfq_hash)
+#define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
-#define CFQ_MHASH_SHIFT 8
+/*
+ * for the hash of crq inside the cfqq
+ */
+#define CFQ_MHASH_SHIFT 6
#define CFQ_MHASH_BLOCK(sec) ((sec) >> 3)
#define CFQ_MHASH_ENTRIES (1 << CFQ_MHASH_SHIFT)
-#define CFQ_MHASH_FN(sec) (hash_long(CFQ_MHASH_BLOCK((sec)),CFQ_MHASH_SHIFT))
-#define ON_MHASH(crq) !list_empty(&(crq)->hash)
+#define CFQ_MHASH_FN(sec) hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr) list_entry((ptr), struct cfq_rq, hash)
+#define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash)
#define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list)
-#define RQ_DATA(rq) ((struct cfq_rq *) (rq)->elevator_private)
+#define RQ_DATA(rq) (rq)->elevator_private
+
+/*
+ * rb-tree defines
+ */
+#define RB_NONE (2)
+#define RB_EMPTY(node) ((node)->rb_node == NULL)
+#define RB_CLEAR_COLOR(node) (node)->rb_color = RB_NONE
+#define RB_CLEAR(node) do { \
+ (node)->rb_parent = NULL; \
+ RB_CLEAR_COLOR((node)); \
+ (node)->rb_right = NULL; \
+ (node)->rb_left = NULL; \
+} while (0)
+#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
+#define ON_RB(node) ((node)->rb_color != RB_NONE)
+#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
+#define rq_rb_key(rq) (rq)->sector
+
+/*
+ * threshold for switching off non-tag accounting
+ */
+#define CFQ_MAX_TAG (4)
+
+/*
+ * sort key types and names
+ */
+enum {
+ CFQ_KEY_PGID,
+ CFQ_KEY_TGID,
+ CFQ_KEY_UID,
+ CFQ_KEY_GID,
+ CFQ_KEY_LAST,
+};
+
+static char *cfq_key_types[] = { "pgid", "tgid", "uid", "gid", NULL };
+
+/*
+ * spare queue
+ */
+#define CFQ_KEY_SPARE (~0UL)
static kmem_cache_t *crq_pool;
static kmem_cache_t *cfq_pool;
-static mempool_t *cfq_mpool;
+static kmem_cache_t *cfq_ioc_pool;
struct cfq_data {
struct list_head rr_list;
- struct list_head *dispatch;
- struct list_head *cfq_hash;
+ struct list_head empty_list;
- struct list_head *crq_hash;
+ struct hlist_head *cfq_hash;
+ struct hlist_head *crq_hash;
+ /* queues on rr_list (ie they have pending requests */
unsigned int busy_queues;
+
unsigned int max_queued;
+ atomic_t ref;
+
+ int key_type;
+
mempool_t *crq_pool;
request_queue_t *queue;
+ sector_t last_sector;
+
+ int rq_in_driver;
+
/*
- * tunables
+ * tunables, see top of file
*/
unsigned int cfq_quantum;
unsigned int cfq_queued;
+ unsigned int cfq_fifo_expire_r;
+ unsigned int cfq_fifo_expire_w;
+ unsigned int cfq_fifo_batch_expire;
+ unsigned int cfq_back_penalty;
+ unsigned int cfq_back_max;
+ unsigned int find_best_crq;
+
+ unsigned int cfq_tagged;
};
struct cfq_queue {
- struct list_head cfq_hash;
+ /* reference count */
+ atomic_t ref;
+ /* parent cfq_data */
+ struct cfq_data *cfqd;
+ /* hash of mergeable requests */
+ struct hlist_node cfq_hash;
+ /* hash key */
+ unsigned long key;
+ /* whether queue is on rr (or empty) list */
+ int on_rr;
+ /* on either rr or empty list of cfqd */
struct list_head cfq_list;
+ /* sorted list of pending requests */
struct rb_root sort_list;
- int pid;
+ /* if fifo isn't expired, next request to serve */
+ struct cfq_rq *next_crq;
+ /* requests queued in sort_list */
int queued[2];
-#if 0
- /*
- * with a simple addition like this, we can do io priorities. almost.
- * does need a split request free list, too.
- */
- int io_prio
-#endif
+ /* currently allocated requests */
+ int allocated[2];
+ /* fifo list of requests in sort_list */
+ struct list_head fifo[2];
+ /* last time fifo expired */
+ unsigned long last_fifo_expire;
+
+ int key_type;
+
+ unsigned long service_start;
+ unsigned long service_used;
+
+ unsigned int max_rate;
+
+ /* number of requests that have been handed to the driver */
+ int in_flight;
+ /* number of currently allocated requests */
+ int alloc_limit[2];
};
struct cfq_rq {
struct rb_node rb_node;
sector_t rb_key;
-
struct request *request;
+ struct hlist_node hash;
struct cfq_queue *cfq_queue;
+ struct cfq_io_context *io_context;
+
+ unsigned long service_start;
+ unsigned long queue_start;
- struct list_head hash;
+ unsigned int in_flight : 1;
+ unsigned int accounted : 1;
+ unsigned int is_sync : 1;
+ unsigned int is_write : 1;
};
-static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq);
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid);
-static void cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct cfq_rq *crq);
+static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned long);
+static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
+static void cfq_update_next_crq(struct cfq_rq *);
+static void cfq_put_cfqd(struct cfq_data *cfqd);
/*
- * lots of deadline iosched dupes, can be abstracted later...
+ * what the fairness is based on (ie how processes are grouped and
+ * differentiated)
*/
-static inline void __cfq_del_crq_hash(struct cfq_rq *crq)
+static inline unsigned long
+cfq_hash_key(struct cfq_data *cfqd, struct task_struct *tsk)
{
- list_del_init(&crq->hash);
+ /*
+ * optimize this so that ->key_type is the offset into the struct
+ */
+ switch (cfqd->key_type) {
+ case CFQ_KEY_PGID:
+ return process_group(tsk);
+ default:
+ case CFQ_KEY_TGID:
+ return tsk->tgid;
+ case CFQ_KEY_UID:
+ return tsk->uid;
+ case CFQ_KEY_GID:
+ return tsk->gid;
+ }
}
+/*
+ * lots of deadline iosched dupes, can be abstracted later...
+ */
static inline void cfq_del_crq_hash(struct cfq_rq *crq)
{
- if (ON_MHASH(crq))
- __cfq_del_crq_hash(crq);
+ hlist_del_init(&crq->hash);
}
static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
@@ -120,32 +240,32 @@ static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
if (q->last_merge == crq->request)
q->last_merge = NULL;
+
+ cfq_update_next_crq(crq);
}
static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
- struct request *rq = crq->request;
+ const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
- BUG_ON(ON_MHASH(crq));
+ BUG_ON(!hlist_unhashed(&crq->hash));
- list_add(&crq->hash, &cfqd->crq_hash[CFQ_MHASH_FN(rq_hash_key(rq))]);
+ hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
}
static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
{
- struct list_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
- struct list_head *entry, *next = hash_list->next;
+ struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
+ struct hlist_node *entry, *next;
- while ((entry = next) != hash_list) {
+ hlist_for_each_safe(entry, next, hash_list) {
struct cfq_rq *crq = list_entry_hash(entry);
struct request *__rq = crq->request;
- next = entry->next;
-
- BUG_ON(!ON_MHASH(crq));
+ BUG_ON(hlist_unhashed(&crq->hash));
if (!rq_mergeable(__rq)) {
- __cfq_del_crq_hash(crq);
+ cfq_del_crq_hash(crq);
continue;
}
@@ -157,29 +277,257 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
}
/*
- * rb tree support functions
+ * Lifted from AS - choose which of crq1 and crq2 that is best served now.
+ * We choose the request that is closest to the head right now. Distance
+ * behind the head are penalized and only allowed to a certain extent.
*/
-#define RB_NONE (2)
-#define RB_EMPTY(node) ((node)->rb_node == NULL)
-#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
-#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
-#define ON_RB(node) ((node)->rb_color != RB_NONE)
-#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
-#define rq_rb_key(rq) (rq)->sector
+static struct cfq_rq *
+cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
+{
+ sector_t last, s1, s2, d1 = 0, d2 = 0;
+ int r1_wrap = 0, r2_wrap = 0; /* requests are behind the disk head */
+ unsigned long back_max;
+
+ if (crq1 == NULL || crq1 == crq2)
+ return crq2;
+ if (crq2 == NULL)
+ return crq1;
-static inline void cfq_del_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
+ s1 = crq1->request->sector;
+ s2 = crq2->request->sector;
+
+ last = cfqd->last_sector;
+
+#if 0
+ if (!list_empty(&cfqd->queue->queue_head)) {
+ struct list_head *entry = &cfqd->queue->queue_head;
+ unsigned long distance = ~0UL;
+ struct request *rq;
+
+ while ((entry = entry->prev) != &cfqd->queue->queue_head) {
+ rq = list_entry_rq(entry);
+
+ if (blk_barrier_rq(rq))
+ break;
+
+ if (distance < abs(s1 - rq->sector + rq->nr_sectors)) {
+ distance = abs(s1 - rq->sector +rq->nr_sectors);
+ last = rq->sector + rq->nr_sectors;
+ }
+ if (distance < abs(s2 - rq->sector + rq->nr_sectors)) {
+ distance = abs(s2 - rq->sector +rq->nr_sectors);
+ last = rq->sector + rq->nr_sectors;
+ }
+ }
+ }
+#endif
+
+ /*
+ * by definition, 1KiB is 2 sectors
+ */
+ back_max = cfqd->cfq_back_max * 2;
+
+ /*
+ * Strict one way elevator _except_ in the case where we allow
+ * short backward seeks which are biased as twice the cost of a
+ * similar forward seek.
+ */
+ if (s1 >= last)
+ d1 = s1 - last;
+ else if (s1 + back_max >= last)
+ d1 = (last - s1) * cfqd->cfq_back_penalty;
+ else
+ r1_wrap = 1;
+
+ if (s2 >= last)
+ d2 = s2 - last;
+ else if (s2 + back_max >= last)
+ d2 = (last - s2) * cfqd->cfq_back_penalty;
+ else
+ r2_wrap = 1;
+
+ /* Found required data */
+ if (!r1_wrap && r2_wrap)
+ return crq1;
+ else if (!r2_wrap && r1_wrap)
+ return crq2;
+ else if (r1_wrap && r2_wrap) {
+ /* both behind the head */
+ if (s1 <= s2)
+ return crq1;
+ else
+ return crq2;
+ }
+
+ /* Both requests in front of the head */
+ if (d1 < d2)
+ return crq1;
+ else if (d2 < d1)
+ return crq2;
+ else {
+ if (s1 >= s2)
+ return crq1;
+ else
+ return crq2;
+ }
+}
+
+/*
+ * would be nice to take fifo expire time into account as well
+ */
+static struct cfq_rq *
+cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct cfq_rq *last)
+{
+ struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
+ struct rb_node *rbnext, *rbprev;
+
+ if (!ON_RB(&last->rb_node))
+ return NULL;
+
+ if ((rbnext = rb_next(&last->rb_node)) == NULL)
+ rbnext = rb_first(&cfqq->sort_list);
+
+ rbprev = rb_prev(&last->rb_node);
+
+ if (rbprev)
+ crq_prev = rb_entry_crq(rbprev);
+ if (rbnext)
+ crq_next = rb_entry_crq(rbnext);
+
+ return cfq_choose_req(cfqd, crq_next, crq_prev);
+}
+
+static void cfq_update_next_crq(struct cfq_rq *crq)
{
+ struct cfq_queue *cfqq = crq->cfq_queue;
+
+ if (cfqq->next_crq == crq)
+ cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
+}
+
+static int cfq_check_sort_rr_list(struct cfq_queue *cfqq)
+{
+ struct list_head *head = &cfqq->cfqd->rr_list;
+ struct list_head *next, *prev;
+
+ /*
+ * list might still be ordered
+ */
+ next = cfqq->cfq_list.next;
+ if (next != head) {
+ struct cfq_queue *cnext = list_entry_cfqq(next);
+
+ if (cfqq->service_used > cnext->service_used)
+ return 1;
+ }
+
+ prev = cfqq->cfq_list.prev;
+ if (prev != head) {
+ struct cfq_queue *cprev = list_entry_cfqq(prev);
+
+ if (cfqq->service_used < cprev->service_used)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue)
+{
+ struct list_head *entry = &cfqq->cfqd->rr_list;
+
+ if (!cfqq->on_rr)
+ return;
+ if (!new_queue && !cfq_check_sort_rr_list(cfqq))
+ return;
+
+ list_del(&cfqq->cfq_list);
+
+ /*
+ * sort by our mean service_used, sub-sort by in-flight requests
+ */
+ while ((entry = entry->prev) != &cfqq->cfqd->rr_list) {
+ struct cfq_queue *__cfqq = list_entry_cfqq(entry);
+
+ if (cfqq->service_used > __cfqq->service_used)
+ break;
+ else if (cfqq->service_used == __cfqq->service_used) {
+ struct list_head *prv;
+
+ while ((prv = entry->prev) != &cfqq->cfqd->rr_list) {
+ __cfqq = list_entry_cfqq(prv);
+
+ WARN_ON(__cfqq->service_used > cfqq->service_used);
+ if (cfqq->service_used != __cfqq->service_used)
+ break;
+ if (cfqq->in_flight > __cfqq->in_flight)
+ break;
+
+ entry = prv;
+ }
+ }
+ }
+
+ list_add(&cfqq->cfq_list, entry);
+}
+
+/*
+ * add to busy list of queues for service, trying to be fair in ordering
+ * the pending list according to requests serviced
+ */
+static inline void
+cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ /*
+ * it's currently on the empty list
+ */
+ cfqq->on_rr = 1;
+ cfqd->busy_queues++;
+
+ if (time_after(jiffies, cfqq->service_start + cfq_service))
+ cfqq->service_used >>= 3;
+
+ cfq_sort_rr_list(cfqq, 1);
+}
+
+static inline void
+cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ list_move(&cfqq->cfq_list, &cfqd->empty_list);
+ cfqq->on_rr = 0;
+
+ BUG_ON(!cfqd->busy_queues);
+ cfqd->busy_queues--;
+}
+
+/*
+ * rb tree support functions
+ */
+static inline void cfq_del_crq_rb(struct cfq_rq *crq)
+{
+ struct cfq_queue *cfqq = crq->cfq_queue;
+
if (ON_RB(&crq->rb_node)) {
- cfqq->queued[rq_data_dir(crq->request)]--;
+ struct cfq_data *cfqd = cfqq->cfqd;
+
+ BUG_ON(!cfqq->queued[crq->is_sync]);
+
+ cfq_update_next_crq(crq);
+
+ cfqq->queued[crq->is_sync]--;
rb_erase(&crq->rb_node, &cfqq->sort_list);
- crq->cfq_queue = NULL;
+ RB_CLEAR_COLOR(&crq->rb_node);
+
+ if (RB_EMPTY(&cfqq->sort_list) && cfqq->on_rr)
+ cfq_del_cfqq_rr(cfqd, cfqq);
}
}
static struct cfq_rq *
-__cfq_add_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
+__cfq_add_crq_rb(struct cfq_rq *crq)
{
- struct rb_node **p = &cfqq->sort_list.rb_node;
+ struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
struct rb_node *parent = NULL;
struct cfq_rq *__crq;
@@ -199,30 +547,50 @@ __cfq_add_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
return NULL;
}
-static void
-cfq_add_crq_rb(struct cfq_data *cfqd, struct cfq_queue *cfqq,struct cfq_rq *crq)
+static void cfq_add_crq_rb(struct cfq_rq *crq)
{
+ struct cfq_queue *cfqq = crq->cfq_queue;
+ struct cfq_data *cfqd = cfqq->cfqd;
struct request *rq = crq->request;
struct cfq_rq *__alias;
crq->rb_key = rq_rb_key(rq);
- cfqq->queued[rq_data_dir(rq)]++;
-retry:
- __alias = __cfq_add_crq_rb(cfqq, crq);
- if (!__alias) {
- rb_insert_color(&crq->rb_node, &cfqq->sort_list);
- crq->cfq_queue = cfqq;
- return;
+ cfqq->queued[crq->is_sync]++;
+
+ /*
+ * looks a little odd, but the first insert might return an alias.
+ * if that happens, put the alias on the dispatch list
+ */
+ while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
+ cfq_dispatch_sort(cfqd->queue, __alias);
+
+ rb_insert_color(&crq->rb_node, &cfqq->sort_list);
+
+ if (!cfqq->on_rr)
+ cfq_add_cfqq_rr(cfqd, cfqq);
+
+ /*
+ * check if this request is a better next-serve candidate
+ */
+ cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+}
+
+static inline void
+cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
+{
+ if (ON_RB(&crq->rb_node)) {
+ rb_erase(&crq->rb_node, &cfqq->sort_list);
+ cfqq->queued[crq->is_sync]--;
}
- cfq_dispatch_sort(cfqd, cfqq, __alias);
- goto retry;
+ cfq_add_crq_rb(crq);
}
static struct request *
cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
{
- struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
+ const unsigned long key = cfq_hash_key(cfqd, current);
+ struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, key);
struct rb_node *n;
if (!cfqq)
@@ -244,30 +612,44 @@ out:
return NULL;
}
-static void cfq_remove_request(request_queue_t *q, struct request *rq)
+/*
+ * make sure the service time gets corrected on reissue of this request
+ */
+static void cfq_requeue_request(request_queue_t *q, struct request *rq)
{
- struct cfq_data *cfqd = q->elevator.elevator_data;
struct cfq_rq *crq = RQ_DATA(rq);
if (crq) {
struct cfq_queue *cfqq = crq->cfq_queue;
+ if (cfqq->cfqd->cfq_tagged) {
+ cfqq->service_used--;
+ cfq_sort_rr_list(cfqq, 0);
+ }
+
+ crq->accounted = 0;
+ cfqq->cfqd->rq_in_driver--;
+ }
+ list_add(&rq->queuelist, &q->queue_head);
+}
+
+static void cfq_remove_request(request_queue_t *q, struct request *rq)
+{
+ struct cfq_rq *crq = RQ_DATA(rq);
+
+ if (crq) {
cfq_remove_merge_hints(q, crq);
list_del_init(&rq->queuelist);
- if (cfqq) {
- cfq_del_crq_rb(cfqq, crq);
-
- if (RB_EMPTY(&cfqq->sort_list))
- cfq_put_queue(cfqd, cfqq);
- }
+ if (crq->cfq_queue)
+ cfq_del_crq_rb(crq);
}
}
static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
- struct cfq_data *cfqd = q->elevator.elevator_data;
+ struct cfq_data *cfqd = q->elevator->elevator_data;
struct request *__rq;
int ret;
@@ -305,7 +687,7 @@ out_insert:
static void cfq_merged_request(request_queue_t *q, struct request *req)
{
- struct cfq_data *cfqd = q->elevator.elevator_data;
+ struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_rq *crq = RQ_DATA(req);
cfq_del_crq_hash(crq);
@@ -314,193 +696,546 @@ static void cfq_merged_request(request_queue_t *q, struct request *req)
if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
struct cfq_queue *cfqq = crq->cfq_queue;
- cfq_del_crq_rb(cfqq, crq);
- cfq_add_crq_rb(cfqd, cfqq, crq);
+ cfq_update_next_crq(crq);
+ cfq_reposition_crq_rb(cfqq, crq);
}
q->last_merge = req;
}
static void
-cfq_merged_requests(request_queue_t *q, struct request *req,
+cfq_merged_requests(request_queue_t *q, struct request *rq,
struct request *next)
{
- cfq_merged_request(q, req);
+ struct cfq_rq *crq = RQ_DATA(rq);
+ struct cfq_rq *cnext = RQ_DATA(next);
+
+ cfq_merged_request(q, rq);
+
+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(cnext->queue_start, crq->queue_start)) {
+ list_move(&rq->queuelist, &next->queuelist);
+ crq->queue_start = cnext->queue_start;
+ }
+ }
+
+ cfq_update_next_crq(cnext);
cfq_remove_request(q, next);
}
-static void
-cfq_dispatch_sort(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct cfq_rq *crq)
+/*
+ * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
+ * this function sector sorts the selected request to minimize seeks. we start
+ * at cfqd->last_sector, not 0.
+ */
+static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
{
- struct list_head *head = cfqd->dispatch, *entry = head;
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_queue *cfqq = crq->cfq_queue;
+ struct list_head *head = &q->queue_head, *entry = head;
struct request *__rq;
+ sector_t last;
- cfq_del_crq_rb(cfqq, crq);
- cfq_remove_merge_hints(cfqd->queue, crq);
+ cfq_del_crq_rb(crq);
+ cfq_remove_merge_hints(q, crq);
+ list_del(&crq->request->queuelist);
- if (!list_empty(head)) {
- __rq = list_entry_rq(head->next);
+ last = cfqd->last_sector;
+ while ((entry = entry->prev) != head) {
+ __rq = list_entry_rq(entry);
- if (crq->request->sector < __rq->sector) {
- entry = head->prev;
- goto link;
+ if (blk_barrier_rq(crq->request))
+ break;
+ if (!blk_fs_request(crq->request))
+ break;
+
+ if (crq->request->sector > __rq->sector)
+ break;
+ if (__rq->sector > last && crq->request->sector < last) {
+ last = crq->request->sector;
+ break;
}
}
- while ((entry = entry->prev) != head) {
- __rq = list_entry_rq(entry);
+ cfqd->last_sector = last;
+ crq->in_flight = 1;
+ cfqq->in_flight++;
+ list_add(&crq->request->queuelist, entry);
+}
- if (crq->request->sector <= __rq->sector)
- break;
+/*
+ * return expired entry, or NULL to just start from scratch in rbtree
+ */
+static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
+{
+ struct cfq_data *cfqd = cfqq->cfqd;
+ const int reads = !list_empty(&cfqq->fifo[0]);
+ const int writes = !list_empty(&cfqq->fifo[1]);
+ unsigned long now = jiffies;
+ struct cfq_rq *crq;
+
+ if (time_before(now, cfqq->last_fifo_expire + cfqd->cfq_fifo_batch_expire))
+ return NULL;
+
+ crq = RQ_DATA(list_entry(cfqq->fifo[0].next, struct request, queuelist));
+ if (reads && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_r)) {
+ cfqq->last_fifo_expire = now;
+ return crq;
+ }
+
+ crq = RQ_DATA(list_entry(cfqq->fifo[1].next, struct request, queuelist));
+ if (writes && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_w)) {
+ cfqq->last_fifo_expire = now;
+ return crq;
}
-link:
- list_add_tail(&crq->request->queuelist, entry);
+ return NULL;
}
+/*
+ * dispatch a single request from given queue
+ */
static inline void
-__cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd,
- struct cfq_queue *cfqq)
+cfq_dispatch_request(request_queue_t *q, struct cfq_data *cfqd,
+ struct cfq_queue *cfqq)
{
- struct cfq_rq *crq = rb_entry_crq(rb_first(&cfqq->sort_list));
+ struct cfq_rq *crq;
+
+ /*
+ * follow expired path, else get first next available
+ */
+ if ((crq = cfq_check_fifo(cfqq)) == NULL) {
+ if (cfqd->find_best_crq)
+ crq = cfqq->next_crq;
+ else
+ crq = rb_entry_crq(rb_first(&cfqq->sort_list));
+ }
+
+ cfqd->last_sector = crq->request->sector + crq->request->nr_sectors;
- cfq_dispatch_sort(cfqd, cfqq, crq);
+ /*
+ * finally, insert request into driver list
+ */
+ cfq_dispatch_sort(q, crq);
}
-static int cfq_dispatch_requests(request_queue_t *q, struct cfq_data *cfqd)
+static int cfq_dispatch_requests(request_queue_t *q, int max_dispatch)
{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
struct list_head *entry, *tmp;
- int ret, queued, good_queues;
+ int queued, busy_queues, first_round;
if (list_empty(&cfqd->rr_list))
return 0;
- queued = ret = 0;
+ queued = 0;
+ first_round = 1;
restart:
- good_queues = 0;
+ busy_queues = 0;
list_for_each_safe(entry, tmp, &cfqd->rr_list) {
- cfqq = list_entry_cfqq(cfqd->rr_list.next);
+ cfqq = list_entry_cfqq(entry);
BUG_ON(RB_EMPTY(&cfqq->sort_list));
- __cfq_dispatch_requests(q, cfqd, cfqq);
+ /*
+ * first round of queueing, only select from queues that
+ * don't already have io in-flight
+ */
+ if (first_round && cfqq->in_flight)
+ continue;
- if (RB_EMPTY(&cfqq->sort_list))
- cfq_put_queue(cfqd, cfqq);
- else
- good_queues++;
+ cfq_dispatch_request(q, cfqd, cfqq);
+
+ if (!RB_EMPTY(&cfqq->sort_list))
+ busy_queues++;
queued++;
- ret = 1;
}
- if ((queued < cfqd->cfq_quantum) && good_queues)
+ if ((queued < max_dispatch) && (busy_queues || first_round)) {
+ first_round = 0;
goto restart;
+ }
- return ret;
+ return queued;
+}
+
+static inline void cfq_account_dispatch(struct cfq_rq *crq)
+{
+ struct cfq_queue *cfqq = crq->cfq_queue;
+ struct cfq_data *cfqd = cfqq->cfqd;
+ unsigned long now, elapsed;
+
+ /*
+ * accounted bit is necessary since some drivers will call
+ * elv_next_request() many times for the same request (eg ide)
+ */
+ if (crq->accounted)
+ return;
+
+ now = jiffies;
+ if (cfqq->service_start == ~0UL)
+ cfqq->service_start = now;
+
+ /*
+ * on drives with tagged command queueing, command turn-around time
+ * doesn't necessarily reflect the time spent processing this very
+ * command inside the drive. so do the accounting differently there,
+ * by just sorting on the number of requests
+ */
+ if (cfqd->cfq_tagged) {
+ if (time_after(now, cfqq->service_start + cfq_service)) {
+ cfqq->service_start = now;
+ cfqq->service_used /= 10;
+ }
+
+ cfqq->service_used++;
+ cfq_sort_rr_list(cfqq, 0);
+ }
+
+ elapsed = now - crq->queue_start;
+ if (elapsed > max_elapsed_dispatch)
+ max_elapsed_dispatch = elapsed;
+
+ crq->accounted = 1;
+ crq->service_start = now;
+
+ if (++cfqd->rq_in_driver >= CFQ_MAX_TAG && !cfqd->cfq_tagged) {
+ cfqq->cfqd->cfq_tagged = 1;
+ printk("cfq: depth %d reached, tagging now on\n", CFQ_MAX_TAG);
+ }
+}
+
+static inline void
+cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
+{
+ struct cfq_data *cfqd = cfqq->cfqd;
+
+ WARN_ON(!cfqd->rq_in_driver);
+ cfqd->rq_in_driver--;
+
+ if (!cfqd->cfq_tagged) {
+ unsigned long now = jiffies;
+ unsigned long duration = now - crq->service_start;
+
+ if (time_after(now, cfqq->service_start + cfq_service)) {
+ cfqq->service_start = now;
+ cfqq->service_used >>= 3;
+ }
+
+ cfqq->service_used += duration;
+ cfq_sort_rr_list(cfqq, 0);
+
+ if (duration > max_elapsed_crq)
+ max_elapsed_crq = duration;
+ }
}
static struct request *cfq_next_request(request_queue_t *q)
{
- struct cfq_data *cfqd = q->elevator.elevator_data;
+ struct cfq_data *cfqd = q->elevator->elevator_data;
struct request *rq;
- if (!list_empty(cfqd->dispatch)) {
+ if (!list_empty(&q->queue_head)) {
struct cfq_rq *crq;
dispatch:
- rq = list_entry_rq(cfqd->dispatch->next);
+ rq = list_entry_rq(q->queue_head.next);
- crq = RQ_DATA(rq);
- if (crq)
+ if ((crq = RQ_DATA(rq)) != NULL) {
cfq_remove_merge_hints(q, crq);
+ cfq_account_dispatch(crq);
+ }
return rq;
}
- if (cfq_dispatch_requests(q, cfqd))
+ if (cfq_dispatch_requests(q, cfqd->cfq_quantum))
goto dispatch;
return NULL;
}
+/*
+ * task holds one reference to the queue, dropped when task exits. each crq
+ * in-flight on this queue also holds a reference, dropped when crq is freed.
+ *
+ * queue lock must be held here.
+ */
+static void cfq_put_queue(struct cfq_queue *cfqq)
+{
+ BUG_ON(!atomic_read(&cfqq->ref));
+
+ if (!atomic_dec_and_test(&cfqq->ref))
+ return;
+
+ BUG_ON(rb_first(&cfqq->sort_list));
+ BUG_ON(cfqq->on_rr);
+
+ cfq_put_cfqd(cfqq->cfqd);
+
+ /*
+ * it's on the empty list and still hashed
+ */
+ list_del(&cfqq->cfq_list);
+ hlist_del(&cfqq->cfq_hash);
+ kmem_cache_free(cfq_pool, cfqq);
+}
+
static inline struct cfq_queue *
-__cfq_find_cfq_hash(struct cfq_data *cfqd, int pid, const int hashval)
+__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval)
{
- struct list_head *hash_list = &cfqd->cfq_hash[hashval];
- struct list_head *entry;
+ struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
+ struct hlist_node *entry, *next;
- list_for_each(entry, hash_list) {
+ hlist_for_each_safe(entry, next, hash_list) {
struct cfq_queue *__cfqq = list_entry_qhash(entry);
- if (__cfqq->pid == pid)
+ if (__cfqq->key == key)
return __cfqq;
}
return NULL;
}
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *cfqd, int pid)
+static struct cfq_queue *
+cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key)
{
- const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
+ return __cfq_find_cfq_hash(cfqd, key, hash_long(key, CFQ_QHASH_SHIFT));
+}
+
+static inline void
+cfq_rehash_cfqq(struct cfq_data *cfqd, struct cfq_queue **cfqq,
+ struct cfq_io_context *cic)
+{
+ unsigned long hashkey = cfq_hash_key(cfqd, current);
+ unsigned long hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
+ struct cfq_queue *__cfqq;
+ unsigned long flags;
+
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
- return __cfq_find_cfq_hash(cfqd, pid, hashval);
+ hlist_del(&(*cfqq)->cfq_hash);
+
+ __cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval);
+ if (!__cfqq || __cfqq == *cfqq) {
+ __cfqq = *cfqq;
+ hlist_add_head(&__cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+ __cfqq->key_type = cfqd->key_type;
+ } else {
+ atomic_inc(&__cfqq->ref);
+ cic->cfqq = __cfqq;
+ cfq_put_queue(*cfqq);
+ *cfqq = __cfqq;
+ }
+
+ cic->cfqq = __cfqq;
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
-static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static void cfq_free_io_context(struct cfq_io_context *cic)
{
- cfqd->busy_queues--;
- list_del(&cfqq->cfq_list);
- list_del(&cfqq->cfq_hash);
- mempool_free(cfqq, cfq_mpool);
+ kmem_cache_free(cfq_ioc_pool, cic);
}
-static struct cfq_queue *__cfq_get_queue(struct cfq_data *cfqd, int pid,
- int gfp_mask)
+/*
+ * locking hierarchy is: io_context lock -> queue locks
+ */
+static void cfq_exit_io_context(struct cfq_io_context *cic)
{
- const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
+ struct cfq_queue *cfqq = cic->cfqq;
+ struct list_head *entry = &cic->list;
+ request_queue_t *q;
+ unsigned long flags;
+
+ /*
+ * put the reference this task is holding to the various queues
+ */
+ spin_lock_irqsave(&cic->ioc->lock, flags);
+ while ((entry = cic->list.next) != &cic->list) {
+ struct cfq_io_context *__cic;
+
+ __cic = list_entry(entry, struct cfq_io_context, list);
+ list_del(entry);
+
+ q = __cic->cfqq->cfqd->queue;
+ spin_lock(q->queue_lock);
+ cfq_put_queue(__cic->cfqq);
+ spin_unlock(q->queue_lock);
+ }
+
+ q = cfqq->cfqd->queue;
+ spin_lock(q->queue_lock);
+ cfq_put_queue(cfqq);
+ spin_unlock(q->queue_lock);
+
+ cic->cfqq = NULL;
+ spin_unlock_irqrestore(&cic->ioc->lock, flags);
+}
+
+static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags)
+{
+ struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_flags);
+
+ if (cic) {
+ cic->dtor = cfq_free_io_context;
+ cic->exit = cfq_exit_io_context;
+ INIT_LIST_HEAD(&cic->list);
+ cic->cfqq = NULL;
+ }
+
+ return cic;
+}
+
+/*
+ * Setup general io context and cfq io context. There can be several cfq
+ * io contexts per general io context, if this process is doing io to more
+ * than one device managed by cfq. Note that caller is holding a reference to
+ * cfqq, so we don't need to worry about it disappearing
+ */
+static struct cfq_io_context *
+cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags)
+{
+ struct cfq_data *cfqd = (*cfqq)->cfqd;
+ struct cfq_queue *__cfqq = *cfqq;
+ struct cfq_io_context *cic;
+ struct io_context *ioc;
+
+ might_sleep_if(gfp_flags & __GFP_WAIT);
+
+ ioc = get_io_context(gfp_flags);
+ if (!ioc)
+ return NULL;
+
+ if ((cic = ioc->cic) == NULL) {
+ cic = cfq_alloc_io_context(gfp_flags);
+
+ if (cic == NULL)
+ goto err;
+
+ ioc->cic = cic;
+ cic->ioc = ioc;
+ cic->cfqq = __cfqq;
+ atomic_inc(&__cfqq->ref);
+ } else {
+ struct cfq_io_context *__cic;
+ unsigned long flags;
+
+ /*
+ * since the first cic on the list is actually the head
+ * itself, need to check this here or we'll duplicate a
+ * cic per ioc for no reason
+ */
+ if (cic->cfqq == __cfqq)
+ goto out;
+
+ /*
+ * cic exists, check if we already are there. linear search
+ * should be ok here, the list will usually not be more than
+ * 1 or a few entries long
+ */
+ spin_lock_irqsave(&ioc->lock, flags);
+ list_for_each_entry(__cic, &cic->list, list) {
+ /*
+ * this process is already holding a reference to
+ * this queue, so no need to get one more
+ */
+ if (__cic->cfqq == __cfqq) {
+ cic = __cic;
+ spin_unlock_irqrestore(&ioc->lock, flags);
+ goto out;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->lock, flags);
+
+ /*
+ * nope, process doesn't have a cic associated with this
+ * cfqq yet. get a new one and add to list
+ */
+ __cic = cfq_alloc_io_context(gfp_flags);
+ if (__cic == NULL)
+ goto err;
+
+ __cic->ioc = ioc;
+ __cic->cfqq = __cfqq;
+ atomic_inc(&__cfqq->ref);
+ spin_lock_irqsave(&ioc->lock, flags);
+ list_add(&__cic->list, &cic->list);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+
+ cic = __cic;
+ *cfqq = __cfqq;
+ }
+
+out:
+ /*
+ * if key_type has been changed on the fly, we lazily rehash
+ * each queue at lookup time
+ */
+ if ((*cfqq)->key_type != cfqd->key_type)
+ cfq_rehash_cfqq(cfqd, cfqq, cic);
+
+ return cic;
+err:
+ put_io_context(ioc);
+ return NULL;
+}
+
+static struct cfq_queue *
+__cfq_get_queue(struct cfq_data *cfqd, unsigned long key, int gfp_mask)
+{
+ const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
struct cfq_queue *cfqq, *new_cfqq = NULL;
- request_queue_t *q = cfqd->queue;
retry:
- cfqq = __cfq_find_cfq_hash(cfqd, pid, hashval);
+ cfqq = __cfq_find_cfq_hash(cfqd, key, hashval);
if (!cfqq) {
if (new_cfqq) {
cfqq = new_cfqq;
new_cfqq = NULL;
} else if (gfp_mask & __GFP_WAIT) {
- spin_unlock_irq(q->queue_lock);
- new_cfqq = mempool_alloc(cfq_mpool, gfp_mask);
- spin_lock_irq(q->queue_lock);
+ spin_unlock_irq(cfqd->queue->queue_lock);
+ new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+ spin_lock_irq(cfqd->queue->queue_lock);
goto retry;
} else
- return NULL;
+ goto out;
+
+ memset(cfqq, 0, sizeof(*cfqq));
- INIT_LIST_HEAD(&cfqq->cfq_hash);
+ INIT_HLIST_NODE(&cfqq->cfq_hash);
INIT_LIST_HEAD(&cfqq->cfq_list);
RB_CLEAR_ROOT(&cfqq->sort_list);
-
- cfqq->pid = pid;
- cfqq->queued[0] = cfqq->queued[1] = 0;
- list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+ INIT_LIST_HEAD(&cfqq->fifo[0]);
+ INIT_LIST_HEAD(&cfqq->fifo[1]);
+
+ cfqq->key = key;
+ hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+ atomic_set(&cfqq->ref, 0);
+ cfqq->cfqd = cfqd;
+ atomic_inc(&cfqd->ref);
+ cfqq->key_type = cfqd->key_type;
+ cfqq->service_start = ~0UL;
}
if (new_cfqq)
- mempool_free(new_cfqq, cfq_mpool);
+ kmem_cache_free(cfq_pool, new_cfqq);
+ atomic_inc(&cfqq->ref);
+out:
+ WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
return cfqq;
}
-static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid,
- int gfp_mask)
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, unsigned long key, int gfp_mask)
{
request_queue_t *q = cfqd->queue;
struct cfq_queue *cfqq;
spin_lock_irq(q->queue_lock);
- cfqq = __cfq_get_queue(cfqd, pid, gfp_mask);
+ cfqq = __cfq_get_queue(cfqd, key, gfp_mask);
spin_unlock_irq(q->queue_lock);
return cfqq;
@@ -508,40 +1243,30 @@ static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid,
static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
{
- struct cfq_queue *cfqq;
+ crq->is_sync = 0;
+ if (rq_data_dir(crq->request) == READ || current->flags & PF_SYNCWRITE)
+ crq->is_sync = 1;
- cfqq = __cfq_get_queue(cfqd, current->tgid, GFP_ATOMIC);
- if (cfqq) {
- cfq_add_crq_rb(cfqd, cfqq, crq);
+ cfq_add_crq_rb(crq);
+ crq->queue_start = jiffies;
- if (list_empty(&cfqq->cfq_list)) {
- list_add(&cfqq->cfq_list, &cfqd->rr_list);
- cfqd->busy_queues++;
- }
- } else {
- /*
- * should can only happen if the request wasn't allocated
- * through blk_alloc_request(), eg stack requests from ide-cd
- * (those should be removed) _and_ we are in OOM.
- */
- list_add_tail(&crq->request->queuelist, cfqd->dispatch);
- }
+ list_add_tail(&crq->request->queuelist, &crq->cfq_queue->fifo[crq->is_sync]);
}
static void
cfq_insert_request(request_queue_t *q, struct request *rq, int where)
{
- struct cfq_data *cfqd = q->elevator.elevator_data;
+ struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_rq *crq = RQ_DATA(rq);
switch (where) {
case ELEVATOR_INSERT_BACK:
- while (cfq_dispatch_requests(q, cfqd))
+ while (cfq_dispatch_requests(q, cfqd->cfq_quantum))
;
- list_add_tail(&rq->queuelist, cfqd->dispatch);
+ list_add_tail(&rq->queuelist, &q->queue_head);
break;
case ELEVATOR_INSERT_FRONT:
- list_add(&rq->queuelist, cfqd->dispatch);
+ list_add(&rq->queuelist, &q->queue_head);
break;
case ELEVATOR_INSERT_SORT:
BUG_ON(!blk_fs_request(rq));
@@ -562,12 +1287,27 @@ cfq_insert_request(request_queue_t *q, struct request *rq, int where)
static int cfq_queue_empty(request_queue_t *q)
{
- struct cfq_data *cfqd = q->elevator.elevator_data;
+ struct cfq_data *cfqd = q->elevator->elevator_data;
- if (list_empty(cfqd->dispatch) && list_empty(&cfqd->rr_list))
- return 1;
+ return list_empty(&q->queue_head) && list_empty(&cfqd->rr_list);
+}
+
+static void cfq_completed_request(request_queue_t *q, struct request *rq)
+{
+ struct cfq_rq *crq = RQ_DATA(rq);
+
+ if (unlikely(!blk_fs_request(rq)))
+ return;
+
+ if (crq->in_flight) {
+ struct cfq_queue *cfqq = crq->cfq_queue;
+
+ WARN_ON(!cfqq->in_flight);
+ cfqq->in_flight--;
+
+ cfq_account_completion(cfqq, crq);
+ }
- return 0;
}
static struct request *
@@ -596,92 +1336,169 @@ cfq_latter_request(request_queue_t *q, struct request *rq)
static int cfq_may_queue(request_queue_t *q, int rw)
{
- struct cfq_data *cfqd = q->elevator.elevator_data;
+ struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
- int ret = 1;
+ int ret = ELV_MQUEUE_MAY;
- if (!cfqd->busy_queues)
- goto out;
+ if (current->flags & PF_MEMALLOC)
+ return ELV_MQUEUE_MAY;
- cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
+ cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(cfqd, current));
if (cfqq) {
- int limit = (q->nr_requests - cfqd->cfq_queued) / cfqd->busy_queues;
+ int limit = cfqd->max_queued;
+
+ if (cfqq->allocated[rw] < cfqd->cfq_queued)
+ return ELV_MQUEUE_MUST;
- if (limit < 3)
- limit = 3;
+ if (cfqd->busy_queues)
+ limit = q->nr_requests / cfqd->busy_queues;
+
+ if (limit < cfqd->cfq_queued)
+ limit = cfqd->cfq_queued;
else if (limit > cfqd->max_queued)
limit = cfqd->max_queued;
- if (cfqq->queued[rw] > limit)
- ret = 0;
+ if (cfqq->allocated[rw] >= limit) {
+ if (limit > cfqq->alloc_limit[rw])
+ cfqq->alloc_limit[rw] = limit;
+
+ ret = ELV_MQUEUE_NO;
+ }
}
-out:
+
return ret;
}
+static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
+{
+ struct request_list *rl = &q->rq;
+ const int write = waitqueue_active(&rl->wait[WRITE]);
+ const int read = waitqueue_active(&rl->wait[READ]);
+
+ if (read && cfqq->allocated[READ] < cfqq->alloc_limit[READ])
+ wake_up(&rl->wait[READ]);
+ if (write && cfqq->allocated[WRITE] < cfqq->alloc_limit[WRITE])
+ wake_up(&rl->wait[WRITE]);
+}
+
+/*
+ * queue lock held here
+ */
static void cfq_put_request(request_queue_t *q, struct request *rq)
{
- struct cfq_data *cfqd = q->elevator.elevator_data;
+ struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_rq *crq = RQ_DATA(rq);
- struct request_list *rl;
- int other_rw;
if (crq) {
+ struct cfq_queue *cfqq = crq->cfq_queue;
+
BUG_ON(q->last_merge == rq);
- BUG_ON(ON_MHASH(crq));
+ BUG_ON(!hlist_unhashed(&crq->hash));
+
+ if (crq->io_context)
+ put_io_context(crq->io_context->ioc);
+
+ if (!cfqq->allocated[crq->is_write]) {
+ WARN_ON(1);
+ cfqq->allocated[crq->is_write] = 1;
+ }
+ cfqq->allocated[crq->is_write]--;
mempool_free(crq, cfqd->crq_pool);
rq->elevator_private = NULL;
- }
- /*
- * work-around for may_queue "bug": if a read gets issued and refused
- * to queue because writes ate all the allowed slots and no other
- * reads are pending for this queue, it could get stuck infinitely
- * since freed_request() only checks the waitqueue for writes when
- * freeing them. or vice versa for a single write vs many reads.
- * so check here whether "the other" data direction might be able
- * to queue and wake them
- */
- rl = &q->rq;
- other_rw = rq_data_dir(rq) ^ 1;
- if (rl->count[other_rw] <= q->nr_requests) {
smp_mb();
- if (waitqueue_active(&rl->wait[other_rw]))
- wake_up(&rl->wait[other_rw]);
+ cfq_check_waiters(q, cfqq);
+ cfq_put_queue(cfqq);
}
}
+/*
+ * Allocate cfq data structures associated with this request.
+ */
static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
{
- struct cfq_data *cfqd = q->elevator.elevator_data;
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_io_context *cic;
+ const int rw = rq_data_dir(rq);
struct cfq_queue *cfqq;
struct cfq_rq *crq;
+ unsigned long flags;
+
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ cfqq = __cfq_get_queue(cfqd, cfq_hash_key(cfqd, current), gfp_mask);
+ if (!cfqq) {
+#if 0
+ cfqq = cfq_get_queue(cfqd, CFQ_KEY_SPARE, gfp_mask);
+ printk("%s: got spare queue\n", current->comm);
+#else
+ goto out_lock;
+#endif
+ }
+
+ if (cfqq->allocated[rw] >= cfqd->max_queued)
+ goto out_lock;
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
/*
- * prepare a queue up front, so cfq_enqueue() doesn't have to
+ * if hashing type has changed, the cfq_queue might change here. we
+ * don't bother rechecking ->allocated since it should be a rare
+ * event
*/
- cfqq = cfq_get_queue(cfqd, current->tgid, gfp_mask);
- if (!cfqq)
- return 1;
+ cic = cfq_get_io_context(&cfqq, gfp_mask);
+ if (!cic)
+ goto err;
crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
if (crq) {
- memset(crq, 0, sizeof(*crq));
RB_CLEAR(&crq->rb_node);
+ crq->rb_key = 0;
crq->request = rq;
- crq->cfq_queue = NULL;
- INIT_LIST_HEAD(&crq->hash);
+ INIT_HLIST_NODE(&crq->hash);
+ crq->cfq_queue = cfqq;
+ crq->io_context = cic;
+ crq->service_start = crq->queue_start = 0;
+ crq->in_flight = crq->accounted = crq->is_sync = 0;
+ crq->is_write = rw;
rq->elevator_private = crq;
+ cfqq->allocated[rw]++;
+ cfqq->alloc_limit[rw] = 0;
return 0;
}
+ put_io_context(cic->ioc);
+err:
+ spin_lock_irqsave(q->queue_lock, flags);
+ cfq_put_queue(cfqq);
+out_lock:
+ spin_unlock_irqrestore(q->queue_lock, flags);
return 1;
}
-static void cfq_exit(request_queue_t *q, elevator_t *e)
+static void cfq_put_cfqd(struct cfq_data *cfqd)
{
- struct cfq_data *cfqd = e->elevator_data;
+ request_queue_t *q = cfqd->queue;
+ elevator_t *e = q->elevator;
+ struct cfq_queue *cfqq;
+
+ if (!atomic_dec_and_test(&cfqd->ref))
+ return;
+
+ /*
+ * kill spare queue, getting it means we have two references to it.
+ * drop both
+ */
+ spin_lock_irq(q->queue_lock);
+ cfqq = __cfq_get_queue(cfqd, CFQ_KEY_SPARE, GFP_ATOMIC);
+ cfq_put_queue(cfqq);
+ cfq_put_queue(cfqq);
+ spin_unlock_irq(q->queue_lock);
+
+ blk_put_queue(q);
e->elevator_data = NULL;
mempool_destroy(cfqd->crq_pool);
@@ -690,9 +1507,15 @@ static void cfq_exit(request_queue_t *q, elevator_t *e)
kfree(cfqd);
}
-static int cfq_init(request_queue_t *q, elevator_t *e)
+static void cfq_exit_queue(elevator_t *e)
+{
+ cfq_put_cfqd(e->elevator_data);
+}
+
+static int cfq_init_queue(request_queue_t *q, elevator_t *e)
{
struct cfq_data *cfqd;
+ struct cfq_queue *cfqq;
int i;
cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
@@ -701,12 +1524,13 @@ static int cfq_init(request_queue_t *q, elevator_t *e)
memset(cfqd, 0, sizeof(*cfqd));
INIT_LIST_HEAD(&cfqd->rr_list);
+ INIT_LIST_HEAD(&cfqd->empty_list);
- cfqd->crq_hash = kmalloc(sizeof(struct list_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
+ cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
if (!cfqd->crq_hash)
goto out_crqhash;
- cfqd->cfq_hash = kmalloc(sizeof(struct list_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
+ cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
if (!cfqd->cfq_hash)
goto out_cfqhash;
@@ -715,25 +1539,44 @@ static int cfq_init(request_queue_t *q, elevator_t *e)
goto out_crqpool;
for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
- INIT_LIST_HEAD(&cfqd->crq_hash[i]);
+ INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
- INIT_LIST_HEAD(&cfqd->cfq_hash[i]);
+ INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
- cfqd->dispatch = &q->queue_head;
e->elevator_data = cfqd;
+
cfqd->queue = q;
+ atomic_inc(&q->refcnt);
+
+ /*
+ * setup spare failure queue
+ */
+ cfqq = cfq_get_queue(cfqd, CFQ_KEY_SPARE, GFP_KERNEL);
+ if (!cfqq)
+ goto out_spare;
/*
* just set it to some high value, we want anyone to be able to queue
* some requests. fairness is handled differently
*/
- cfqd->max_queued = q->nr_requests;
- q->nr_requests = 8192;
+ q->nr_requests = 1024;
+ cfqd->max_queued = q->nr_requests / 16;
+ q->nr_batching = cfq_queued;
+ cfqd->key_type = CFQ_KEY_TGID;
+ cfqd->find_best_crq = 1;
+ atomic_set(&cfqd->ref, 1);
cfqd->cfq_queued = cfq_queued;
cfqd->cfq_quantum = cfq_quantum;
+ cfqd->cfq_fifo_expire_r = cfq_fifo_expire_r;
+ cfqd->cfq_fifo_expire_w = cfq_fifo_expire_w;
+ cfqd->cfq_fifo_batch_expire = cfq_fifo_rate;
+ cfqd->cfq_back_max = cfq_back_max;
+ cfqd->cfq_back_penalty = cfq_back_penalty;
return 0;
+out_spare:
+ mempool_destroy(cfqd->crq_pool);
out_crqpool:
kfree(cfqd->cfq_hash);
out_cfqhash:
@@ -743,29 +1586,39 @@ out_crqhash:
return -ENOMEM;
}
+static void cfq_slab_kill(void)
+{
+ if (crq_pool)
+ kmem_cache_destroy(crq_pool);
+ if (cfq_pool)
+ kmem_cache_destroy(cfq_pool);
+ if (cfq_ioc_pool)
+ kmem_cache_destroy(cfq_ioc_pool);
+}
+
static int __init cfq_slab_setup(void)
{
crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
NULL, NULL);
-
if (!crq_pool)
- panic("cfq_iosched: can't init crq pool\n");
+ goto fail;
cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
NULL, NULL);
-
if (!cfq_pool)
- panic("cfq_iosched: can't init cfq pool\n");
+ goto fail;
- cfq_mpool = mempool_create(64, mempool_alloc_slab, mempool_free_slab, cfq_pool);
-
- if (!cfq_mpool)
- panic("cfq_iosched: can't init cfq mpool\n");
+ cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
+ sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
+ if (!cfq_ioc_pool)
+ goto fail;
return 0;
+fail:
+ cfq_slab_kill();
+ return -ENOMEM;
}
-subsys_initcall(cfq_slab_setup);
/*
* sysfs parts below -->
@@ -791,27 +1644,135 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
return count;
}
-#define SHOW_FUNCTION(__FUNC, __VAR) \
+static ssize_t
+cfq_clear_elapsed(struct cfq_data *cfqd, const char *page, size_t count)
+{
+ max_elapsed_dispatch = max_elapsed_crq = 0;
+ return count;
+}
+
+static ssize_t
+cfq_set_key_type(struct cfq_data *cfqd, const char *page, size_t count)
+{
+ spin_lock_irq(cfqd->queue->queue_lock);
+ if (!strncmp(page, "pgid", 4))
+ cfqd->key_type = CFQ_KEY_PGID;
+ else if (!strncmp(page, "tgid", 4))
+ cfqd->key_type = CFQ_KEY_TGID;
+ else if (!strncmp(page, "uid", 3))
+ cfqd->key_type = CFQ_KEY_UID;
+ else if (!strncmp(page, "gid", 3))
+ cfqd->key_type = CFQ_KEY_GID;
+ spin_unlock_irq(cfqd->queue->queue_lock);
+ return count;
+}
+
+static ssize_t
+cfq_read_key_type(struct cfq_data *cfqd, char *page)
+{
+ ssize_t len = 0;
+ int i;
+
+ for (i = CFQ_KEY_PGID; i < CFQ_KEY_LAST; i++) {
+ if (cfqd->key_type == i)
+ len += sprintf(page+len, "[%s] ", cfq_key_types[i]);
+ else
+ len += sprintf(page+len, "%s ", cfq_key_types[i]);
+ }
+ len += sprintf(page+len, "\n");
+ return len;
+}
+
+static ssize_t
+cfq_status_show(struct cfq_data *cfqd, char *page)
+{
+ struct list_head *entry;
+ struct cfq_queue *cfqq;
+ ssize_t len;
+ int i = 0, queues;
+
+ len = sprintf(page, "Busy queues: %u\n", cfqd->busy_queues);
+ len += sprintf(page+len, "key type: %s\n",
+ cfq_key_types[cfqd->key_type]);
+ len += sprintf(page+len, "last sector: %Lu\n",
+ (unsigned long long)cfqd->last_sector);
+ len += sprintf(page+len, "max time in iosched: %lu\n",
+ max_elapsed_dispatch);
+ len += sprintf(page+len, "max completion time: %lu\n", max_elapsed_crq);
+
+ len += sprintf(page+len, "Busy queue list:\n");
+ spin_lock_irq(cfqd->queue->queue_lock);
+ list_for_each(entry, &cfqd->rr_list) {
+ i++;
+ cfqq = list_entry_cfqq(entry);
+ len += sprintf(page+len, " cfqq: key=%lu alloc=%d/%d, "
+ "queued=%d/%d, last_fifo=%lu, service_used=%lu\n",
+ cfqq->key, cfqq->allocated[0], cfqq->allocated[1],
+ cfqq->queued[0], cfqq->queued[1],
+ cfqq->last_fifo_expire, cfqq->service_used);
+ }
+ len += sprintf(page+len, " busy queues total: %d\n", i);
+ queues = i;
+
+ len += sprintf(page+len, "Empty queue list:\n");
+ i = 0;
+ list_for_each(entry, &cfqd->empty_list) {
+ i++;
+ cfqq = list_entry_cfqq(entry);
+ len += sprintf(page+len, " cfqq: key=%lu alloc=%d/%d, "
+ "queued=%d/%d, last_fifo=%lu, service_used=%lu\n",
+ cfqq->key, cfqq->allocated[0], cfqq->allocated[1],
+ cfqq->queued[0], cfqq->queued[1],
+ cfqq->last_fifo_expire, cfqq->service_used);
+ }
+ len += sprintf(page+len, " empty queues total: %d\n", i);
+ queues += i;
+ len += sprintf(page+len, "Total queues: %d\n", queues);
+ spin_unlock_irq(cfqd->queue->queue_lock);
+ return len;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \
{ \
- return cfq_var_show(__VAR, (page)); \
+ unsigned int __data = __VAR; \
+ if (__CONV) \
+ __data = jiffies_to_msecs(__data); \
+ return cfq_var_show(__data, (page)); \
}
-SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum);
-SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued);
+SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
+SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
+SHOW_FUNCTION(cfq_fifo_expire_r_show, cfqd->cfq_fifo_expire_r, 1);
+SHOW_FUNCTION(cfq_fifo_expire_w_show, cfqd->cfq_fifo_expire_w, 1);
+SHOW_FUNCTION(cfq_fifo_batch_expire_show, cfqd->cfq_fifo_batch_expire, 1);
+SHOW_FUNCTION(cfq_find_best_show, cfqd->find_best_crq, 0);
+SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
+SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
#undef SHOW_FUNCTION
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \
{ \
- int ret = cfq_var_store(__PTR, (page), count); \
- if (*(__PTR) < (MIN)) \
- *(__PTR) = (MIN); \
- else if (*(__PTR) > (MAX)) \
- *(__PTR) = (MAX); \
+ unsigned int __data; \
+ int ret = cfq_var_store(&__data, (page), count); \
+ if (__data < (MIN)) \
+ __data = (MIN); \
+ else if (__data > (MAX)) \
+ __data = (MAX); \
+ if (__CONV) \
+ *(__PTR) = msecs_to_jiffies(__data); \
+ else \
+ *(__PTR) = __data; \
return ret; \
}
-STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, INT_MAX);
-STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, INT_MAX);
+STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_fifo_expire_r_store, &cfqd->cfq_fifo_expire_r, 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_w_store, &cfqd->cfq_fifo_expire_w, 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_batch_expire_store, &cfqd->cfq_fifo_batch_expire, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_find_best_store, &cfqd->find_best_crq, 0, 1, 0);
+STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
#undef STORE_FUNCTION
static struct cfq_fs_entry cfq_quantum_entry = {
@@ -824,10 +1785,62 @@ static struct cfq_fs_entry cfq_queued_entry = {
.show = cfq_queued_show,
.store = cfq_queued_store,
};
+static struct cfq_fs_entry cfq_fifo_expire_r_entry = {
+ .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
+ .show = cfq_fifo_expire_r_show,
+ .store = cfq_fifo_expire_r_store,
+};
+static struct cfq_fs_entry cfq_fifo_expire_w_entry = {
+ .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
+ .show = cfq_fifo_expire_w_show,
+ .store = cfq_fifo_expire_w_store,
+};
+static struct cfq_fs_entry cfq_fifo_batch_expire_entry = {
+ .attr = {.name = "fifo_batch_expire", .mode = S_IRUGO | S_IWUSR },
+ .show = cfq_fifo_batch_expire_show,
+ .store = cfq_fifo_batch_expire_store,
+};
+static struct cfq_fs_entry cfq_find_best_entry = {
+ .attr = {.name = "find_best_crq", .mode = S_IRUGO | S_IWUSR },
+ .show = cfq_find_best_show,
+ .store = cfq_find_best_store,
+};
+static struct cfq_fs_entry cfq_back_max_entry = {
+ .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
+ .show = cfq_back_max_show,
+ .store = cfq_back_max_store,
+};
+static struct cfq_fs_entry cfq_back_penalty_entry = {
+ .attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
+ .show = cfq_back_penalty_show,
+ .store = cfq_back_penalty_store,
+};
+static struct cfq_fs_entry cfq_clear_elapsed_entry = {
+ .attr = {.name = "clear_elapsed", .mode = S_IWUSR },
+ .store = cfq_clear_elapsed,
+};
+static struct cfq_fs_entry cfq_misc_entry = {
+ .attr = {.name = "show_status", .mode = S_IRUGO },
+ .show = cfq_status_show,
+};
+static struct cfq_fs_entry cfq_key_type_entry = {
+ .attr = {.name = "key_type", .mode = S_IRUGO | S_IWUSR },
+ .show = cfq_read_key_type,
+ .store = cfq_set_key_type,
+};
static struct attribute *default_attrs[] = {
&cfq_quantum_entry.attr,
&cfq_queued_entry.attr,
+ &cfq_fifo_expire_r_entry.attr,
+ &cfq_fifo_expire_w_entry.attr,
+ &cfq_fifo_batch_expire_entry.attr,
+ &cfq_key_type_entry.attr,
+ &cfq_find_best_entry.attr,
+ &cfq_back_max_entry.attr,
+ &cfq_back_penalty_entry.attr,
+ &cfq_clear_elapsed_entry.attr,
+ &cfq_misc_entry.attr,
NULL,
};
@@ -868,23 +1881,56 @@ struct kobj_type cfq_ktype = {
.default_attrs = default_attrs,
};
-elevator_t iosched_cfq = {
- .elevator_name = "cfq",
- .elevator_ktype = &cfq_ktype,
- .elevator_merge_fn = cfq_merge,
- .elevator_merged_fn = cfq_merged_request,
- .elevator_merge_req_fn = cfq_merged_requests,
- .elevator_next_req_fn = cfq_next_request,
- .elevator_add_req_fn = cfq_insert_request,
- .elevator_remove_req_fn = cfq_remove_request,
- .elevator_queue_empty_fn = cfq_queue_empty,
- .elevator_former_req_fn = cfq_former_request,
- .elevator_latter_req_fn = cfq_latter_request,
- .elevator_set_req_fn = cfq_set_request,
- .elevator_put_req_fn = cfq_put_request,
- .elevator_may_queue_fn = cfq_may_queue,
- .elevator_init_fn = cfq_init,
- .elevator_exit_fn = cfq_exit,
+static struct elevator_type iosched_cfq = {
+ .ops = {
+ .elevator_merge_fn = cfq_merge,
+ .elevator_merged_fn = cfq_merged_request,
+ .elevator_merge_req_fn = cfq_merged_requests,
+ .elevator_next_req_fn = cfq_next_request,
+ .elevator_add_req_fn = cfq_insert_request,
+ .elevator_remove_req_fn = cfq_remove_request,
+ .elevator_requeue_req_fn = cfq_requeue_request,
+ .elevator_queue_empty_fn = cfq_queue_empty,
+ .elevator_completed_req_fn = cfq_completed_request,
+ .elevator_former_req_fn = cfq_former_request,
+ .elevator_latter_req_fn = cfq_latter_request,
+ .elevator_set_req_fn = cfq_set_request,
+ .elevator_put_req_fn = cfq_put_request,
+ .elevator_may_queue_fn = cfq_may_queue,
+ .elevator_init_fn = cfq_init_queue,
+ .elevator_exit_fn = cfq_exit_queue,
+ },
+ .elevator_ktype = &cfq_ktype,
+ .elevator_name = "cfq",
+ .elevator_owner = THIS_MODULE,
};
-EXPORT_SYMBOL(iosched_cfq);
+int cfq_init(void)
+{
+ int ret;
+
+ if (cfq_slab_setup())
+ return -ENOMEM;
+
+ ret = elv_register(&iosched_cfq);
+ if (!ret) {
+ __module_get(THIS_MODULE);
+ return 0;
+ }
+
+ cfq_slab_kill();
+ return ret;
+}
+
+void cfq_exit(void)
+{
+ cfq_slab_kill();
+ elv_unregister(&iosched_cfq);
+}
+
+module_init(cfq_init);
+module_exit(cfq_exit);
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 204b3182900d..dc896a12283b 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -21,7 +21,6 @@
*/
#include <linux/config.h> /* CONFIG_PROC_FS */
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/bio.h>
@@ -732,7 +731,6 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
}
#ifndef MODULE
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,13)
/*
* Config string is a comma separated set of i/o addresses of EISA cards.
*/
@@ -749,18 +747,6 @@ static int cpqarray_setup(char *str)
__setup("smart2=", cpqarray_setup);
-#else
-
-/*
- * Copy the contents of the ints[] array passed to us by init.
- */
-void cpqarray_setup(char *str, int *ints)
-{
- int i;
- for(i=0; i<ints[0] && i<8; i++)
- eisa[i] = ints[i+1];
-}
-#endif
#endif
/*
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index fb7ab733c709..f482e8bdb4d6 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -289,7 +289,7 @@ deadline_find_first_drq(struct deadline_data *dd, int data_dir)
static inline void
deadline_add_request(struct request_queue *q, struct request *rq)
{
- struct deadline_data *dd = q->elevator.elevator_data;
+ struct deadline_data *dd = q->elevator->elevator_data;
struct deadline_rq *drq = RQ_DATA(rq);
const int data_dir = rq_data_dir(drq->request);
@@ -317,7 +317,7 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
struct deadline_rq *drq = RQ_DATA(rq);
if (drq) {
- struct deadline_data *dd = q->elevator.elevator_data;
+ struct deadline_data *dd = q->elevator->elevator_data;
list_del_init(&drq->fifo);
deadline_remove_merge_hints(q, drq);
@@ -328,7 +328,7 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
- struct deadline_data *dd = q->elevator.elevator_data;
+ struct deadline_data *dd = q->elevator->elevator_data;
struct request *__rq;
int ret;
@@ -383,7 +383,7 @@ out_insert:
static void deadline_merged_request(request_queue_t *q, struct request *req)
{
- struct deadline_data *dd = q->elevator.elevator_data;
+ struct deadline_data *dd = q->elevator->elevator_data;
struct deadline_rq *drq = RQ_DATA(req);
/*
@@ -407,7 +407,7 @@ static void
deadline_merged_requests(request_queue_t *q, struct request *req,
struct request *next)
{
- struct deadline_data *dd = q->elevator.elevator_data;
+ struct deadline_data *dd = q->elevator->elevator_data;
struct deadline_rq *drq = RQ_DATA(req);
struct deadline_rq *dnext = RQ_DATA(next);
@@ -604,7 +604,7 @@ dispatch_request:
static struct request *deadline_next_request(request_queue_t *q)
{
- struct deadline_data *dd = q->elevator.elevator_data;
+ struct deadline_data *dd = q->elevator->elevator_data;
struct request *rq;
/*
@@ -625,7 +625,7 @@ dispatch:
static void
deadline_insert_request(request_queue_t *q, struct request *rq, int where)
{
- struct deadline_data *dd = q->elevator.elevator_data;
+ struct deadline_data *dd = q->elevator->elevator_data;
/* barriers must flush the reorder queue */
if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
@@ -653,7 +653,7 @@ deadline_insert_request(request_queue_t *q, struct request *rq, int where)
static int deadline_queue_empty(request_queue_t *q)
{
- struct deadline_data *dd = q->elevator.elevator_data;
+ struct deadline_data *dd = q->elevator->elevator_data;
if (!list_empty(&dd->fifo_list[WRITE])
|| !list_empty(&dd->fifo_list[READ])
@@ -687,7 +687,7 @@ deadline_latter_request(request_queue_t *q, struct request *rq)
return NULL;
}
-static void deadline_exit(request_queue_t *q, elevator_t *e)
+static void deadline_exit_queue(elevator_t *e)
{
struct deadline_data *dd = e->elevator_data;
@@ -703,7 +703,7 @@ static void deadline_exit(request_queue_t *q, elevator_t *e)
* initialize elevator private data (deadline_data), and alloc a drq for
* each request on the free lists
*/
-static int deadline_init(request_queue_t *q, elevator_t *e)
+static int deadline_init_queue(request_queue_t *q, elevator_t *e)
{
struct deadline_data *dd;
int i;
@@ -748,7 +748,7 @@ static int deadline_init(request_queue_t *q, elevator_t *e)
static void deadline_put_request(request_queue_t *q, struct request *rq)
{
- struct deadline_data *dd = q->elevator.elevator_data;
+ struct deadline_data *dd = q->elevator->elevator_data;
struct deadline_rq *drq = RQ_DATA(rq);
if (drq) {
@@ -760,7 +760,7 @@ static void deadline_put_request(request_queue_t *q, struct request *rq)
static int
deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
{
- struct deadline_data *dd = q->elevator.elevator_data;
+ struct deadline_data *dd = q->elevator->elevator_data;
struct deadline_rq *drq;
drq = mempool_alloc(dd->drq_pool, gfp_mask);
@@ -805,33 +805,41 @@ deadline_var_store(unsigned int *var, const char *page, size_t count)
return count;
}
-#define SHOW_FUNCTION(__FUNC, __VAR) \
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(struct deadline_data *dd, char *page) \
{ \
- return deadline_var_show(__VAR, (page)); \
-}
-SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ]);
-SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE]);
-SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved);
-SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges);
-SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch);
+ unsigned int __data = __VAR; \
+ if (__CONV) \
+ __data = jiffies_to_msecs(__data); \
+ return deadline_var_show(__data, (page)); \
+}
+SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
+SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
+SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
+SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
+SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) \
{ \
- int ret = deadline_var_store(__PTR, (page), count); \
- if (*(__PTR) < (MIN)) \
- *(__PTR) = (MIN); \
- else if (*(__PTR) > (MAX)) \
- *(__PTR) = (MAX); \
+ unsigned int __data; \
+ int ret = deadline_var_store(&__data, (page), count); \
+ if (__data < (MIN)) \
+ __data = (MIN); \
+ else if (__data > (MAX)) \
+ __data = (MAX); \
+ if (__CONV) \
+ *(__PTR) = msecs_to_jiffies(__data); \
+ else \
+ *(__PTR) = __data; \
return ret; \
}
-STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX);
-STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX);
-STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX);
-STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1);
-STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX);
+STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
+STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
+STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
static struct deadline_fs_entry deadline_readexpire_entry = {
@@ -906,36 +914,54 @@ struct kobj_type deadline_ktype = {
.default_attrs = default_attrs,
};
-static int __init deadline_slab_setup(void)
+static struct elevator_type iosched_deadline = {
+ .ops = {
+ .elevator_merge_fn = deadline_merge,
+ .elevator_merged_fn = deadline_merged_request,
+ .elevator_merge_req_fn = deadline_merged_requests,
+ .elevator_next_req_fn = deadline_next_request,
+ .elevator_add_req_fn = deadline_insert_request,
+ .elevator_remove_req_fn = deadline_remove_request,
+ .elevator_queue_empty_fn = deadline_queue_empty,
+ .elevator_former_req_fn = deadline_former_request,
+ .elevator_latter_req_fn = deadline_latter_request,
+ .elevator_set_req_fn = deadline_set_request,
+ .elevator_put_req_fn = deadline_put_request,
+ .elevator_init_fn = deadline_init_queue,
+ .elevator_exit_fn = deadline_exit_queue,
+ },
+
+ .elevator_ktype = &deadline_ktype,
+ .elevator_name = "deadline",
+ .elevator_owner = THIS_MODULE,
+};
+
+int deadline_init(void)
{
+ int ret;
+
drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
0, 0, NULL, NULL);
if (!drq_pool)
- panic("deadline: can't init slab pool\n");
+ return -ENOMEM;
- return 0;
+ ret = elv_register(&iosched_deadline);
+ if (ret)
+ kmem_cache_destroy(drq_pool);
+
+ return ret;
}
-subsys_initcall(deadline_slab_setup);
-
-elevator_t iosched_deadline = {
- .elevator_merge_fn = deadline_merge,
- .elevator_merged_fn = deadline_merged_request,
- .elevator_merge_req_fn = deadline_merged_requests,
- .elevator_next_req_fn = deadline_next_request,
- .elevator_add_req_fn = deadline_insert_request,
- .elevator_remove_req_fn = deadline_remove_request,
- .elevator_queue_empty_fn = deadline_queue_empty,
- .elevator_former_req_fn = deadline_former_request,
- .elevator_latter_req_fn = deadline_latter_request,
- .elevator_set_req_fn = deadline_set_request,
- .elevator_put_req_fn = deadline_put_request,
- .elevator_init_fn = deadline_init,
- .elevator_exit_fn = deadline_exit,
-
- .elevator_ktype = &deadline_ktype,
- .elevator_name = "deadline",
-};
+void deadline_exit(void)
+{
+ kmem_cache_destroy(drq_pool);
+ elv_unregister(&iosched_deadline);
+}
+
+module_init(deadline_init);
+module_exit(deadline_exit);
-EXPORT_SYMBOL(iosched_deadline);
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("deadline IO scheduler");
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index 35c9385ac133..1b4f6a70c0ca 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -37,6 +37,9 @@
#include <asm/uaccess.h>
+static spinlock_t elv_list_lock = SPIN_LOCK_UNLOCKED;
+static LIST_HEAD(elv_list);
+
/*
* can we safely merge with this request?
*/
@@ -60,6 +63,7 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
return 0;
}
+EXPORT_SYMBOL(elv_rq_merge_ok);
inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
@@ -77,6 +81,7 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio)
return ret;
}
+EXPORT_SYMBOL(elv_try_merge);
inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
{
@@ -85,31 +90,117 @@ inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
+EXPORT_SYMBOL(elv_try_last_merge);
-/*
- * general block -> elevator interface starts here
- */
-int elevator_init(request_queue_t *q, elevator_t *type)
+struct elevator_type *elevator_find(const char *name)
+{
+ struct elevator_type *e = NULL;
+ struct list_head *entry;
+
+ spin_lock_irq(&elv_list_lock);
+ list_for_each(entry, &elv_list) {
+ struct elevator_type *__e;
+
+ __e = list_entry(entry, struct elevator_type, list);
+
+ if (!strcmp(__e->elevator_name, name)) {
+ e = __e;
+ break;
+ }
+ }
+ spin_unlock_irq(&elv_list_lock);
+
+ return e;
+}
+
+static int elevator_attach(request_queue_t *q, struct elevator_type *e,
+ struct elevator_queue *eq)
{
- elevator_t *e = &q->elevator;
+ int ret = 0;
- memcpy(e, type, sizeof(*e));
+ if (!try_module_get(e->elevator_owner))
+ return -EINVAL;
+
+ memset(eq, 0, sizeof(*eq));
+ eq->ops = &e->ops;
+ eq->elevator_type = e;
INIT_LIST_HEAD(&q->queue_head);
q->last_merge = NULL;
+ q->elevator = eq;
+
+ if (eq->ops->elevator_init_fn)
+ ret = eq->ops->elevator_init_fn(q, eq);
- if (e->elevator_init_fn)
- return e->elevator_init_fn(q, e);
+ return ret;
+}
+
+static char chosen_elevator[16];
+
+static void elevator_setup_default(void)
+{
+ /*
+ * check if default is set and exists
+ */
+ if (chosen_elevator[0] && elevator_find(chosen_elevator))
+ return;
+
+#if defined(CONFIG_IOSCHED_AS)
+ strcpy(chosen_elevator, "anticipatory");
+#elif defined(CONFIG_IOSCHED_DEADLINE)
+ strcpy(chosen_elevator, "deadline");
+#elif defined(CONFIG_IOSCHED_CFQ)
+ strcpy(chosen_elevator, "cfq");
+#elif defined(CONFIG_IOSCHED_NOOP)
+ strcpy(chosen_elevator, "noop");
+#else
+#error "You must build at least 1 IO scheduler into the kernel"
+#endif
+ printk("elevator: using %s as default io scheduler\n", chosen_elevator);
+}
+static int __init elevator_setup(char *str)
+{
+ strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
return 0;
}
-void elevator_exit(request_queue_t *q)
+__setup("elevator=", elevator_setup);
+
+int elevator_init(request_queue_t *q, char *name)
+{
+ struct elevator_type *e = NULL;
+ struct elevator_queue *eq;
+ int ret = 0;
+
+ elevator_setup_default();
+
+ if (!name)
+ name = chosen_elevator;
+
+ e = elevator_find(name);
+ if (!e)
+ return -EINVAL;
+
+ eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
+ if (!eq)
+ return -ENOMEM;
+
+ ret = elevator_attach(q, e, eq);
+ if (ret)
+ kfree(eq);
+
+ return ret;
+}
+
+void elevator_exit(elevator_t *e)
{
- elevator_t *e = &q->elevator;
+ if (e->ops->elevator_exit_fn)
+ e->ops->elevator_exit_fn(e);
- if (e->elevator_exit_fn)
- e->elevator_exit_fn(q, e);
+ module_put(e->elevator_type->elevator_owner);
+ e->elevator_type = NULL;
+ kfree(e);
}
int elevator_global_init(void)
@@ -119,32 +210,32 @@ int elevator_global_init(void)
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
- elevator_t *e = &q->elevator;
+ elevator_t *e = q->elevator;
- if (e->elevator_merge_fn)
- return e->elevator_merge_fn(q, req, bio);
+ if (e->ops->elevator_merge_fn)
+ return e->ops->elevator_merge_fn(q, req, bio);
return ELEVATOR_NO_MERGE;
}
void elv_merged_request(request_queue_t *q, struct request *rq)
{
- elevator_t *e = &q->elevator;
+ elevator_t *e = q->elevator;
- if (e->elevator_merged_fn)
- e->elevator_merged_fn(q, rq);
+ if (e->ops->elevator_merged_fn)
+ e->ops->elevator_merged_fn(q, rq);
}
void elv_merge_requests(request_queue_t *q, struct request *rq,
struct request *next)
{
- elevator_t *e = &q->elevator;
+ elevator_t *e = q->elevator;
if (q->last_merge == next)
q->last_merge = NULL;
- if (e->elevator_merge_req_fn)
- e->elevator_merge_req_fn(q, rq, next);
+ if (e->ops->elevator_merge_req_fn)
+ e->ops->elevator_merge_req_fn(q, rq, next);
}
void elv_requeue_request(request_queue_t *q, struct request *rq)
@@ -160,8 +251,8 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
* if iosched has an explicit requeue hook, then use that. otherwise
* just put the request at the front of the queue
*/
- if (q->elevator.elevator_requeue_req_fn)
- q->elevator.elevator_requeue_req_fn(q, rq);
+ if (q->elevator->ops->elevator_requeue_req_fn)
+ q->elevator->ops->elevator_requeue_req_fn(q, rq);
else
__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}
@@ -180,7 +271,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
blk_plug_device(q);
rq->q = q;
- q->elevator.elevator_add_req_fn(q, rq, where);
+ q->elevator->ops->elevator_add_req_fn(q, rq, where);
if (blk_queue_plugged(q)) {
int nrq = q->rq.count[READ] + q->rq.count[WRITE] - q->in_flight;
@@ -203,7 +294,7 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
static inline struct request *__elv_next_request(request_queue_t *q)
{
- return q->elevator.elevator_next_req_fn(q);
+ return q->elevator->ops->elevator_next_req_fn(q);
}
struct request *elv_next_request(request_queue_t *q)
@@ -252,7 +343,7 @@ struct request *elv_next_request(request_queue_t *q)
void elv_remove_request(request_queue_t *q, struct request *rq)
{
- elevator_t *e = &q->elevator;
+ elevator_t *e = q->elevator;
/*
* the time frame between a request being removed from the lists
@@ -274,16 +365,16 @@ void elv_remove_request(request_queue_t *q, struct request *rq)
if (rq == q->last_merge)
q->last_merge = NULL;
- if (e->elevator_remove_req_fn)
- e->elevator_remove_req_fn(q, rq);
+ if (e->ops->elevator_remove_req_fn)
+ e->ops->elevator_remove_req_fn(q, rq);
}
int elv_queue_empty(request_queue_t *q)
{
- elevator_t *e = &q->elevator;
+ elevator_t *e = q->elevator;
- if (e->elevator_queue_empty_fn)
- return e->elevator_queue_empty_fn(q);
+ if (e->ops->elevator_queue_empty_fn)
+ return e->ops->elevator_queue_empty_fn(q);
return list_empty(&q->queue_head);
}
@@ -292,10 +383,10 @@ struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
struct list_head *next;
- elevator_t *e = &q->elevator;
+ elevator_t *e = q->elevator;
- if (e->elevator_latter_req_fn)
- return e->elevator_latter_req_fn(q, rq);
+ if (e->ops->elevator_latter_req_fn)
+ return e->ops->elevator_latter_req_fn(q, rq);
next = rq->queuelist.next;
if (next != &q->queue_head && next != &rq->queuelist)
@@ -308,10 +399,10 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
struct list_head *prev;
- elevator_t *e = &q->elevator;
+ elevator_t *e = q->elevator;
- if (e->elevator_former_req_fn)
- return e->elevator_former_req_fn(q, rq);
+ if (e->ops->elevator_former_req_fn)
+ return e->ops->elevator_former_req_fn(q, rq);
prev = rq->queuelist.prev;
if (prev != &q->queue_head && prev != &rq->queuelist)
@@ -322,10 +413,10 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
int elv_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
{
- elevator_t *e = &q->elevator;
+ elevator_t *e = q->elevator;
- if (e->elevator_set_req_fn)
- return e->elevator_set_req_fn(q, rq, gfp_mask);
+ if (e->ops->elevator_set_req_fn)
+ return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
rq->elevator_private = NULL;
return 0;
@@ -333,25 +424,25 @@ int elv_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
void elv_put_request(request_queue_t *q, struct request *rq)
{
- elevator_t *e = &q->elevator;
+ elevator_t *e = q->elevator;
- if (e->elevator_put_req_fn)
- e->elevator_put_req_fn(q, rq);
+ if (e->ops->elevator_put_req_fn)
+ e->ops->elevator_put_req_fn(q, rq);
}
int elv_may_queue(request_queue_t *q, int rw)
{
- elevator_t *e = &q->elevator;
+ elevator_t *e = q->elevator;
- if (e->elevator_may_queue_fn)
- return e->elevator_may_queue_fn(q, rw);
+ if (e->ops->elevator_may_queue_fn)
+ return e->ops->elevator_may_queue_fn(q, rw);
- return 0;
+ return ELV_MQUEUE_MAY;
}
void elv_completed_request(request_queue_t *q, struct request *rq)
{
- elevator_t *e = &q->elevator;
+ elevator_t *e = q->elevator;
/*
* request is released from the driver, io must be done
@@ -359,22 +450,20 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
if (blk_account_rq(rq))
q->in_flight--;
- if (e->elevator_completed_req_fn)
- e->elevator_completed_req_fn(q, rq);
+ if (e->ops->elevator_completed_req_fn)
+ e->ops->elevator_completed_req_fn(q, rq);
}
int elv_register_queue(struct request_queue *q)
{
- elevator_t *e;
-
- e = &q->elevator;
+ elevator_t *e = q->elevator;
e->kobj.parent = kobject_get(&q->kobj);
if (!e->kobj.parent)
return -EBUSY;
snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
- e->kobj.ktype = e->elevator_ktype;
+ e->kobj.ktype = e->elevator_type->elevator_ktype;
return kobject_register(&e->kobj);
}
@@ -382,12 +471,131 @@ int elv_register_queue(struct request_queue *q)
void elv_unregister_queue(struct request_queue *q)
{
if (q) {
- elevator_t * e = &q->elevator;
+ elevator_t *e = q->elevator;
kobject_unregister(&e->kobj);
kobject_put(&q->kobj);
}
}
+int elv_register(struct elevator_type *e)
+{
+ if (elevator_find(e->elevator_name))
+ BUG();
+
+ spin_lock_irq(&elv_list_lock);
+ list_add_tail(&e->list, &elv_list);
+ spin_unlock_irq(&elv_list_lock);
+
+ printk("io scheduler %s registered\n", e->elevator_name);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(elv_register);
+
+void elv_unregister(struct elevator_type *e)
+{
+ spin_lock_irq(&elv_list_lock);
+ list_del_init(&e->list);
+ spin_unlock_irq(&elv_list_lock);
+}
+EXPORT_SYMBOL_GPL(elv_unregister);
+
+/*
+ * switch to new_e io scheduler. be careful not to introduce deadlocks -
+ * we don't free the old io scheduler, before we have allocated what we
+ * need for the new one. this way we have a chance of going back to the old
+ * one, if the new one fails init for some reason
+ */
+static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
+{
+ elevator_t *e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+ elevator_t *old_elevator;
+
+ if (!e) {
+ printk("elevator: out of memory\n");
+ return;
+ }
+
+ blk_wait_queue_drained(q);
+
+ /*
+ * unregister old elevator data
+ */
+ elv_unregister_queue(q);
+ old_elevator = q->elevator;
+
+ /*
+ * attach and start new elevator
+ */
+ if (elevator_attach(q, new_e, e))
+ goto fail;
+
+ if (elv_register_queue(q))
+ goto fail_register;
+
+ /*
+ * finally exit old elevator and start queue again
+ */
+ elevator_exit(old_elevator);
+ blk_finish_queue_drain(q);
+ return;
+
+fail_register:
+ /*
+ * switch failed, exit the new io scheduler and reattach the old
+ * one again (along with re-adding the sysfs dir)
+ */
+ elevator_exit(e);
+fail:
+ q->elevator = old_elevator;
+ elv_register_queue(q);
+ blk_finish_queue_drain(q);
+ printk("elevator: switch to %s failed\n", new_e->elevator_name);
+}
+
+ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
+{
+ char elevator_name[ELV_NAME_MAX];
+ struct elevator_type *e;
+
+ memset(elevator_name, 0, sizeof(elevator_name));
+ strncpy(elevator_name, name, sizeof(elevator_name));
+
+ if (elevator_name[strlen(elevator_name) - 1] == '\n')
+ elevator_name[strlen(elevator_name) - 1] = '\0';
+
+ e = elevator_find(elevator_name);
+ if (!e) {
+ printk("elevator: type %s not found\n", elevator_name);
+ return -EINVAL;
+ }
+
+ elevator_switch(q, e);
+ return count;
+}
+
+ssize_t elv_iosched_show(request_queue_t *q, char *name)
+{
+ elevator_t *e = q->elevator;
+ struct elevator_type *elv = e->elevator_type;
+ struct list_head *entry;
+ int len = 0;
+
+ spin_lock_irq(q->queue_lock);
+ list_for_each(entry, &elv_list) {
+ struct elevator_type *__e;
+
+ __e = list_entry(entry, struct elevator_type, list);
+ if (!strcmp(elv->elevator_name, __e->elevator_name))
+ len += sprintf(name+len, "[%s] ", elv->elevator_name);
+ else
+ len += sprintf(name+len, "%s ", __e->elevator_name);
+ }
+ spin_unlock_irq(q->queue_lock);
+
+ len += sprintf(len+name, "\n");
+ return len;
+}
+
module_init(elevator_global_init);
EXPORT_SYMBOL(elv_add_request);
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 26fdf6be6bd0..3ba6430899df 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -243,6 +243,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
blk_queue_hardsect_size(q, 512);
blk_queue_dma_alignment(q, 511);
blk_queue_congestion_threshold(q);
+ q->nr_batching = BLK_BATCH_REQ;
q->unplug_thresh = 4; /* hmm */
q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */
@@ -1395,7 +1396,8 @@ void blk_cleanup_queue(request_queue_t * q)
if (!atomic_dec_and_test(&q->refcnt))
return;
- elevator_exit(q);
+ if (q->elevator)
+ elevator_exit(q->elevator);
del_timer_sync(&q->unplug_timer);
kblockd_flush();
@@ -1418,6 +1420,7 @@ static int blk_init_free_list(request_queue_t *q)
rl->count[READ] = rl->count[WRITE] = 0;
init_waitqueue_head(&rl->wait[READ]);
init_waitqueue_head(&rl->wait[WRITE]);
+ init_waitqueue_head(&rl->drain);
rl->rq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep);
@@ -1429,45 +1432,6 @@ static int blk_init_free_list(request_queue_t *q)
static int __make_request(request_queue_t *, struct bio *);
-static elevator_t *chosen_elevator =
-#if defined(CONFIG_IOSCHED_AS)
- &iosched_as;
-#elif defined(CONFIG_IOSCHED_DEADLINE)
- &iosched_deadline;
-#elif defined(CONFIG_IOSCHED_CFQ)
- &iosched_cfq;
-#elif defined(CONFIG_IOSCHED_NOOP)
- &elevator_noop;
-#else
- NULL;
-#error "You must have at least 1 I/O scheduler selected"
-#endif
-
-#if defined(CONFIG_IOSCHED_AS) || defined(CONFIG_IOSCHED_DEADLINE) || defined (CONFIG_IOSCHED_NOOP)
-static int __init elevator_setup(char *str)
-{
-#ifdef CONFIG_IOSCHED_DEADLINE
- if (!strcmp(str, "deadline"))
- chosen_elevator = &iosched_deadline;
-#endif
-#ifdef CONFIG_IOSCHED_AS
- if (!strcmp(str, "as"))
- chosen_elevator = &iosched_as;
-#endif
-#ifdef CONFIG_IOSCHED_CFQ
- if (!strcmp(str, "cfq"))
- chosen_elevator = &iosched_cfq;
-#endif
-#ifdef CONFIG_IOSCHED_NOOP
- if (!strcmp(str, "noop"))
- chosen_elevator = &elevator_noop;
-#endif
- return 1;
-}
-
-__setup("elevator=", elevator_setup);
-#endif /* CONFIG_IOSCHED_AS || CONFIG_IOSCHED_DEADLINE || CONFIG_IOSCHED_NOOP */
-
request_queue_t *blk_alloc_queue(int gfp_mask)
{
request_queue_t *q = kmem_cache_alloc(requestq_cachep, gfp_mask);
@@ -1520,21 +1484,14 @@ EXPORT_SYMBOL(blk_alloc_queue);
**/
request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
- request_queue_t *q;
- static int printed;
+ request_queue_t *q = blk_alloc_queue(GFP_KERNEL);
- q = blk_alloc_queue(GFP_KERNEL);
if (!q)
return NULL;
if (blk_init_free_list(q))
goto out_init;
- if (!printed) {
- printed = 1;
- printk("Using %s io scheduler\n", chosen_elevator->elevator_name);
- }
-
q->request_fn = rfn;
q->back_merge_fn = ll_back_merge_fn;
q->front_merge_fn = ll_front_merge_fn;
@@ -1555,8 +1512,10 @@ request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
/*
* all done
*/
- if (!elevator_init(q, chosen_elevator))
+ if (!elevator_init(q, NULL)) {
+ blk_queue_congestion_threshold(q);
return q;
+ }
blk_cleanup_queue(q);
out_init:
@@ -1584,13 +1543,20 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
mempool_free(rq, q->rq.rq_pool);
}
-static inline struct request *blk_alloc_request(request_queue_t *q,int gfp_mask)
+static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
+ int gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
if (!rq)
return NULL;
+ /*
+ * first three bits are identical in rq->flags and bio->bi_rw,
+ * see bio.h and blkdev.h
+ */
+ rq->flags = rw;
+
if (!elv_set_request(q, rq, gfp_mask))
return rq;
@@ -1602,7 +1568,7 @@ static inline struct request *blk_alloc_request(request_queue_t *q,int gfp_mask)
* ioc_batching returns true if the ioc is a valid batching request and
* should be given priority access to a request.
*/
-static inline int ioc_batching(struct io_context *ioc)
+static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
{
if (!ioc)
return 0;
@@ -1612,7 +1578,7 @@ static inline int ioc_batching(struct io_context *ioc)
* even if the batch times out, otherwise we could theoretically
* lose wakeups.
*/
- return ioc->nr_batch_requests == BLK_BATCH_REQ ||
+ return ioc->nr_batch_requests == q->nr_batching ||
(ioc->nr_batch_requests > 0
&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}
@@ -1623,12 +1589,12 @@ static inline int ioc_batching(struct io_context *ioc)
* is the behaviour we want though - once it gets a wakeup it should be given
* a nice run.
*/
-void ioc_set_batching(struct io_context *ioc)
+void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
{
- if (!ioc || ioc_batching(ioc))
+ if (!ioc || ioc_batching(q, ioc))
return;
- ioc->nr_batch_requests = BLK_BATCH_REQ;
+ ioc->nr_batch_requests = q->nr_batching;
ioc->last_waited = jiffies;
}
@@ -1644,11 +1610,14 @@ static void freed_request(request_queue_t *q, int rw)
if (rl->count[rw] < queue_congestion_off_threshold(q))
clear_queue_congested(q, rw);
if (rl->count[rw]+1 <= q->nr_requests) {
+ smp_mb();
if (waitqueue_active(&rl->wait[rw]))
wake_up(&rl->wait[rw]);
- if (!waitqueue_active(&rl->wait[rw]))
- blk_clear_queue_full(q, rw);
+ blk_clear_queue_full(q, rw);
}
+ if (unlikely(waitqueue_active(&rl->drain)) &&
+ !rl->count[READ] && !rl->count[WRITE])
+ wake_up(&rl->drain);
}
#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
@@ -1661,6 +1630,9 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
struct request_list *rl = &q->rq;
struct io_context *ioc = get_io_context(gfp_mask);
+ if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
+ return NULL;
+
spin_lock_irq(q->queue_lock);
if (rl->count[rw]+1 >= q->nr_requests) {
/*
@@ -1670,13 +1642,22 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
* will be blocked.
*/
if (!blk_queue_full(q, rw)) {
- ioc_set_batching(ioc);
+ ioc_set_batching(q, ioc);
blk_set_queue_full(q, rw);
}
}
- if (blk_queue_full(q, rw)
- && !ioc_batching(ioc) && !elv_may_queue(q, rw)) {
+ switch (elv_may_queue(q, rw)) {
+ case ELV_MQUEUE_NO:
+ spin_unlock_irq(q->queue_lock);
+ goto out;
+ case ELV_MQUEUE_MAY:
+ break;
+ case ELV_MQUEUE_MUST:
+ goto get_rq;
+ }
+
+ if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
/*
* The queue is full and the allocating process is not a
* "batcher", and not exempted by the IO scheduler
@@ -1685,12 +1666,13 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
goto out;
}
+get_rq:
rl->count[rw]++;
if (rl->count[rw] >= queue_congestion_on_threshold(q))
set_queue_congested(q, rw);
spin_unlock_irq(q->queue_lock);
- rq = blk_alloc_request(q, gfp_mask);
+ rq = blk_alloc_request(q, rw, gfp_mask);
if (!rq) {
/*
* Allocation failed presumably due to memory. Undo anything
@@ -1705,17 +1687,11 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
goto out;
}
- if (ioc_batching(ioc))
+ if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
INIT_LIST_HEAD(&rq->queuelist);
- /*
- * first three bits are identical in rq->flags and bio->bi_rw,
- * see bio.h and blkdev.h
- */
- rq->flags = rw;
-
rq->errors = 0;
rq->rq_status = RQ_ACTIVE;
rq->bio = rq->biotail = NULL;
@@ -1764,7 +1740,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw)
* See ioc_batching, ioc_set_batching
*/
ioc = get_io_context(GFP_NOIO);
- ioc_set_batching(ioc);
+ ioc_set_batching(q, ioc);
put_io_context(ioc);
}
finish_wait(&rl->wait[rw], &wait);
@@ -2506,6 +2482,70 @@ static inline void blk_partition_remap(struct bio *bio)
}
}
+void blk_finish_queue_drain(request_queue_t *q)
+{
+ struct request_list *rl = &q->rq;
+
+ clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
+ wake_up(&rl->wait[0]);
+ wake_up(&rl->wait[1]);
+ wake_up(&rl->drain);
+}
+
+/*
+ * We rely on the fact that only requests allocated through blk_alloc_request()
+ * have io scheduler private data structures associated with them. Any other
+ * type of request (allocated on stack or through kmalloc()) should not go
+ * to the io scheduler core, but be attached to the queue head instead.
+ */
+void blk_wait_queue_drained(request_queue_t *q)
+{
+ struct request_list *rl = &q->rq;
+ DEFINE_WAIT(wait);
+
+ spin_lock_irq(q->queue_lock);
+ set_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
+
+ while (rl->count[READ] || rl->count[WRITE]) {
+ prepare_to_wait(&rl->drain, &wait, TASK_UNINTERRUPTIBLE);
+
+ if (rl->count[READ] || rl->count[WRITE]) {
+ __generic_unplug_device(q);
+ spin_unlock_irq(q->queue_lock);
+ io_schedule();
+ spin_lock_irq(q->queue_lock);
+ }
+
+ finish_wait(&rl->drain, &wait);
+ }
+
+ spin_unlock_irq(q->queue_lock);
+}
+
+/*
+ * block waiting for the io scheduler being started again.
+ */
+static inline void block_wait_queue_running(request_queue_t *q)
+{
+ DEFINE_WAIT(wait);
+
+ while (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
+ struct request_list *rl = &q->rq;
+
+ prepare_to_wait_exclusive(&rl->drain, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ /*
+ * re-check the condition. avoids using prepare_to_wait()
+ * in the fast path (queue is running)
+ */
+ if (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))
+ io_schedule();
+
+ finish_wait(&rl->drain, &wait);
+ }
+}
+
/**
* generic_make_request: hand a buffer to its device driver for I/O
* @bio: The bio describing the location in memory and on the device.
@@ -2595,6 +2635,8 @@ end_io:
if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))
goto end_io;
+ block_wait_queue_running(q);
+
/*
* If this device has partitions, remap block n
* of partition p to block n+start(p) of the disk.
@@ -3018,6 +3060,7 @@ void kblockd_flush(void)
{
flush_workqueue(kblockd_workqueue);
}
+EXPORT_SYMBOL(kblockd_flush);
int __init blk_dev_init(void)
{
@@ -3036,6 +3079,7 @@ int __init blk_dev_init(void)
blk_max_low_pfn = max_low_pfn;
blk_max_pfn = max_pfn;
+
return 0;
}
@@ -3052,9 +3096,13 @@ void put_io_context(struct io_context *ioc)
if (atomic_dec_and_test(&ioc->refcount)) {
if (ioc->aic && ioc->aic->dtor)
ioc->aic->dtor(ioc->aic);
+ if (ioc->cic && ioc->cic->dtor)
+ ioc->cic->dtor(ioc->cic);
+
kmem_cache_free(iocontext_cachep, ioc);
}
}
+EXPORT_SYMBOL(put_io_context);
/* Called by the exitting task */
void exit_io_context(void)
@@ -3064,14 +3112,15 @@ void exit_io_context(void)
local_irq_save(flags);
ioc = current->io_context;
- if (ioc) {
- if (ioc->aic && ioc->aic->exit)
- ioc->aic->exit(ioc->aic);
- put_io_context(ioc);
- current->io_context = NULL;
- } else
- WARN_ON(1);
+ current->io_context = NULL;
local_irq_restore(flags);
+
+ if (ioc->aic && ioc->aic->exit)
+ ioc->aic->exit(ioc->aic);
+ if (ioc->cic && ioc->cic->exit)
+ ioc->cic->exit(ioc->cic);
+
+ put_io_context(ioc);
}
/*
@@ -3090,22 +3139,42 @@ struct io_context *get_io_context(int gfp_flags)
local_irq_save(flags);
ret = tsk->io_context;
- if (ret == NULL) {
- ret = kmem_cache_alloc(iocontext_cachep, GFP_ATOMIC);
- if (ret) {
- atomic_set(&ret->refcount, 1);
- ret->pid = tsk->pid;
- ret->last_waited = jiffies; /* doesn't matter... */
- ret->nr_batch_requests = 0; /* because this is 0 */
- ret->aic = NULL;
+ if (ret)
+ goto out;
+
+ local_irq_restore(flags);
+
+ ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
+ if (ret) {
+ atomic_set(&ret->refcount, 1);
+ ret->pid = tsk->pid;
+ ret->last_waited = jiffies; /* doesn't matter... */
+ ret->nr_batch_requests = 0; /* because this is 0 */
+ ret->aic = NULL;
+ ret->cic = NULL;
+ spin_lock_init(&ret->lock);
+
+ local_irq_save(flags);
+
+ /*
+ * very unlikely, someone raced with us in setting up the task
+ * io context. free new context and just grab a reference.
+ */
+ if (!tsk->io_context)
tsk->io_context = ret;
+ else {
+ kmem_cache_free(iocontext_cachep, ret);
+ ret = tsk->io_context;
}
- }
- if (ret)
+
+out:
atomic_inc(&ret->refcount);
- local_irq_restore(flags);
+ local_irq_restore(flags);
+ }
+
return ret;
}
+EXPORT_SYMBOL(get_io_context);
void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
@@ -3119,6 +3188,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
*pdst = src;
}
}
+EXPORT_SYMBOL(copy_io_context);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
{
@@ -3127,7 +3197,7 @@ void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
*ioc1 = *ioc2;
*ioc2 = temp;
}
-
+EXPORT_SYMBOL(swap_io_context);
/*
* sysfs parts below
@@ -3285,11 +3355,18 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
.show = queue_max_hw_sectors_show,
};
+static struct queue_sysfs_entry queue_iosched_entry = {
+ .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
+ .show = elv_iosched_show,
+ .store = elv_iosched_store,
+};
+
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr,
+ &queue_iosched_entry.attr,
NULL,
};
diff --git a/drivers/block/noop-iosched.c b/drivers/block/noop-iosched.c
index ffef40be1f92..707dddd7d881 100644
--- a/drivers/block/noop-iosched.c
+++ b/drivers/block/noop-iosched.c
@@ -83,12 +83,31 @@ struct request *elevator_noop_next_request(request_queue_t *q)
return NULL;
}
-elevator_t elevator_noop = {
- .elevator_merge_fn = elevator_noop_merge,
- .elevator_merge_req_fn = elevator_noop_merge_requests,
- .elevator_next_req_fn = elevator_noop_next_request,
- .elevator_add_req_fn = elevator_noop_add_request,
- .elevator_name = "noop",
+static struct elevator_type elevator_noop = {
+ .ops = {
+ .elevator_merge_fn = elevator_noop_merge,
+ .elevator_merge_req_fn = elevator_noop_merge_requests,
+ .elevator_next_req_fn = elevator_noop_next_request,
+ .elevator_add_req_fn = elevator_noop_add_request,
+ },
+ .elevator_name = "noop",
+ .elevator_owner = THIS_MODULE,
};
-EXPORT_SYMBOL(elevator_noop);
+int noop_init(void)
+{
+ return elv_register(&elevator_noop);
+}
+
+void noop_exit(void)
+{
+ elv_unregister(&elevator_noop);
+}
+
+module_init(noop_init);
+module_exit(noop_exit);
+
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("No-op IO scheduler");
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
new file mode 100644
index 000000000000..fb80b6a91f84
--- /dev/null
+++ b/drivers/block/pktcdvd.c
@@ -0,0 +1,2679 @@
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License. See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
+ * DVD-RW devices (aka an exercise in block layer masturbation)
+ *
+ *
+ * TODO: (circa order of when I will fix it)
+ * - Only able to write on CD-RW media right now.
+ * - check host application code on media and set it in write page
+ * - interface for UDF <-> packet to negotiate a new location when a write
+ * fails.
+ * - handle OPC, especially for -RW media
+ *
+ * Theory of operation:
+ *
+ * We use a custom make_request_fn function that forwards reads directly to
+ * the underlying CD device. Write requests are either attached directly to
+ * a live packet_data object, or simply stored sequentially in a list for
+ * later processing by the kcdrwd kernel thread. This driver doesn't use
+ * any elevator functionally as defined by the elevator_s struct, but the
+ * underlying CD device uses a standard elevator.
+ *
+ * This strategy makes it possible to do very late merging of IO requests.
+ * A new bio sent to pkt_make_request can be merged with a live packet_data
+ * object even if the object is in the data gathering state.
+ *
+ *************************************************************************/
+
+#define VERSION_CODE "v0.2.0a 2004-07-14 Jens Axboe (axboe@suse.de) and petero2@telia.com"
+
+#include <linux/pktcdvd.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/file.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/suspend.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_ioctl.h>
+
+#include <asm/uaccess.h>
+
+#if PACKET_DEBUG
+#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+#if PACKET_DEBUG > 1
+#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
+#else
+#define VPRINTK(fmt, args...)
+#endif
+
+#define MAX_SPEED 0xffff
+
+#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
+
+static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
+static struct proc_dir_entry *pkt_proc;
+static int pkt_major;
+static struct semaphore ctl_mutex; /* Serialize open/close/setup/teardown */
+static mempool_t *psd_pool;
+
+
+static void pkt_bio_finished(struct pktcdvd_device *pd)
+{
+ BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
+ if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
+ VPRINTK("pktcdvd: queue empty\n");
+ atomic_set(&pd->iosched.attention, 1);
+ wake_up(&pd->wqueue);
+ }
+}
+
+static void pkt_bio_destructor(struct bio *bio)
+{
+ kfree(bio->bi_io_vec);
+ kfree(bio);
+}
+
+static struct bio *pkt_bio_alloc(int nr_iovecs)
+{
+ struct bio_vec *bvl = NULL;
+ struct bio *bio;
+
+ bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
+ if (!bio)
+ goto no_bio;
+ bio_init(bio);
+
+ bvl = kmalloc(nr_iovecs * sizeof(struct bio_vec), GFP_KERNEL);
+ if (!bvl)
+ goto no_bvl;
+ memset(bvl, 0, nr_iovecs * sizeof(struct bio_vec));
+
+ bio->bi_max_vecs = nr_iovecs;
+ bio->bi_io_vec = bvl;
+ bio->bi_destructor = pkt_bio_destructor;
+
+ return bio;
+
+ no_bvl:
+ kfree(bio);
+ no_bio:
+ return NULL;
+}
+
+/*
+ * Allocate a packet_data struct
+ */
+static struct packet_data *pkt_alloc_packet_data(void)
+{
+ int i;
+ struct packet_data *pkt;
+
+ pkt = kmalloc(sizeof(struct packet_data), GFP_KERNEL);
+ if (!pkt)
+ goto no_pkt;
+ memset(pkt, 0, sizeof(struct packet_data));
+
+ pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE);
+ if (!pkt->w_bio)
+ goto no_bio;
+
+ for (i = 0; i < PAGES_PER_PACKET; i++) {
+ pkt->pages[i] = alloc_page(GFP_KERNEL);
+ if (!pkt->pages[i])
+ goto no_page;
+ }
+ for (i = 0; i < PAGES_PER_PACKET; i++)
+ clear_page(page_address(pkt->pages[i]));
+
+ spin_lock_init(&pkt->lock);
+
+ for (i = 0; i < PACKET_MAX_SIZE; i++) {
+ struct bio *bio = pkt_bio_alloc(1);
+ if (!bio)
+ goto no_rd_bio;
+ pkt->r_bios[i] = bio;
+ }
+
+ return pkt;
+
+no_rd_bio:
+ for (i = 0; i < PACKET_MAX_SIZE; i++) {
+ struct bio *bio = pkt->r_bios[i];
+ if (bio)
+ bio_put(bio);
+ }
+
+no_page:
+ for (i = 0; i < PAGES_PER_PACKET; i++)
+ if (pkt->pages[i])
+ __free_page(pkt->pages[i]);
+ bio_put(pkt->w_bio);
+no_bio:
+ kfree(pkt);
+no_pkt:
+ return NULL;
+}
+
+/*
+ * Free a packet_data struct
+ */
+static void pkt_free_packet_data(struct packet_data *pkt)
+{
+ int i;
+
+ for (i = 0; i < PACKET_MAX_SIZE; i++) {
+ struct bio *bio = pkt->r_bios[i];
+ if (bio)
+ bio_put(bio);
+ }
+ for (i = 0; i < PAGES_PER_PACKET; i++)
+ __free_page(pkt->pages[i]);
+ bio_put(pkt->w_bio);
+ kfree(pkt);
+}
+
+static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
+{
+ struct packet_data *pkt, *next;
+
+ BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
+
+ list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
+ pkt_free_packet_data(pkt);
+ }
+}
+
+static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
+{
+ struct packet_data *pkt;
+
+ INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
+ INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
+ spin_lock_init(&pd->cdrw.active_list_lock);
+ while (nr_packets > 0) {
+ pkt = pkt_alloc_packet_data();
+ if (!pkt) {
+ pkt_shrink_pktlist(pd);
+ return 0;
+ }
+ pkt->id = nr_packets;
+ pkt->pd = pd;
+ list_add(&pkt->list, &pd->cdrw.pkt_free_list);
+ nr_packets--;
+ }
+ return 1;
+}
+
+static void *pkt_rb_alloc(int gfp_mask, void *data)
+{
+ return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
+}
+
+static void pkt_rb_free(void *ptr, void *data)
+{
+ kfree(ptr);
+}
+
+static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
+{
+ struct rb_node *n = rb_next(&node->rb_node);
+ if (!n)
+ return NULL;
+ return rb_entry(n, struct pkt_rb_node, rb_node);
+}
+
+static inline void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
+{
+ rb_erase(&node->rb_node, &pd->bio_queue);
+ mempool_free(node, pd->rb_pool);
+ pd->bio_queue_size--;
+ BUG_ON(pd->bio_queue_size < 0);
+}
+
+/*
+ * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
+ */
+static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
+{
+ struct rb_node *n = pd->bio_queue.rb_node;
+ struct rb_node *next;
+ struct pkt_rb_node *tmp;
+
+ if (!n) {
+ BUG_ON(pd->bio_queue_size > 0);
+ return NULL;
+ }
+
+ for (;;) {
+ tmp = rb_entry(n, struct pkt_rb_node, rb_node);
+ if (s <= tmp->bio->bi_sector)
+ next = n->rb_left;
+ else
+ next = n->rb_right;
+ if (!next)
+ break;
+ n = next;
+ }
+
+ if (s > tmp->bio->bi_sector) {
+ tmp = pkt_rbtree_next(tmp);
+ if (!tmp)
+ return NULL;
+ }
+ BUG_ON(s > tmp->bio->bi_sector);
+ return tmp;
+}
+
+/*
+ * Insert a node into the pd->bio_queue rb tree.
+ */
+static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
+{
+ struct rb_node **p = &pd->bio_queue.rb_node;
+ struct rb_node *parent = NULL;
+ sector_t s = node->bio->bi_sector;
+ struct pkt_rb_node *tmp;
+
+ while (*p) {
+ parent = *p;
+ tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
+ if (s < tmp->bio->bi_sector)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&node->rb_node, parent, p);
+ rb_insert_color(&node->rb_node, &pd->bio_queue);
+ pd->bio_queue_size++;
+}
+
+/*
+ * Add a bio to a single linked list defined by its head and tail pointers.
+ */
+static inline void pkt_add_list_last(struct bio *bio, struct bio **list_head, struct bio **list_tail)
+{
+ bio->bi_next = NULL;
+ if (*list_tail) {
+ BUG_ON((*list_head) == NULL);
+ (*list_tail)->bi_next = bio;
+ (*list_tail) = bio;
+ } else {
+ BUG_ON((*list_head) != NULL);
+ (*list_head) = bio;
+ (*list_tail) = bio;
+ }
+}
+
+/*
+ * Remove and return the first bio from a single linked list defined by its
+ * head and tail pointers.
+ */
+static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio **list_tail)
+{
+ struct bio *bio;
+
+ if (*list_head == NULL)
+ return NULL;
+
+ bio = *list_head;
+ *list_head = bio->bi_next;
+ if (*list_head == NULL)
+ *list_tail = NULL;
+
+ bio->bi_next = NULL;
+ return bio;
+}
+
+/*
+ * Send a packet_command to the underlying block device and
+ * wait for completion.
+ */
+static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
+{
+ char sense[SCSI_SENSE_BUFFERSIZE];
+ request_queue_t *q;
+ struct request *rq;
+ DECLARE_COMPLETION(wait);
+ int err = 0;
+
+ q = bdev_get_queue(pd->bdev);
+
+ rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? WRITE : READ,
+ __GFP_WAIT);
+ rq->errors = 0;
+ rq->rq_disk = pd->bdev->bd_disk;
+ rq->bio = NULL;
+ rq->buffer = NULL;
+ rq->timeout = 60*HZ;
+ rq->data = cgc->buffer;
+ rq->data_len = cgc->buflen;
+ rq->sense = sense;
+ memset(sense, 0, sizeof(sense));
+ rq->sense_len = 0;
+ rq->flags |= REQ_BLOCK_PC | REQ_HARDBARRIER;
+ if (cgc->quiet)
+ rq->flags |= REQ_QUIET;
+ memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
+ if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
+ memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
+
+ rq->ref_count++;
+ rq->flags |= REQ_NOMERGE;
+ rq->waiting = &wait;
+ elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
+ generic_unplug_device(q);
+ wait_for_completion(&wait);
+
+ if (rq->errors)
+ err = -EIO;
+
+ blk_put_request(rq);
+ return err;
+}
+
+/*
+ * A generic sense dump / resolve mechanism should be implemented across
+ * all ATAPI + SCSI devices.
+ */
+static void pkt_dump_sense(struct packet_command *cgc)
+{
+ static char *info[9] = { "No sense", "Recovered error", "Not ready",
+ "Medium error", "Hardware error", "Illegal request",
+ "Unit attention", "Data protect", "Blank check" };
+ int i;
+ struct request_sense *sense = cgc->sense;
+
+ printk("pktcdvd:");
+ for (i = 0; i < CDROM_PACKET_SIZE; i++)
+ printk(" %02x", cgc->cmd[i]);
+ printk(" - ");
+
+ if (sense == NULL) {
+ printk("no sense\n");
+ return;
+ }
+
+ printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);
+
+ if (sense->sense_key > 8) {
+ printk(" (INVALID)\n");
+ return;
+ }
+
+ printk(" (%s)\n", info[sense->sense_key]);
+}
+
+/*
+ * flush the drive cache to media
+ */
+static int pkt_flush_cache(struct pktcdvd_device *pd)
+{
+ struct packet_command cgc;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_FLUSH_CACHE;
+ cgc.quiet = 1;
+
+ /*
+ * the IMMED bit -- we default to not setting it, although that
+ * would allow a much faster close, this is safer
+ */
+#if 0
+ cgc.cmd[1] = 1 << 1;
+#endif
+ return pkt_generic_packet(pd, &cgc);
+}
+
+/*
+ * speed is given as the normal factor, e.g. 4 for 4x
+ */
+static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed)
+{
+ struct packet_command cgc;
+ struct request_sense sense;
+ int ret;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.sense = &sense;
+ cgc.cmd[0] = GPCMD_SET_SPEED;
+ cgc.cmd[2] = (read_speed >> 8) & 0xff;
+ cgc.cmd[3] = read_speed & 0xff;
+ cgc.cmd[4] = (write_speed >> 8) & 0xff;
+ cgc.cmd[5] = write_speed & 0xff;
+
+ if ((ret = pkt_generic_packet(pd, &cgc)))
+ pkt_dump_sense(&cgc);
+
+ return ret;
+}
+
+/*
+ * Queue a bio for processing by the low-level CD device. Must be called
+ * from process context.
+ */
+static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_prio_read)
+{
+ spin_lock(&pd->iosched.lock);
+ if (bio_data_dir(bio) == READ) {
+ pkt_add_list_last(bio, &pd->iosched.read_queue,
+ &pd->iosched.read_queue_tail);
+ if (high_prio_read)
+ pd->iosched.high_prio_read = 1;
+ } else {
+ pkt_add_list_last(bio, &pd->iosched.write_queue,
+ &pd->iosched.write_queue_tail);
+ }
+ spin_unlock(&pd->iosched.lock);
+
+ atomic_set(&pd->iosched.attention, 1);
+ wake_up(&pd->wqueue);
+}
+
+/*
+ * Process the queued read/write requests. This function handles special
+ * requirements for CDRW drives:
+ * - A cache flush command must be inserted before a read request if the
+ * previous request was a write.
+ * - Switching between reading and writing is slow, so don't it more often
+ * than necessary.
+ * - Set the read speed according to current usage pattern. When only reading
+ * from the device, it's best to use the highest possible read speed, but
+ * when switching often between reading and writing, it's better to have the
+ * same read and write speeds.
+ * - Reads originating from user space should have higher priority than reads
+ * originating from pkt_gather_data, because some process is usually waiting
+ * on reads of the first kind.
+ */
+static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
+{
+ request_queue_t *q;
+
+ if (atomic_read(&pd->iosched.attention) == 0)
+ return;
+ atomic_set(&pd->iosched.attention, 0);
+
+ q = bdev_get_queue(pd->bdev);
+
+ for (;;) {
+ struct bio *bio;
+ int reads_queued, writes_queued, high_prio_read;
+
+ spin_lock(&pd->iosched.lock);
+ reads_queued = (pd->iosched.read_queue != NULL);
+ writes_queued = (pd->iosched.write_queue != NULL);
+ if (!reads_queued)
+ pd->iosched.high_prio_read = 0;
+ high_prio_read = pd->iosched.high_prio_read;
+ spin_unlock(&pd->iosched.lock);
+
+ if (!reads_queued && !writes_queued)
+ break;
+
+ if (pd->iosched.writing) {
+ if (high_prio_read || (!writes_queued && reads_queued)) {
+ if (atomic_read(&pd->cdrw.pending_bios) > 0) {
+ VPRINTK("pktcdvd: write, waiting\n");
+ break;
+ }
+ pkt_flush_cache(pd);
+ pd->iosched.writing = 0;
+ }
+ } else {
+ if (!reads_queued && writes_queued) {
+ if (atomic_read(&pd->cdrw.pending_bios) > 0) {
+ VPRINTK("pktcdvd: read, waiting\n");
+ break;
+ }
+ pd->iosched.writing = 1;
+ }
+ }
+
+ spin_lock(&pd->iosched.lock);
+ if (pd->iosched.writing) {
+ bio = pkt_get_list_first(&pd->iosched.write_queue,
+ &pd->iosched.write_queue_tail);
+ } else {
+ bio = pkt_get_list_first(&pd->iosched.read_queue,
+ &pd->iosched.read_queue_tail);
+ }
+ spin_unlock(&pd->iosched.lock);
+
+ if (!bio)
+ continue;
+
+ if (bio_data_dir(bio) == READ)
+ pd->iosched.successive_reads += bio->bi_size >> 10;
+ else
+ pd->iosched.successive_reads = 0;
+ if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
+ if (pd->read_speed == pd->write_speed) {
+ pd->read_speed = MAX_SPEED;
+ pkt_set_speed(pd, pd->write_speed, pd->read_speed);
+ }
+ } else {
+ if (pd->read_speed != pd->write_speed) {
+ pd->read_speed = pd->write_speed;
+ pkt_set_speed(pd, pd->write_speed, pd->read_speed);
+ }
+ }
+
+ atomic_inc(&pd->cdrw.pending_bios);
+ generic_make_request(bio);
+ }
+}
+
+/*
+ * Special care is needed if the underlying block device has a small
+ * max_phys_segments value.
+ */
+static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q)
+{
+ if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
+ /*
+ * The cdrom device can handle one segment/frame
+ */
+ clear_bit(PACKET_MERGE_SEGS, &pd->flags);
+ return 0;
+ } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
+ /*
+ * We can handle this case at the expense of some extra memory
+ * copies during write operations
+ */
+ set_bit(PACKET_MERGE_SEGS, &pd->flags);
+ return 0;
+ } else {
+ printk("pktcdvd: cdrom max_phys_segments too small\n");
+ return -EIO;
+ }
+}
+
+/*
+ * Copy CD_FRAMESIZE bytes from src_bio into a destination page
+ */
+static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs,
+ struct page *dst_page, int dst_offs)
+{
+ unsigned int copy_size = CD_FRAMESIZE;
+
+ while (copy_size > 0) {
+ struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
+ void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
+ src_bvl->bv_offset + offs;
+ void *vto = page_address(dst_page) + dst_offs;
+ int len = min_t(int, copy_size, src_bvl->bv_len - offs);
+
+ BUG_ON(len < 0);
+ memcpy(vto, vfrom, len);
+ kunmap_atomic(src_bvl->bv_page, KM_USER0);
+
+ seg++;
+ offs = 0;
+ dst_offs += len;
+ copy_size -= len;
+ }
+}
+
+/*
+ * Copy all data for this packet to pkt->pages[], so that
+ * a) The number of required segments for the write bio is minimized, which
+ * is necessary for some scsi controllers.
+ * b) The data can be used as cache to avoid read requests if we receive a
+ * new write request for the same zone.
+ */
+static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, int *offsets)
+{
+ int f, p, offs;
+
+ /* Copy all data to pkt->pages[] */
+ p = 0;
+ offs = 0;
+ for (f = 0; f < pkt->frames; f++) {
+ if (pages[f] != pkt->pages[p]) {
+ void *vfrom = kmap_atomic(pages[f], KM_USER0) + offsets[f];
+ void *vto = page_address(pkt->pages[p]) + offs;
+ memcpy(vto, vfrom, CD_FRAMESIZE);
+ kunmap_atomic(pages[f], KM_USER0);
+ pages[f] = pkt->pages[p];
+ offsets[f] = offs;
+ } else {
+ BUG_ON(offsets[f] != offs);
+ }
+ offs += CD_FRAMESIZE;
+ if (offs >= PAGE_SIZE) {
+ BUG_ON(offs > PAGE_SIZE);
+ offs = 0;
+ p++;
+ }
+ }
+}
+
+static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
+{
+ struct packet_data *pkt = bio->bi_private;
+ struct pktcdvd_device *pd = pkt->pd;
+ BUG_ON(!pd);
+
+ if (bio->bi_size)
+ return 1;
+
+ VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
+ (unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
+
+ if (err)
+ atomic_inc(&pkt->io_errors);
+ if (atomic_dec_and_test(&pkt->io_wait)) {
+ atomic_inc(&pkt->run_sm);
+ wake_up(&pd->wqueue);
+ }
+ pkt_bio_finished(pd);
+
+ return 0;
+}
+
+static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err)
+{
+ struct packet_data *pkt = bio->bi_private;
+ struct pktcdvd_device *pd = pkt->pd;
+ BUG_ON(!pd);
+
+ if (bio->bi_size)
+ return 1;
+
+ VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
+
+ pd->stats.pkt_ended++;
+
+ pkt_bio_finished(pd);
+ atomic_dec(&pkt->io_wait);
+ atomic_inc(&pkt->run_sm);
+ wake_up(&pd->wqueue);
+ return 0;
+}
+
+/*
+ * Schedule reads for the holes in a packet
+ */
+static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+ int frames_read = 0;
+ struct bio *bio;
+ int f;
+ char written[PACKET_MAX_SIZE];
+
+ BUG_ON(!pkt->orig_bios);
+
+ atomic_set(&pkt->io_wait, 0);
+ atomic_set(&pkt->io_errors, 0);
+
+ if (pkt->cache_valid) {
+ VPRINTK("pkt_gather_data: zone %llx cached\n",
+ (unsigned long long)pkt->sector);
+ goto out_account;
+ }
+
+ /*
+ * Figure out which frames we need to read before we can write.
+ */
+ memset(written, 0, sizeof(written));
+ spin_lock(&pkt->lock);
+ for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
+ int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
+ int num_frames = bio->bi_size / CD_FRAMESIZE;
+ BUG_ON(first_frame < 0);
+ BUG_ON(first_frame + num_frames > pkt->frames);
+ for (f = first_frame; f < first_frame + num_frames; f++)
+ written[f] = 1;
+ }
+ spin_unlock(&pkt->lock);
+
+ /*
+ * Schedule reads for missing parts of the packet.
+ */
+ for (f = 0; f < pkt->frames; f++) {
+ int p, offset;
+ if (written[f])
+ continue;
+ bio = pkt->r_bios[f];
+ bio_init(bio);
+ bio->bi_max_vecs = 1;
+ bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+ bio->bi_bdev = pd->bdev;
+ bio->bi_end_io = pkt_end_io_read;
+ bio->bi_private = pkt;
+
+ p = (f * CD_FRAMESIZE) / PAGE_SIZE;
+ offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+ VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
+ f, pkt->pages[p], offset);
+ if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
+ BUG();
+
+ atomic_inc(&pkt->io_wait);
+ bio->bi_rw = READ;
+ pkt_queue_bio(pd, bio, 0);
+ frames_read++;
+ }
+
+out_account:
+ VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
+ frames_read, (unsigned long long)pkt->sector);
+ pd->stats.pkt_started++;
+ pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
+ pd->stats.secs_w += pd->settings.size;
+}
+
+/*
+ * Find a packet matching zone, or the least recently used packet if
+ * there is no match.
+ */
+static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
+{
+ struct packet_data *pkt;
+
+ list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
+ if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
+ list_del_init(&pkt->list);
+ if (pkt->sector != zone)
+ pkt->cache_valid = 0;
+ break;
+ }
+ }
+ return pkt;
+}
+
+static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+ if (pkt->cache_valid) {
+ list_add(&pkt->list, &pd->cdrw.pkt_free_list);
+ } else {
+ list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
+ }
+}
+
+/*
+ * recover a failed write, query for relocation if possible
+ *
+ * returns 1 if recovery is possible, or 0 if not
+ *
+ */
+static int pkt_start_recovery(struct packet_data *pkt)
+{
+ /*
+ * FIXME. We need help from the file system to implement
+ * recovery handling.
+ */
+ return 0;
+#if 0
+ struct request *rq = pkt->rq;
+ struct pktcdvd_device *pd = rq->rq_disk->private_data;
+ struct block_device *pkt_bdev;
+ struct super_block *sb = NULL;
+ unsigned long old_block, new_block;
+ sector_t new_sector;
+
+ pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
+ if (pkt_bdev) {
+ sb = get_super(pkt_bdev);
+ bdput(pkt_bdev);
+ }
+
+ if (!sb)
+ return 0;
+
+ if (!sb->s_op || !sb->s_op->relocate_blocks)
+ goto out;
+
+ old_block = pkt->sector / (CD_FRAMESIZE >> 9);
+ if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
+ goto out;
+
+ new_sector = new_block * (CD_FRAMESIZE >> 9);
+ pkt->sector = new_sector;
+
+ pkt->bio->bi_sector = new_sector;
+ pkt->bio->bi_next = NULL;
+ pkt->bio->bi_flags = 1 << BIO_UPTODATE;
+ pkt->bio->bi_idx = 0;
+
+ BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
+ BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
+ BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
+ BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
+ BUG_ON(pkt->bio->bi_private != pkt);
+
+ drop_super(sb);
+ return 1;
+
+out:
+ drop_super(sb);
+ return 0;
+#endif
+}
+
+static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
+{
+#if PACKET_DEBUG > 1
+ static const char *state_name[] = {
+ "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
+ };
+ enum packet_data_state old_state = pkt->state;
+ VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
+ state_name[old_state], state_name[state]);
+#endif
+ pkt->state = state;
+}
+
+/*
+ * Scan the work queue to see if we can start a new packet.
+ * returns non-zero if any work was done.
+ */
+static int pkt_handle_queue(struct pktcdvd_device *pd)
+{
+ struct packet_data *pkt, *p;
+ struct bio *bio = NULL;
+ sector_t zone = 0; /* Suppress gcc warning */
+ struct pkt_rb_node *node, *first_node;
+ struct rb_node *n;
+
+ VPRINTK("handle_queue\n");
+
+ atomic_set(&pd->scan_queue, 0);
+
+ if (list_empty(&pd->cdrw.pkt_free_list)) {
+ VPRINTK("handle_queue: no pkt\n");
+ return 0;
+ }
+
+ /*
+ * Try to find a zone we are not already working on.
+ */
+ spin_lock(&pd->lock);
+ first_node = pkt_rbtree_find(pd, pd->current_sector);
+ if (!first_node) {
+ n = rb_first(&pd->bio_queue);
+ if (n)
+ first_node = rb_entry(n, struct pkt_rb_node, rb_node);
+ }
+ node = first_node;
+ while (node) {
+ bio = node->bio;
+ zone = ZONE(bio->bi_sector, pd);
+ list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
+ if (p->sector == zone)
+ goto try_next_bio;
+ }
+ break;
+try_next_bio:
+ node = pkt_rbtree_next(node);
+ if (!node) {
+ n = rb_first(&pd->bio_queue);
+ if (n)
+ node = rb_entry(n, struct pkt_rb_node, rb_node);
+ }
+ if (node == first_node)
+ node = NULL;
+ }
+ spin_unlock(&pd->lock);
+ if (!bio) {
+ VPRINTK("handle_queue: no bio\n");
+ return 0;
+ }
+
+ pkt = pkt_get_packet_data(pd, zone);
+ BUG_ON(!pkt);
+
+ pd->current_sector = zone + pd->settings.size;
+ pkt->sector = zone;
+ pkt->frames = pd->settings.size >> 2;
+ BUG_ON(pkt->frames > PACKET_MAX_SIZE);
+ pkt->write_size = 0;
+
+ /*
+ * Scan work queue for bios in the same zone and link them
+ * to this packet.
+ */
+ spin_lock(&pd->lock);
+ VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
+ while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
+ bio = node->bio;
+ VPRINTK("pkt_handle_queue: found zone=%llx\n",
+ (unsigned long long)ZONE(bio->bi_sector, pd));
+ if (ZONE(bio->bi_sector, pd) != zone)
+ break;
+ pkt_rbtree_erase(pd, node);
+ spin_lock(&pkt->lock);
+ pkt_add_list_last(bio, &pkt->orig_bios, &pkt->orig_bios_tail);
+ pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+ spin_unlock(&pkt->lock);
+ }
+ spin_unlock(&pd->lock);
+
+ pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
+ pkt_set_state(pkt, PACKET_WAITING_STATE);
+ atomic_set(&pkt->run_sm, 1);
+
+ spin_lock(&pd->cdrw.active_list_lock);
+ list_add(&pkt->list, &pd->cdrw.pkt_active_list);
+ spin_unlock(&pd->cdrw.active_list_lock);
+
+ return 1;
+}
+
+/*
+ * Assemble a bio to write one packet and queue the bio for processing
+ * by the underlying block device.
+ */
+static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+ struct bio *bio;
+ struct page *pages[PACKET_MAX_SIZE];
+ int offsets[PACKET_MAX_SIZE];
+ int f;
+ int frames_write;
+
+ for (f = 0; f < pkt->frames; f++) {
+ pages[f] = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
+ offsets[f] = (f * CD_FRAMESIZE) % PAGE_SIZE;
+ }
+
+ /*
+ * Fill-in pages[] and offsets[] with data from orig_bios.
+ */
+ frames_write = 0;
+ spin_lock(&pkt->lock);
+ for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
+ int segment = bio->bi_idx;
+ int src_offs = 0;
+ int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
+ int num_frames = bio->bi_size / CD_FRAMESIZE;
+ BUG_ON(first_frame < 0);
+ BUG_ON(first_frame + num_frames > pkt->frames);
+ for (f = first_frame; f < first_frame + num_frames; f++) {
+ struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);
+
+ while (src_offs >= src_bvl->bv_len) {
+ src_offs -= src_bvl->bv_len;
+ segment++;
+ BUG_ON(segment >= bio->bi_vcnt);
+ src_bvl = bio_iovec_idx(bio, segment);
+ }
+
+ if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
+ pages[f] = src_bvl->bv_page;
+ offsets[f] = src_bvl->bv_offset + src_offs;
+ } else {
+ pkt_copy_bio_data(bio, segment, src_offs,
+ pages[f], offsets[f]);
+ }
+ src_offs += CD_FRAMESIZE;
+ frames_write++;
+ }
+ }
+ pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
+ spin_unlock(&pkt->lock);
+
+ VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
+ frames_write, (unsigned long long)pkt->sector);
+ BUG_ON(frames_write != pkt->write_size);
+
+ if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
+ pkt_make_local_copy(pkt, pages, offsets);
+ pkt->cache_valid = 1;
+ } else {
+ pkt->cache_valid = 0;
+ }
+
+ /* Start the write request */
+ bio_init(pkt->w_bio);
+ pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
+ pkt->w_bio->bi_sector = pkt->sector;
+ pkt->w_bio->bi_bdev = pd->bdev;
+ pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
+ pkt->w_bio->bi_private = pkt;
+ for (f = 0; f < pkt->frames; f++) {
+ if ((f + 1 < pkt->frames) && (pages[f + 1] == pages[f]) &&
+ (offsets[f + 1] = offsets[f] + CD_FRAMESIZE)) {
+ if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE * 2, offsets[f]))
+ BUG();
+ f++;
+ } else {
+ if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE, offsets[f]))
+ BUG();
+ }
+ }
+ VPRINTK("pktcdvd: vcnt=%d\n", pkt->w_bio->bi_vcnt);
+
+ atomic_set(&pkt->io_wait, 1);
+ pkt->w_bio->bi_rw = WRITE;
+ pkt_queue_bio(pd, pkt->w_bio, 0);
+}
+
+static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
+{
+ struct bio *bio, *next;
+
+ if (!uptodate)
+ pkt->cache_valid = 0;
+
+ /* Finish all bios corresponding to this packet */
+ bio = pkt->orig_bios;
+ while (bio) {
+ next = bio->bi_next;
+ bio->bi_next = NULL;
+ bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
+ bio = next;
+ }
+ pkt->orig_bios = pkt->orig_bios_tail = NULL;
+}
+
/*
 * Advance a packet through its state machine until it either has to wait
 * for I/O completion (return) or reaches PACKET_FINISHED_STATE.
 */
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int uptodate;

	VPRINTK("run_state_machine: pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			/* Keep collecting bios while the packet is not full
			 * and its wait timeout has not yet expired. */
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			/* Read any frames not supplied by queued writes. */
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			/* Gather reads still in flight. */
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			/* Write bio still in flight. */
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			/* Retry the write if recovery is possible, otherwise
			 * give up and finish the packet (with error). */
			if (pkt_start_recovery(pkt)) {
				pkt_start_write(pd, pkt);
			} else {
				VPRINTK("No recovery possible\n");
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			}
			break;

		case PACKET_FINISHED_STATE:
			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
			pkt_finish_packet(pkt, uptodate);
			return;

		default:
			BUG();
			break;
		}
	}
}
+
/*
 * Run the state machine for every packet that requested it, then move
 * finished packets back to the free list.
 */
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	VPRINTK("pkt_handle_packets\n");

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			/* A zone just became free: rescan the work queue. */
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
+
+static void pkt_count_states(struct pktcdvd_device *pd, int *states)
+{
+ struct packet_data *pkt;
+ int i;
+
+ for (i = 0; i <= PACKET_NUM_STATES; i++)
+ states[i] = 0;
+
+ spin_lock(&pd->cdrw.active_list_lock);
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ states[pkt->state]++;
+ }
+ spin_unlock(&pd->cdrw.active_list_lock);
+}
+
/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, -20);

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2], states[3],
					states[4], states[5]);
			}

			/* Sleep no longer than the shortest packet timeout. */
			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			/* Push any queued requests to the device before sleeping. */
			generic_unplug_device(bdev_get_queue(pd->bdev));

			VPRINTK("kcdrwd: sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			VPRINTK("kcdrwd: wake up\n");

			/* make swsusp happy with our thread */
			if (current->flags & PF_FREEZE)
				refrigerator(PF_FREEZE);

			/* Charge the elapsed sleep against every waiting
			 * packet; expired ones get their state machine run. */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (signal_pending(current)) {
				flush_signals(current);
			}
			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
+
+static void pkt_print_settings(struct pktcdvd_device *pd)
+{
+ printk("pktcdvd: %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
+ printk("%u blocks, ", pd->settings.size >> 2);
+ printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
+}
+
+static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc,
+ int page_code, int page_control)
+{
+ memset(cgc->cmd, 0, sizeof(cgc->cmd));
+
+ cgc->cmd[0] = GPCMD_MODE_SENSE_10;
+ cgc->cmd[2] = page_code | (page_control << 6);
+ cgc->cmd[7] = cgc->buflen >> 8;
+ cgc->cmd[8] = cgc->buflen & 0xff;
+ cgc->data_direction = CGC_DATA_READ;
+ return pkt_generic_packet(pd, cgc);
+}
+
+static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
+{
+ memset(cgc->cmd, 0, sizeof(cgc->cmd));
+ memset(cgc->buffer, 0, 2);
+ cgc->cmd[0] = GPCMD_MODE_SELECT_10;
+ cgc->cmd[1] = 0x10; /* PF */
+ cgc->cmd[7] = cgc->buflen >> 8;
+ cgc->cmd[8] = cgc->buflen & 0xff;
+ cgc->data_direction = CGC_DATA_WRITE;
+ return pkt_generic_packet(pd, cgc);
+}
+
/*
 * Fetch the drive's disc information block via READ DISC INFO,
 * sized according to what the drive reports it can supply.
 */
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;	/* first ask only for the length field */
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	/* never ask for more than our buffer can hold */
	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
+
/*
 * Fetch track/rzone information for the given track number, using the
 * same two-step probe-then-full-read scheme as pkt_get_disc_info().
 */
static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;			/* address/number type */
	cgc.cmd[4] = (track & 0xff00) >> 8;	/* track number, MSB */
	cgc.cmd[5] = track & 0xff;		/* track number, LSB */
	cgc.cmd[8] = 8;				/* just enough for the length field */
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* re-read with the length the drive actually supplies */
	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
+
/*
 * Determine the last written address on the disc, either from the track's
 * "last recorded address" field or, failing that, computed from the track
 * start/size and free-block count.
 */
static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret = -1;	/* NOTE(review): dead initializer, overwritten below */

	if ((ret = pkt_get_disc_info(pd, &di)))
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}
+
/*
 * write mode select package based on pd->settings
 */
static int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW */
	if (pd->mmc3_profile == 0x1a)
		return 0;

	/* First a short mode sense to learn the page size and the
	 * block-descriptor offset. */
	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		printk("pktcdvd: write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	/* packet size is stored internally in 512-byte units; the page
	 * wants 2kB frames */
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}
+
/*
 * 0 -- we can write to this track, 1 -- we can't
 */
static int pkt_good_track(track_information *ti)
{
	/*
	 * only good for CD-RW at the moment, not DVD-RW
	 */

	/*
	 * FIXME: only for FP
	 */
	if (ti->fp == 0)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	/* non-reserved, non-blank packet track */
	if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1)
		return 0;

	/* non-reserved, blank packet track */
	if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1)
		return 0;

	/* reserved, non-blank packet track */
	if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1)
		return 0;

	printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 1;
}
+
/*
 * 0 -- we can write to this disc, 1 -- we can't
 */
static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
	case 0x0a: /* CD-RW */
	case 0xffff: /* MMC3 not supported */
		break;	/* CD-RW needs the additional checks below */
	case 0x1a: /* DVD+RW */
	case 0x13: /* DVD-RW */
		return 0;	/* always writable, no further checks */
	default:
		printk("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile);
		return 1;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		printk("pktcdvd: Unknown disc. No track?\n");
		return 1;
	}

	/* only CD-ROM (0x00) and CD-ROM XA (0x20) data discs are usable */
	if (di->disc_type != 0x20 && di->disc_type != 0) {
		printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
		return 1;
	}

	if (di->erasable == 0) {
		printk("pktcdvd: Disc not erasable\n");
		return 1;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		printk("pktcdvd: Can't write to last track (reserved)\n");
		return 1;
	}

	return 0;
}
+
/*
 * Probe the inserted medium and fill in pd->settings (packet size,
 * track/block mode, next-writable / last-recorded addresses).
 * Returns 0 on success, non-zero / -ENXIO if the medium is unusable.
 */
static int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	/* GET CONFIGURATION: current profile lives in bytes 6-7 */
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	/* 0xffff means "MMC3 not supported" to the checks below */
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		printk("failed get_disc\n");
		return ret;
	}

	if (pkt_good_disc(pd, &di))
		return -ENXIO;

	switch (pd->mmc3_profile) {
	case 0x1a: /* DVD+RW */
		printk("pktcdvd: inserted media is DVD+RW\n");
		break;
	case 0x13: /* DVD-RW */
		printk("pktcdvd: inserted media is DVD-RW\n");
		break;
	default:
		printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
		break;
	}
	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		printk("pktcdvd: failed get_track\n");
		return ret;
	}

	if (pkt_good_track(&ti)) {
		printk("pktcdvd: can't write to this track\n");
		return -ENXIO;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		printk("pktcdvd: detected zero packet size!\n");
		pd->settings.size = 128;	/* fall back to 64kB packets */
	}
	pd->settings.fp = ti.fp;
	/* offset of the first packet within its power-of-two zone */
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
	case PACKET_MODE1:
		pd->settings.block_mode = PACKET_BLOCK_MODE1;
		break;
	case PACKET_MODE2:
		pd->settings.block_mode = PACKET_BLOCK_MODE2;
		break;
	default:
		printk("pktcdvd: unknown data mode\n");
		return 1;
	}
	return 0;
}
+
/*
 * enable/disable write caching on drive
 */
static int pkt_write_caching(struct pktcdvd_device *pd, int set)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	int ret;

	memset(buf, 0, sizeof(buf));
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
		return ret;

	/* WCE bit (bit 2) in the caching mode page */
	buf[pd->mode_offset + 10] |= (!!set << 2);

	/* mode data length is in the first two bytes of the response */
	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		printk("pktcdvd: write caching control failed\n");
		pkt_dump_sense(&cgc);
	} else if (!ret && set)	/* NOTE(review): !ret is redundant here */
		printk("pktcdvd: enabled write caching on %s\n", pd->name);
	return ret;
}
+
+static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
+{
+ struct packet_command cgc;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
+ cgc.cmd[4] = lockflag ? 1 : 0;
+ return pkt_generic_packet(pd, &cgc);
+}
+
/*
 * Returns drive maximum write speed
 */
static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	memset(buf, 0, sizeof(buf));
	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sense = &sense;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		/* Retry with an explicit buffer length.
		 * NOTE(review): cap_buf[1] is read here even though the
		 * first mode sense failed — presumably the drive filled in
		 * at least the page header; confirm against real hardware. */
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(&cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}
+
/* These tables from cdrecord - I don't have orange book */
/* They map the 4-bit ATIP A1 speed field to a speed multiplier. */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3   4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};
+
+/*
+ * reads the maximum media speed from ATIP
+ */
+static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
+{
+ struct packet_command cgc;
+ struct request_sense sense;
+ unsigned char buf[64];
+ unsigned int size, st, sp;
+ int ret;
+
+ init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
+ cgc.sense = &sense;
+ cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+ cgc.cmd[1] = 2;
+ cgc.cmd[2] = 4; /* READ ATIP */
+ cgc.cmd[8] = 2;
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret) {
+ pkt_dump_sense(&cgc);
+ return ret;
+ }
+ size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
+ if (size > sizeof(buf))
+ size = sizeof(buf);
+
+ init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
+ cgc.sense = &sense;
+ cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+ cgc.cmd[1] = 2;
+ cgc.cmd[2] = 4;
+ cgc.cmd[8] = size;
+ ret = pkt_generic_packet(pd, &cgc);
+ if (ret) {
+ pkt_dump_sense(&cgc);
+ return ret;
+ }
+
+ if (!buf[6] & 0x40) {
+ printk("pktcdvd: Disc type is not CD-RW\n");
+ return 1;
+ }
+ if (!buf[6] & 0x4) {
+ printk("pktcdvd: A1 values on media are not valid, maybe not CDRW?\n");
+ return 1;
+ }
+
+ st = (buf[6] >> 3) & 0x7; /* disc sub-type */
+
+ sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
+
+ /* Info from cdrecord */
+ switch (st) {
+ case 0: /* standard speed */
+ *speed = clv_to_speed[sp];
+ break;
+ case 1: /* high speed */
+ *speed = hs_clv_to_speed[sp];
+ break;
+ case 2: /* ultra high speed */
+ *speed = us_clv_to_speed[sp];
+ break;
+ default:
+ printk("pktcdvd: Unknown disc sub-type %d\n",st);
+ return 1;
+ }
+ if (*speed) {
+ printk("pktcdvd: Max. media speed: %d\n",*speed);
+ return 0;
+ } else {
+ printk("pktcdvd: Unknown speed %d for sub-type %d\n",sp,st);
+ return 1;
+ }
+}
+
+static int pkt_perform_opc(struct pktcdvd_device *pd)
+{
+ struct packet_command cgc;
+ struct request_sense sense;
+ int ret;
+
+ VPRINTK("pktcdvd: Performing OPC\n");
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.sense = &sense;
+ cgc.timeout = 60*HZ;
+ cgc.cmd[0] = GPCMD_SEND_OPC;
+ cgc.cmd[1] = 1;
+ if ((ret = pkt_generic_packet(pd, &cgc)))
+ pkt_dump_sense(&cgc);
+ return ret;
+}
+
/*
 * Prepare the device for writing: probe the medium, program the write
 * parameters page, set caching and speed, and run power calibration.
 * Returns 0 on success or -EIO.
 */
static int pkt_open_write(struct pktcdvd_device *pd)
{
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	if ((ret = pkt_probe_settings(pd))) {
		DPRINTK("pktcdvd: %s failed probe\n", pd->name);
		return -EIO;
	}

	if ((ret = pkt_set_write_settings(pd))) {
		DPRINTK("pktcdvd: %s failed saving write settings\n", pd->name);
		return -EIO;
	}

	/* best effort; failure here is not fatal */
	pkt_write_caching(pd, USE_WCACHING);

	if ((ret = pkt_get_max_speed(pd, &write_speed)))
		write_speed = 16 * 177;	/* assume 16x (177 kB/s per 1x) */
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		case 0x1a: /* DVD+RW */
			DPRINTK("pktcdvd: write speed %ukB/s\n", write_speed);
			break;
		default:
			/* CD media: cap the drive speed by the media speed */
			if ((ret = pkt_media_speed(pd, &media_write_speed)))
				media_write_speed = 16;
			write_speed = min(write_speed, media_write_speed * 177);
			DPRINTK("pktcdvd: write speed %ux\n", write_speed / 176);
			break;
	}
	read_speed = write_speed;

	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
		DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name);
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	/* OPC failure is logged but not fatal */
	if ((ret = pkt_perform_opc(pd))) {
		DPRINTK("pktcdvd: %s Optimum Power Calibration failed\n", pd->name);
	}

	return 0;
}
+
/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, int write)
{
	int ret;
	long lba;
	request_queue_t *q;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so bdget() can't fail.
	 */
	bdget(pd->bdev->bd_dev);
	if ((ret = blkdev_get(pd->bdev, FMODE_READ, O_RDONLY)))
		goto out;

	if ((ret = pkt_get_last_written(pd, &lba))) {
		printk("pktcdvd: pkt_get_last_written failed\n");
		goto out_putdev;
	}

	/* lba is in 2kB frames: <<2 gives 512-byte sectors, <<11 bytes */
	set_capacity(pd->disk, lba << 2);
	set_capacity(pd->bdev->bd_disk, lba << 2);
	bd_set_size(pd->bdev, (loff_t)lba << 11);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		if ((ret = pkt_open_write(pd)))
			goto out_putdev;
		/*
		 * Some CDRW drives can not handle writes larger than one packet,
		 * even if the size is a multiple of the packet size.
		 */
		spin_lock_irq(q->queue_lock);
		blk_queue_max_sectors(q, pd->settings.size);
		spin_unlock_irq(q->queue_lock);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	if ((ret = pkt_set_segment_merging(pd, q)))
		goto out_putdev;

	if (write)
		printk("pktcdvd: %lukB available on disc\n", lba << 1);

	return 0;

out_putdev:
	blkdev_put(pd->bdev);
out:
	return ret;
}
+
/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	if (flush && pkt_flush_cache(pd))
		DPRINTK("pktcdvd: %s not flushing cache\n", pd->name);

	/* unlock the tray again */
	pkt_lock_door(pd, 0);

	/* restore maximum speed and drop our extra reference */
	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev);
}
+
+static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
+{
+ if (dev_minor >= MAX_WRITERS)
+ return NULL;
+ return pkt_devs[dev_minor];
+}
+
/*
 * Block device open entry point. The first opener actually opens the
 * underlying cdrom device; later openers just bump the reference count.
 */
static int pkt_open(struct inode *inode, struct file *file)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	VPRINTK("pktcdvd: entering open\n");

	/* ctl_mutex serializes refcnt changes against setup/teardown */
	down(&ctl_mutex);
	pd = pkt_find_dev_from_minor(iminor(inode));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt == 1) {
		/* first opener: actually set up the underlying device */
		if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) {
			ret = -EIO;
			goto out_dec;
		}
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(inode->i_bdev, CD_FRAMESIZE);
	}

	up(&ctl_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	VPRINTK("pktcdvd: failed open (%d)\n", ret);
	up(&ctl_mutex);
	return ret;
}
+
/*
 * Block device release entry point. The last closer flushes the drive
 * cache (if the device was writable) and releases the underlying device.
 */
static int pkt_close(struct inode *inode, struct file *file)
{
	struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
	int ret = 0;

	down(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		/* only flush when we were opened for writing */
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	up(&ctl_mutex);
	return ret;
}
+
+
+static void *psd_pool_alloc(int gfp_mask, void *data)
+{
+ return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
+}
+
/* Mempool destructor: elements come straight from kmalloc(). */
static void psd_pool_free(void *ptr, void *data)
{
	if (ptr)
		kfree(ptr);
}
+
/*
 * Completion handler for cloned READ bios: forwards completion to the
 * original bio and drops the clone.
 */
static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	/* partial completion: wait for the rest */
	if (bio->bi_size)
		return 1;

	bio_put(bio);
	/* complete the caller's original bio with the clone's status */
	bio_endio(psd->bio, psd->bio->bi_size, err);
	mempool_free(psd, psd_pool);
	pkt_bio_finished(pd);
	return 0;
}
+
/*
 * make_request function for the pktcdvd queue. READs are cloned and passed
 * down; WRITEs are split at zone boundaries, appended to a matching active
 * packet if possible, or queued in the rbtree work queue for kcdrwd.
 */
static int pkt_make_request(request_queue_t *q, struct bio *bio)
{
	struct pktcdvd_device *pd;
	char b[BDEVNAME_SIZE];
	sector_t zone;
	struct packet_data *pkt;
	int was_empty, blocked_bio;
	struct pkt_rb_node *node;

	pd = q->queuedata;
	if (!pd) {
		printk("pktcdvd: %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
		goto end_io;
	}

	/*
	 * Clone READ bios so we can have our own bi_end_io callback.
	 */
	if (bio_data_dir(bio) == READ) {
		struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
		struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);

		psd->pd = pd;
		psd->bio = bio;
		cloned_bio->bi_bdev = pd->bdev;
		cloned_bio->bi_private = psd;
		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
		pd->stats.secs_r += bio->bi_size >> 9;
		/* high-priority queueing for reads */
		pkt_queue_bio(pd, cloned_bio, 1);
		return 0;
	}

	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		printk("pktcdvd: WRITE for ro device %s (%llu)\n",
		       pd->name, (unsigned long long)bio->bi_sector);
		goto end_io;
	}

	/* writes must be whole 2kB frames */
	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
		printk("pktcdvd: wrong bio size\n");
		goto end_io;
	}

	blk_queue_bounce(q, &bio);

	zone = ZONE(bio->bi_sector, pd);
	VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_sector,
		(unsigned long long)(bio->bi_sector + bio_sectors(bio)));

	/* Check if we have to split the bio */
	{
		struct bio_pair *bp;
		sector_t last_zone;
		int first_sectors;

		last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
		if (last_zone != zone) {
			/* a bio can span at most two adjacent zones */
			BUG_ON(last_zone != zone + pd->settings.size);
			first_sectors = last_zone - bio->bi_sector;
			bp = bio_split(bio, bio_split_pool, first_sectors);
			BUG_ON(!bp);
			/* recurse once per half */
			pkt_make_request(q, &bp->bio1);
			pkt_make_request(q, &bp->bio2);
			bio_pair_release(bp);
			return 0;
		}
	}

	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	blocked_bio = 0;
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				pkt_add_list_last(bio, &pkt->orig_bios,
						  &pkt->orig_bios_tail);
				pkt->write_size += bio->bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					/* packet is now full: kick kcdrwd */
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				}
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
				return 0;
			} else {
				/* a packet for this zone is busy writing */
				blocked_bio = 1;
			}
			spin_unlock(&pkt->lock);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);

	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(pd->rb_pool, GFP_NOIO);
	BUG_ON(!node);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
	return 0;
end_io:
	bio_io_error(bio, bio->bi_size);
	return 0;
}
+
+
+
/*
 * merge_bvec callback: report how many bytes may still be added to this
 * bio without the bio growing past the end of its packet zone.
 */
static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
{
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone = ZONE(bio->bi_sector, pd);
	/* bytes of this zone already consumed by the bio */
	int used = ((bio->bi_sector - zone) << 9) + bio->bi_size;
	int remaining = (pd->settings.size << 9) - used;
	int remaining2;

	/*
	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
	 * boundary, pkt_make_request() will split the bio.
	 */
	remaining2 = PAGE_SIZE - bio->bi_size;
	remaining = max(remaining, remaining2);

	BUG_ON(remaining < 0);
	return remaining;
}
+
+static void pkt_init_queue(struct pktcdvd_device *pd)
+{
+ request_queue_t *q = pd->disk->queue;
+
+ blk_queue_make_request(q, pkt_make_request);
+ blk_queue_hardsect_size(q, CD_FRAMESIZE);
+ blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
+ blk_queue_merge_bvec(q, pkt_merge_bvec);
+ q->queuedata = pd;
+}
+
/*
 * /proc seq_file show routine: dump settings, statistics and queue state
 * for one writer.
 */
static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	char bdev_buf[BDEVNAME_SIZE];
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
		   bdevname(pd->bdev, bdev_buf));

	seq_printf(m, "\nSettings:\n");
	/* settings.size is in 512-byte units */
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	/* sector counts are >>1 to print kB */
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	return 0;
}
+
+static int pkt_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pkt_seq_show, PDE(inode)->data);
+}
+
+static struct file_operations pkt_proc_fops = {
+ .open = pkt_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
+{
+ int i;
+ int ret = 0;
+ char b[BDEVNAME_SIZE];
+ struct proc_dir_entry *proc;
+ struct block_device *bdev;
+
+ if (pd->pkt_dev == dev) {
+ printk("pktcdvd: Recursive setup not allowed\n");
+ return -EBUSY;
+ }
+ for (i = 0; i < MAX_WRITERS; i++) {
+ struct pktcdvd_device *pd2 = pkt_devs[i];
+ if (!pd2)
+ continue;
+ if (pd2->bdev->bd_dev == dev) {
+ printk("pktcdvd: %s already setup\n", bdevname(pd2->bdev, b));
+ return -EBUSY;
+ }
+ if (pd2->pkt_dev == dev) {
+ printk("pktcdvd: Can't chain pktcdvd devices\n");
+ return -EBUSY;
+ }
+ }
+
+ bdev = bdget(dev);
+ if (!bdev)
+ return -ENOMEM;
+ ret = blkdev_get(bdev, FMODE_READ, O_RDONLY | O_NONBLOCK);
+ if (ret)
+ return ret;
+
+ /* This is safe, since we have a reference from open(). */
+ __module_get(THIS_MODULE);
+
+ if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
+ printk("pktcdvd: not enough memory for buffers\n");
+ ret = -ENOMEM;
+ goto out_mem;
+ }
+
+ pd->bdev = bdev;
+ set_blocksize(bdev, CD_FRAMESIZE);
+
+ pkt_init_queue(pd);
+
+ atomic_set(&pd->cdrw.pending_bios, 0);
+ pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
+ if (IS_ERR(pd->cdrw.thread)) {
+ printk("pktcdvd: can't start kernel thread\n");
+ ret = -ENOMEM;
+ goto out_thread;
+ }
+
+ proc = create_proc_entry(pd->name, 0, pkt_proc);
+ if (proc) {
+ proc->data = pd;
+ proc->proc_fops = &pkt_proc_fops;
+ }
+ DPRINTK("pktcdvd: writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
+ return 0;
+
+out_thread:
+ pkt_shrink_pktlist(pd);
+out_mem:
+ blkdev_put(bdev);
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
+
+ VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
+ BUG_ON(!pd);
+
+ switch (cmd) {
+ /*
+ * forward selected CDROM ioctls to CD-ROM, for UDF
+ */
+ case CDROMMULTISESSION:
+ case CDROMREADTOCENTRY:
+ case CDROM_LAST_WRITTEN:
+ case CDROM_SEND_PACKET:
+ case SCSI_IOCTL_SEND_COMMAND:
+ return ioctl_by_bdev(pd->bdev, cmd, arg);
+
+ case CDROMEJECT:
+ /*
+ * The door gets locked when the device is opened, so we
+ * have to unlock it or else the eject command fails.
+ */
+ pkt_lock_door(pd, 0);
+ return ioctl_by_bdev(pd->bdev, cmd, arg);
+
+ default:
+ printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static int pkt_media_changed(struct gendisk *disk)
+{
+ struct pktcdvd_device *pd = disk->private_data;
+ struct gendisk *attached_disk;
+
+ if (!pd)
+ return 0;
+ if (!pd->bdev)
+ return 0;
+ attached_disk = pd->bdev->bd_disk;
+ if (!attached_disk)
+ return 0;
+ return attached_disk->fops->media_changed(attached_disk);
+}
+
+static struct block_device_operations pktcdvd_ops = {
+ .owner = THIS_MODULE,
+ .open = pkt_open,
+ .release = pkt_close,
+ .ioctl = pkt_ioctl,
+ .media_changed = pkt_media_changed,
+};
+
+/*
+ * Set up mapping from pktcdvd device to CD-ROM device.
+ */
+static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
+{
+ int idx;
+ int ret = -ENOMEM;
+ struct pktcdvd_device *pd;
+ struct gendisk *disk;
+ dev_t dev = new_decode_dev(ctrl_cmd->dev);
+
+ for (idx = 0; idx < MAX_WRITERS; idx++)
+ if (!pkt_devs[idx])
+ break;
+ if (idx == MAX_WRITERS) {
+ printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
+ return -EBUSY;
+ }
+
+ pd = kmalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
+ if (!pd)
+ return ret;
+ memset(pd, 0, sizeof(struct pktcdvd_device));
+
+ pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL);
+ if (!pd->rb_pool)
+ goto out_mem;
+
+ disk = alloc_disk(1);
+ if (!disk)
+ goto out_mem;
+ pd->disk = disk;
+
+ spin_lock_init(&pd->lock);
+ spin_lock_init(&pd->iosched.lock);
+ sprintf(pd->name, "pktcdvd%d", idx);
+ init_waitqueue_head(&pd->wqueue);
+ pd->bio_queue = RB_ROOT;
+
+ disk->major = pkt_major;
+ disk->first_minor = idx;
+ disk->fops = &pktcdvd_ops;
+ disk->flags = GENHD_FL_REMOVABLE;
+ sprintf(disk->disk_name, "pktcdvd%d", idx);
+ disk->private_data = pd;
+ disk->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!disk->queue)
+ goto out_mem2;
+
+ pd->pkt_dev = MKDEV(disk->major, disk->first_minor);
+ ret = pkt_new_dev(pd, dev);
+ if (ret)
+ goto out_new_dev;
+
+ add_disk(disk);
+ pkt_devs[idx] = pd;
+ ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
+ return 0;
+
+out_new_dev:
+ blk_put_queue(disk->queue);
+out_mem2:
+ put_disk(disk);
+out_mem:
+ if (pd->rb_pool)
+ mempool_destroy(pd->rb_pool);
+ kfree(pd);
+ return ret;
+}
+
+/*
+ * Tear down mapping from pktcdvd device to CD-ROM device.
+ */
+static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
+{
+ struct pktcdvd_device *pd;
+ int idx;
+ dev_t pkt_dev = new_decode_dev(ctrl_cmd->pkt_dev);
+
+ for (idx = 0; idx < MAX_WRITERS; idx++) {
+ pd = pkt_devs[idx];
+ if (pd && (pd->pkt_dev == pkt_dev))
+ break;
+ }
+ if (idx == MAX_WRITERS) {
+ DPRINTK("pktcdvd: dev not setup\n");
+ return -ENXIO;
+ }
+
+ if (pd->refcnt > 0)
+ return -EBUSY;
+
+ if (!IS_ERR(pd->cdrw.thread))
+ kthread_stop(pd->cdrw.thread);
+
+ blkdev_put(pd->bdev);
+
+ pkt_shrink_pktlist(pd);
+
+ remove_proc_entry(pd->name, pkt_proc);
+ DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
+
+ del_gendisk(pd->disk);
+ blk_put_queue(pd->disk->queue);
+ put_disk(pd->disk);
+
+ pkt_devs[idx] = NULL;
+ mempool_destroy(pd->rb_pool);
+ kfree(pd);
+
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
+ return 0;
+}
+
+static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
+{
+ struct pktcdvd_device *pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
+ if (pd) {
+ ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
+ ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
+ } else {
+ ctrl_cmd->dev = 0;
+ ctrl_cmd->pkt_dev = 0;
+ }
+ ctrl_cmd->num_devices = MAX_WRITERS;
+}
+
+static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct pkt_ctrl_command ctrl_cmd;
+ int ret = 0;
+
+ if (cmd != PACKET_CTRL_CMD)
+ return -ENOTTY;
+
+ if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
+ return -EFAULT;
+
+ switch (ctrl_cmd.command) {
+ case PKT_CTRL_CMD_SETUP:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ down(&ctl_mutex);
+ ret = pkt_setup_dev(&ctrl_cmd);
+ up(&ctl_mutex);
+ break;
+ case PKT_CTRL_CMD_TEARDOWN:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ down(&ctl_mutex);
+ ret = pkt_remove_dev(&ctrl_cmd);
+ up(&ctl_mutex);
+ break;
+ case PKT_CTRL_CMD_STATUS:
+ down(&ctl_mutex);
+ pkt_get_status(&ctrl_cmd);
+ up(&ctl_mutex);
+ break;
+ default:
+ return -ENOTTY;
+ }
+
+ if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
+ return -EFAULT;
+ return ret;
+}
+
+
+static struct file_operations pkt_ctl_fops = {
+ .ioctl = pkt_ctl_ioctl,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice pkt_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "pktcdvd",
+ .devfs_name = "pktcdvd/control",
+ .fops = &pkt_ctl_fops
+};
+
+int pkt_init(void)
+{
+ int ret;
+
+ psd_pool = mempool_create(PSD_POOL_SIZE, psd_pool_alloc, psd_pool_free, NULL);
+ if (!psd_pool)
+ return -ENOMEM;
+
+ ret = register_blkdev(pkt_major, "pktcdvd");
+ if (ret < 0) {
+ printk("pktcdvd: Unable to register block device\n");
+ goto out2;
+ }
+ if (!pkt_major)
+ pkt_major = ret;
+
+ ret = misc_register(&pkt_misc);
+ if (ret) {
+ printk("pktcdvd: Unable to register misc device\n");
+ goto out;
+ }
+
+ init_MUTEX(&ctl_mutex);
+
+ pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
+
+ DPRINTK("pktcdvd: %s\n", VERSION_CODE);
+ return 0;
+
+out:
+ unregister_blkdev(pkt_major, "pktcdvd");
+out2:
+ mempool_destroy(psd_pool);
+ return ret;
+}
+
+void pkt_exit(void)
+{
+ remove_proc_entry("pktcdvd", proc_root_driver);
+ misc_deregister(&pkt_misc);
+ unregister_blkdev(pkt_major, "pktcdvd");
+ mempool_destroy(psd_pool);
+}
+
+MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
+MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
+MODULE_LICENSE("GPL");
+
+module_init(pkt_init);
+module_exit(pkt_exit);
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index f605535d3f56..dd3be0a06219 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -25,6 +25,7 @@
 * -- prune comments, they are too voluminous
* -- Exterminate P3 printks
 * -- Resolve XXX's
+ * -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=?
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -62,9 +63,9 @@
/* command block wrapper */
struct bulk_cb_wrap {
- u32 Signature; /* contains 'USBC' */
+ __le32 Signature; /* contains 'USBC' */
u32 Tag; /* unique per command id */
- u32 DataTransferLength; /* size of data */
+ __le32 DataTransferLength; /* size of data */
u8 Flags; /* direction in bit 0 */
u8 Lun; /* LUN normally 0 */
 u8 Length; /* of the CDB */
@@ -78,9 +79,9 @@ struct bulk_cb_wrap {
/* command status wrapper */
struct bulk_cs_wrap {
- u32 Signature; /* should = 'USBS' */
+ __le32 Signature; /* should = 'USBS' */
u32 Tag; /* same as original command */
- u32 Residue; /* amount not transferred */
+ __le32 Residue; /* amount not transferred */
u8 Status; /* see below */
};
@@ -157,7 +158,8 @@ struct ub_scsi_cmd {
struct ub_scsi_cmd *next;
int error; /* Return code - valid upon done */
- int act_len; /* Return size */
+ unsigned int act_len; /* Return size */
+ unsigned char key, asc, ascq; /* May be valid if error==-EIO */
int stat_count; /* Retries getting status. */
@@ -490,6 +492,18 @@ static void ub_id_put(int id)
*/
static void ub_cleanup(struct ub_dev *sc)
{
+
+ /*
+ * If we zero disk->private_data BEFORE put_disk, we have to check
+ * for NULL all over the place in open, release, check_media and
+ * revalidate, because the block level semaphore is well inside the
+ * put_disk. But we cannot zero after the call, because *disk is gone.
+ * The sd.c is blatantly racy in this area.
+ */
+ /* disk->private_data = NULL; */
+ put_disk(sc->disk);
+ sc->disk = NULL;
+
ub_id_put(sc->id);
kfree(sc);
}
@@ -661,9 +675,12 @@ static inline int ub_bd_rq_fn_1(request_queue_t *q)
/*
* build the command
+ *
+ * The call to blk_queue_hardsect_size() guarantees that request
+ * is aligned, but it is given in terms of 512 byte units, always.
*/
- block = rq->sector;
- nblks = rq->nr_sectors;
+ block = rq->sector >> sc->capacity.bshift;
+ nblks = rq->nr_sectors >> sc->capacity.bshift;
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10;
@@ -678,7 +695,7 @@ static inline int ub_bd_rq_fn_1(request_queue_t *q)
cmd->dir = ub_dir;
cmd->state = UB_CMDST_INIT;
cmd->data = rq->buffer;
- cmd->len = nblks * 512;
+ cmd->len = rq->nr_sectors * 512;
cmd->done = ub_rw_cmd_done;
cmd->back = rq;
@@ -786,17 +803,16 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
- sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
- add_timer(&sc->work_timer);
-
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
printk("ub: cmd #%d start failed (%d)\n", cmd->tag, rc); /* P3 */
- del_timer(&sc->work_timer);
ub_complete(&sc->work_done);
return rc;
}
+ sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
+ add_timer(&sc->work_timer);
+
cmd->state = UB_CMDST_CMD;
ub_cmdtr_state(sc, cmd);
return 0;
@@ -836,6 +852,7 @@ static void ub_scsi_action(unsigned long _dev)
unsigned long flags;
spin_lock_irqsave(&sc->lock, flags);
+ del_timer(&sc->work_timer);
ub_scsi_dispatch(sc);
spin_unlock_irqrestore(&sc->lock, flags);
}
@@ -968,18 +985,17 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
- sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
- add_timer(&sc->work_timer);
-
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
- del_timer(&sc->work_timer);
ub_complete(&sc->work_done);
ub_state_done(sc, cmd, rc);
return;
}
+ sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
+ add_timer(&sc->work_timer);
+
cmd->state = UB_CMDST_DATA;
ub_cmdtr_state(sc, cmd);
@@ -1063,19 +1079,18 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
- sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
- add_timer(&sc->work_timer);
-
rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC);
if (rc != 0) {
/* XXX Clear stalls */
printk("%s: CSW #%d submit failed (%d)\n",
sc->name, cmd->tag, rc); /* P3 */
- del_timer(&sc->work_timer);
ub_complete(&sc->work_done);
ub_state_done(sc, cmd, rc);
return;
}
+
+ sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
+ add_timer(&sc->work_timer);
return;
}
@@ -1132,16 +1147,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
(*cmd->done)(sc, cmd);
} else if (cmd->state == UB_CMDST_SENSE) {
- /*
- * We do not look at sense, because even if there was no sense,
- * we get into UB_CMDST_SENSE from a STALL or CSW FAIL only.
- * We request sense because we want to clear CHECK CONDITION
- * on devices with delusions of SCSI, and not because we
- * are curious in any way about the sense itself.
- */
- /* if ((cmd->top_sense[2] & 0x0F) == NO_SENSE) { foo } */
-
ub_state_done(sc, cmd, -EIO);
+
} else {
printk(KERN_WARNING "%s: "
"wrong command state %d on device %u\n",
@@ -1186,18 +1193,17 @@ static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
- sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
- add_timer(&sc->work_timer);
-
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
printk("ub: CSW #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
- del_timer(&sc->work_timer);
ub_complete(&sc->work_done);
ub_state_done(sc, cmd, rc);
return;
}
+ sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
+ add_timer(&sc->work_timer);
+
cmd->stat_count = 0;
cmd->state = UB_CMDST_STAT;
ub_cmdtr_state(sc, cmd);
@@ -1217,9 +1223,17 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
goto error;
}
+ /*
+ * ``If the allocation length is eighteen or greater, and a device
+ * server returns less than eighteen bytes of data, the application
+ * client should assume that the bytes not transferred would have been
+ * zeroes had the device server returned those bytes.''
+ */
memset(&sc->top_sense, 0, UB_SENSE_SIZE);
+
scmd = &sc->top_rqs_cmd;
scmd->cdb[0] = REQUEST_SENSE;
+ scmd->cdb[4] = UB_SENSE_SIZE;
scmd->cdb_len = 6;
scmd->dir = UB_DIR_READ;
scmd->state = UB_CMDST_INIT;
@@ -1271,14 +1285,13 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
- sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
- add_timer(&sc->work_timer);
-
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
- del_timer(&sc->work_timer);
ub_complete(&sc->work_done);
return rc;
}
+
+ sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
+ add_timer(&sc->work_timer);
return 0;
}
@@ -1289,8 +1302,15 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
unsigned char *sense = scmd->data;
struct ub_scsi_cmd *cmd;
+ /*
+ * Ignoring scmd->act_len, because the buffer was pre-zeroed.
+ */
ub_cmdtr_sense(sc, scmd, sense);
+ /*
+ * Find the command which triggered the unit attention or a check,
+ * save the sense into it, and advance its state machine.
+ */
if ((cmd = ub_cmdq_peek(sc)) == NULL) {
printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
return;
@@ -1308,6 +1328,10 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
return;
}
+ cmd->key = sense[2] & 0x0F;
+ cmd->asc = sense[12];
+ cmd->ascq = sense[13];
+
ub_scsi_urb_compl(sc, cmd);
}
@@ -1407,7 +1431,15 @@ static int ub_bd_open(struct inode *inode, struct file *filp)
if (sc->removable || sc->readonly)
check_disk_change(inode->i_bdev);
- /* XXX sd.c and floppy.c bail on open if media is not present. */
+ /*
+ * The sd.c considers ->media_present and ->changed not equivalent,
+ * under some pretty murky conditions (a failure of READ CAPACITY).
+ * We may need it one day.
+ */
+ if (sc->removable && sc->changed && !(filp->f_flags & O_NDELAY)) {
+ rc = -ENOMEDIUM;
+ goto err_open;
+ }
if (sc->readonly && (filp->f_mode & FMODE_WRITE)) {
rc = -EROFS;
@@ -1492,8 +1524,11 @@ static int ub_bd_revalidate(struct gendisk *disk)
printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n",
sc->name, sc->dev->devnum, sc->capacity.nsec, sc->capacity.bsize);
+ /* XXX Support sector size switching like in sr.c */
+ blk_queue_hardsect_size(disk->queue, sc->capacity.bsize);
set_capacity(disk, sc->capacity.nsec);
// set_disk_ro(sdkp->disk, sc->readonly);
+
return 0;
}
@@ -1592,6 +1627,9 @@ static int ub_sync_tur(struct ub_dev *sc)
rc = cmd->error;
+ if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */
+ rc = cmd->key;
+
err_submit:
kfree(cmd);
err_alloc:
@@ -1654,8 +1692,8 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret)
}
/* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
- nsec = be32_to_cpu(*(u32 *)p) + 1;
- bsize = be32_to_cpu(*(u32 *)(p + 4));
+ nsec = be32_to_cpu(*(__be32 *)p) + 1;
+ bsize = be32_to_cpu(*(__be32 *)(p + 4));
switch (bsize) {
case 512: shift = 0; break;
case 1024: shift = 1; break;
@@ -1725,28 +1763,22 @@ static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
- init_timer(&timer);
- timer.function = ub_probe_timeout;
- timer.data = (unsigned long) &compl;
- timer.expires = jiffies + UB_CTRL_TIMEOUT;
- add_timer(&timer);
-
if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
printk(KERN_WARNING
"%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
- del_timer_sync(&timer);
return rc;
}
+ init_timer(&timer);
+ timer.function = ub_probe_timeout;
+ timer.data = (unsigned long) &compl;
+ timer.expires = jiffies + UB_CTRL_TIMEOUT;
+ add_timer(&timer);
+
wait_for_completion(&compl);
del_timer_sync(&timer);
- /*
- * Most of the time, URB was done and dev set to NULL, and so
- * the unlink bounces out with ENODEV. We do not call usb_kill_urb
- * because we still think about a backport to 2.4.
- */
- usb_unlink_urb(&sc->work_urb);
+ usb_kill_urb(&sc->work_urb);
/* reset the endpoint toggle */
usb_settoggle(sc->dev, endp, usb_pipeout(sc->last_pipe), 0);
@@ -1813,6 +1845,7 @@ static int ub_probe(struct usb_interface *intf,
request_queue_t *q;
struct gendisk *disk;
int rc;
+ int i;
rc = -ENOMEM;
if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
@@ -1879,7 +1912,11 @@ static int ub_probe(struct usb_interface *intf,
* has to succeed, so we clear checks with an additional one here.
 * In any case it's not our business how revalidation is implemented.
*/
- ub_sync_tur(sc);
+ for (i = 0; i < 3; i++) { /* Retries for benh's key */
+ if ((rc = ub_sync_tur(sc)) <= 0) break;
+ if (rc != 0x6) break;
+ msleep(10);
+ }
sc->removable = 1; /* XXX Query this from the device */
@@ -1915,7 +1952,7 @@ static int ub_probe(struct usb_interface *intf,
blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
// blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
blk_queue_max_sectors(q, UB_MAX_SECTORS);
- // blk_queue_hardsect_size(q, xxxxx);
+ blk_queue_hardsect_size(q, sc->capacity.bsize);
/*
* This is a serious infraction, caused by a deficiency in the
@@ -2006,17 +2043,6 @@ static void ub_disconnect(struct usb_interface *intf)
blk_cleanup_queue(q);
/*
- * If we zero disk->private_data BEFORE put_disk, we have to check
- * for NULL all over the place in open, release, check_media and
- * revalidate, because the block level semaphore is well inside the
- * put_disk. But we cannot zero after the call, because *disk is gone.
- * The sd.c is blatantly racy in this area.
- */
- /* disk->private_data = NULL; */
- put_disk(disk);
- sc->disk = NULL;
-
- /*
* We really expect blk_cleanup_queue() to wait, so no amount
 * of paranoia is too much.
*
@@ -2035,6 +2061,13 @@ static void ub_disconnect(struct usb_interface *intf)
spin_unlock_irqrestore(&sc->lock, flags);
/*
+ * There is virtually no chance that another CPU is still running this long
+ * after ub_urb_complete should have called del_timer, but only if HCD
+ * didn't forget to deliver a callback on unlink.
+ */
+ del_timer_sync(&sc->work_timer);
+
+ /*
* At this point there must be no commands coming from anyone
* and no URBs left in transit.
*/
diff --git a/drivers/cdrom/Makefile b/drivers/cdrom/Makefile
index 5c484f3b3e58..4a8351753e07 100644
--- a/drivers/cdrom/Makefile
+++ b/drivers/cdrom/Makefile
@@ -8,6 +8,7 @@
obj-$(CONFIG_BLK_DEV_IDECD) += cdrom.o
obj-$(CONFIG_BLK_DEV_SR) += cdrom.o
obj-$(CONFIG_PARIDE_PCD) += cdrom.o
+obj-$(CONFIG_CDROM_PKTCDVD) += cdrom.o
obj-$(CONFIG_AZTCD) += aztcd.o
obj-$(CONFIG_CDU31A) += cdu31a.o cdrom.o
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index e57d19031f8e..4153153aeaf5 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -234,6 +234,12 @@
-- Mt Rainier support
-- DVD-RAM write open fixes
+ Nov 5 2001, Aug 8 2002. Modified by Andy Polyakov
+ <appro@fy.chalmers.se> to support MMC-3 compliant DVD+RW units.
+
+ Modified by Nigel Kukard <nkukard@lbsd.net> - support DVD+RW
+ 2.4.x patch by Andy Polyakov <appro@fy.chalmers.se>
+
-------------------------------------------------------------------------*/
#define REVISION "Revision: 3.20"
@@ -848,6 +854,41 @@ static int cdrom_ram_open_write(struct cdrom_device_info *cdi)
return ret;
}
+static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
+{
+ struct packet_command cgc;
+ char buffer[32];
+ int ret, mmc3_profile;
+
+ init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
+
+ cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
+ cgc.cmd[1] = 0;
+ cgc.cmd[2] = cgc.cmd[3] = 0; /* Starting Feature Number */
+ cgc.cmd[8] = sizeof(buffer); /* Allocation Length */
+ cgc.quiet = 1;
+
+ if ((ret = cdi->ops->generic_packet(cdi, &cgc))) {
+ mmc3_profile = 0xffff;
+ } else {
+ mmc3_profile = (buffer[6] << 8) | buffer[7];
+ printk(KERN_INFO "cdrom: %s: mmc-3 profile capable, current profile: %Xh\n",
+ cdi->name, mmc3_profile);
+ }
+ cdi->mmc3_profile = mmc3_profile;
+}
+
+static int cdrom_is_dvd_rw(struct cdrom_device_info *cdi)
+{
+ switch (cdi->mmc3_profile) {
+ case 0x12: /* DVD-RAM */
+ case 0x1A: /* DVD+RW */
+ return 0;
+ default:
+ return 1;
+ }
+}
+
/*
* returns 0 for ok to open write, non-0 to disallow
*/
@@ -889,10 +930,50 @@ static int cdrom_open_write(struct cdrom_device_info *cdi)
ret = cdrom_ram_open_write(cdi);
else if (CDROM_CAN(CDC_MO_DRIVE))
ret = mo_open_write(cdi);
+ else if (!cdrom_is_dvd_rw(cdi))
+ ret = 0;
return ret;
}
+static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi)
+{
+ struct packet_command cgc;
+
+ if (cdi->mmc3_profile != 0x1a) {
+ cdinfo(CD_CLOSE, "%s: No DVD+RW\n", cdi->name);
+ return;
+ }
+
+ if (!cdi->media_written) {
+ cdinfo(CD_CLOSE, "%s: DVD+RW media clean\n", cdi->name);
+ return;
+ }
+
+ printk(KERN_INFO "cdrom: %s: dirty DVD+RW media, \"finalizing\"\n",
+ cdi->name);
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_FLUSH_CACHE;
+ cgc.timeout = 30*HZ;
+ cdi->ops->generic_packet(cdi, &cgc);
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_CLOSE_TRACK;
+ cgc.timeout = 3000*HZ;
+ cgc.quiet = 1;
+ cdi->ops->generic_packet(cdi, &cgc);
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_CLOSE_TRACK;
+ cgc.cmd[2] = 2; /* Close session */
+ cgc.quiet = 1;
+ cgc.timeout = 3000*HZ;
+ cdi->ops->generic_packet(cdi, &cgc);
+
+ cdi->media_written = 0;
+}
+
static int cdrom_close_write(struct cdrom_device_info *cdi)
{
#if 0
@@ -925,6 +1006,7 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
ret = open_for_data(cdi);
if (ret)
goto err;
+ cdrom_mmc3_profile(cdi);
if (fp->f_mode & FMODE_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
@@ -932,6 +1014,7 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
if (!CDROM_CAN(CDC_RAM))
goto err;
ret = 0;
+ cdi->media_written = 0;
}
}
@@ -1123,6 +1206,8 @@ int cdrom_release(struct cdrom_device_info *cdi, struct file *fp)
cdi->use_count--;
if (cdi->use_count == 0)
cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
+ if (cdi->use_count == 0)
+ cdrom_dvd_rw_close_write(cdi);
if (cdi->use_count == 0 &&
(cdo->capability & CDC_LOCK) && !keeplocked) {
cdinfo(CD_CLOSE, "Unlocking door!\n");
@@ -1329,6 +1414,7 @@ int media_changed(struct cdrom_device_info *cdi, int queue)
if (cdi->ops->media_changed(cdi, CDSL_CURRENT)) {
cdi->mc_flags = 0x3; /* set bit on both queues */
ret |= 1;
+ cdi->media_written = 0;
}
cdi->mc_flags &= ~mask; /* clear bit */
return ret;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 7bec505ad95e..b7b3c5617756 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -1782,16 +1782,8 @@ static struct pci_driver agp_intel_pci_driver = {
.resume = agp_intel_resume,
};
-/* intel_agp_init() must not be declared static for explicit
- early initialization to work (ie i810fb) */
-int __init agp_intel_init(void)
+static int __init agp_intel_init(void)
{
- static int agp_initialised=0;
-
- if (agp_initialised == 1)
- return 0;
- agp_initialised=1;
-
return pci_module_init(&agp_intel_pci_driver);
}
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 019a31c36121..4042fd3b56bd 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
+#include <linux/delay.h>
#include "agp.h"
#define SIS_ATTBASE 0x90
@@ -102,8 +103,7 @@ static void sis_delayed_enable(u32 mode)
*/
if (device->device == agp_bridge->dev->device) {
printk(KERN_INFO PFX "SiS delay workaround: giving bridge time to recover.\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout (1+(HZ*10)/1000);
+ msleep(10);
}
}
}
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index f11cf4ea0398..2355455786f4 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -32,6 +32,7 @@
*/
#include <linux/config.h>
+#include <linux/delay.h>
#undef SERIAL_PARANOIA_CHECK
#define SERIAL_DO_RESTART
@@ -1563,8 +1564,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
info->tty = 0;
if (info->blocked_open) {
if (info->close_delay) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(info->close_delay);
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
}
wake_up_interruptible(&info->open_wait);
}
@@ -1622,8 +1622,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("serdatr = %d (jiff=%lu)...", lsr, jiffies);
#endif
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(char_time);
+ msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 6ceaee64b269..46002d5bada1 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -2690,20 +2690,16 @@ cy_wait_until_sent(struct tty_struct *tty, int timeout)
#ifdef CY_DEBUG_WAIT_UNTIL_SENT
printk("Not clean (jiff=%lu)...", jiffies);
#endif
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(char_time);
- if (signal_pending(current))
+ if (msleep_interruptible(jiffies_to_msecs(char_time)))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
break;
}
- current->state = TASK_RUNNING;
} else {
// Nothing to do!
}
/* Run one more char cycle */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(char_time * 5);
+ msleep_interruptible(jiffies_to_msecs(char_time * 5));
#ifdef CY_DEBUG_WAIT_UNTIL_SENT
printk("Clean (jiff=%lu)...done\n", jiffies);
#endif
@@ -2828,8 +2824,7 @@ cy_close(struct tty_struct *tty, struct file *filp)
if (info->blocked_open) {
CY_UNLOCK(info, flags);
if (info->close_delay) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(info->close_delay);
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
}
wake_up_interruptible(&info->open_wait);
CY_LOCK(info, flags);
diff --git a/drivers/char/drm/radeon_mem.c b/drivers/char/drm/radeon_mem.c
index 289957406c9c..f82d0c42472f 100644
--- a/drivers/char/drm/radeon_mem.c
+++ b/drivers/char/drm/radeon_mem.c
@@ -85,7 +85,7 @@ static struct mem_block *alloc_block( struct mem_block *heap, int size,
struct mem_block *p;
int mask = (1 << align2)-1;
- for (p = heap->next ; p != heap ; p = p->next) {
+ list_for_each(p, heap) {
int start = (p->start + mask) & ~mask;
if (p->filp == 0 && start + size <= p->start + p->size)
return split_block( p, start, size, filp );
@@ -98,7 +98,7 @@ static struct mem_block *find_block( struct mem_block *heap, int start )
{
struct mem_block *p;
- for (p = heap->next ; p != heap ; p = p->next)
+ list_for_each(p, heap)
if (p->start == start)
return p;
@@ -166,7 +166,7 @@ void radeon_mem_release( DRMFILE filp, struct mem_block *heap )
if (!heap || !heap->next)
return;
- for (p = heap->next ; p != heap ; p = p->next) {
+ list_for_each(p, heap) {
if (p->filp == filp)
p->filp = NULL;
}
@@ -174,7 +174,7 @@ void radeon_mem_release( DRMFILE filp, struct mem_block *heap )
/* Assumes a single contiguous range. Needs a special filp in
* 'heap' to stop it being subsumed.
*/
- for (p = heap->next ; p != heap ; p = p->next) {
+ list_for_each(p, heap) {
while (p->filp == 0 && p->next->filp == 0) {
struct mem_block *q = p->next;
p->size += q->size;
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index e8f15f46eca4..903e4c3cc209 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -107,7 +107,6 @@ static struct file_operations dtlk_fops =
};
/* local prototypes */
-static void dtlk_delay(int ms);
static int dtlk_dev_probe(void);
static struct dtlk_settings *dtlk_interrogate(void);
static int dtlk_readable(void);
@@ -146,7 +145,7 @@ static ssize_t dtlk_read(struct file *file, char __user *buf,
return i;
if (file->f_flags & O_NONBLOCK)
break;
- dtlk_delay(100);
+ msleep_interruptible(100);
}
if (retries == loops_per_jiffy)
printk(KERN_ERR "dtlk_read times out\n");
@@ -191,7 +190,7 @@ static ssize_t dtlk_write(struct file *file, const char __user *buf,
rate to 500 bytes/sec, but that's
still enough to keep up with the
speech synthesizer. */
- dtlk_delay(1);
+ msleep_interruptible(1);
else {
/* the RDY bit goes zero 2-3 usec
after writing, and goes 1 again
@@ -212,7 +211,7 @@ static ssize_t dtlk_write(struct file *file, const char __user *buf,
if (file->f_flags & O_NONBLOCK)
break;
- dtlk_delay(1);
+ msleep_interruptible(1);
if (++retries > 10 * HZ) { /* wait no more than 10 sec
from last write */
@@ -351,8 +350,7 @@ static int __init dtlk_init(void)
static void __exit dtlk_cleanup (void)
{
dtlk_write_bytes("goodbye", 8);
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(5 * HZ / 10); /* nap 0.50 sec but
+ msleep_interruptible(500); /* nap 0.50 sec but
could be awakened
earlier by
signals... */
@@ -368,13 +366,6 @@ module_exit(dtlk_cleanup);
/* ------------------------------------------------------------------------ */
-/* sleep for ms milliseconds */
-static void dtlk_delay(int ms)
-{
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout((ms * HZ + 1000 - HZ) / 1000);
-}
-
static int dtlk_readable(void)
{
#ifdef TRACING
@@ -431,7 +422,7 @@ static int __init dtlk_dev_probe(void)
/* posting an index takes 18 msec. Here, we
wait up to 100 msec to see whether it
appears. */
- dtlk_delay(100);
+ msleep_interruptible(100);
dtlk_has_indexing = dtlk_readable();
#ifdef TRACING
printk(", indexing %d\n", dtlk_has_indexing);
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 0f13bef975f5..4b1e0b38cd8c 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -561,8 +561,7 @@ static void pc_close(struct tty_struct * tty, struct file * filp)
if (ch->close_delay)
{
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(ch->close_delay);
+ msleep_interruptible(jiffies_to_msecs(ch->close_delay));
}
wake_up_interruptible(&ch->open_wait);
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index d67098c45ffd..7353c2678916 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -57,6 +57,7 @@
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -70,6 +71,7 @@
#define NR_PORTS 64 /* maximum number of ports */
#define NR_PRIMARY 8 /* maximum number of primary ports */
+#define REGION_SIZE 8 /* size of io region to request */
/* The following variables can be set by giving module options */
static int irq[NR_PRIMARY]; /* IRQ for each base port */
@@ -2066,8 +2068,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
if (info->blocked_open) {
if (info->close_delay) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(info->close_delay);
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
}
wake_up_interruptible(&info->open_wait);
}
@@ -2098,8 +2099,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
while ((serial_in(info, UART_ESI_STAT1) != 0x03) ||
(serial_in(info, UART_ESI_STAT2) != 0xff)) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(char_time);
+ msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
@@ -2344,19 +2344,21 @@ static _INLINE_ void show_serial_version(void)
* This routine is called by espserial_init() to initialize a specific serial
* port.
*/
-static _INLINE_ int autoconfig(struct esp_struct * info, int *region_start)
+static _INLINE_ int autoconfig(struct esp_struct * info)
{
int port_detected = 0;
unsigned long flags;
+ if (!request_region(info->port, REGION_SIZE, "esp serial"))
+ return -EIO;
+
save_flags(flags); cli();
/*
* Check for ESP card
*/
- if (!check_region(info->port, 8) &&
- serial_in(info, UART_ESI_BASE) == 0xf3) {
+ if (serial_in(info, UART_ESI_BASE) == 0xf3) {
serial_out(info, UART_ESI_CMD1, 0x00);
serial_out(info, UART_ESI_CMD1, 0x01);
@@ -2372,19 +2374,6 @@ static _INLINE_ int autoconfig(struct esp_struct * info, int *region_start)
info->irq = 4;
}
- if (ports && (ports->port == (info->port - 8))) {
- release_region(*region_start,
- info->port - *region_start);
- } else
- *region_start = info->port;
-
- if (!request_region(*region_start,
- info->port - *region_start + 8,
- "esp serial"))
- {
- restore_flags(flags);
- return -EIO;
- }
/* put card in enhanced mode */
/* this prevents access through */
@@ -2397,6 +2386,8 @@ static _INLINE_ int autoconfig(struct esp_struct * info, int *region_start)
serial_out(info, UART_ESI_CMD2, 0x00);
}
}
+ if (!port_detected)
+ release_region(info->port, REGION_SIZE);
restore_flags(flags);
return (port_detected);
@@ -2430,7 +2421,6 @@ static struct tty_operations esp_ops = {
int __init espserial_init(void)
{
int i, offset;
- int region_start;
struct esp_struct * info;
struct esp_struct *last_primary = NULL;
int esp[] = {0x100,0x140,0x180,0x200,0x240,0x280,0x300,0x380};
@@ -2516,7 +2506,7 @@ int __init espserial_init(void)
info->irq = irq[i];
info->line = (i * 8) + (offset / 8);
- if (!autoconfig(info, &region_start)) {
+ if (!autoconfig(info)) {
i++;
offset = 0;
continue;
@@ -2592,7 +2582,6 @@ static void __exit espserial_exit(void)
{
unsigned long flags;
int e1;
- unsigned int region_start, region_end;
struct esp_struct *temp_async;
struct esp_pio_buffer *pio_buf;
@@ -2607,27 +2596,8 @@ static void __exit espserial_exit(void)
while (ports) {
if (ports->port) {
- region_start = region_end = ports->port;
- temp_async = ports;
-
- while (temp_async) {
- if ((region_start - temp_async->port) == 8) {
- region_start = temp_async->port;
- temp_async->port = 0;
- temp_async = ports;
- } else if ((temp_async->port - region_end)
- == 8) {
- region_end = temp_async->port;
- temp_async->port = 0;
- temp_async = ports;
- } else
- temp_async = temp_async->next_port;
- }
-
- release_region(region_start,
- region_end - region_start + 8);
+ release_region(ports->port, REGION_SIZE);
}
-
temp_async = ports->next_port;
kfree(ports);
ports = temp_async;
diff --git a/drivers/char/ftape/lowlevel/fdc-io.c b/drivers/char/ftape/lowlevel/fdc-io.c
index 3b78eda30b19..a3b0f510b1e8 100644
--- a/drivers/char/ftape/lowlevel/fdc-io.c
+++ b/drivers/char/ftape/lowlevel/fdc-io.c
@@ -389,7 +389,7 @@ int fdc_interrupt_wait(unsigned int time)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&ftape_wait_intr, &wait);
while (!ft_interrupt_seen && (current->state == TASK_INTERRUPTIBLE)) {
timeout = schedule_timeout(timeout);
diff --git a/drivers/char/ftape/lowlevel/ftape-io.c b/drivers/char/ftape/lowlevel/ftape-io.c
index bd3b2f117ce0..b7910d429f34 100644
--- a/drivers/char/ftape/lowlevel/ftape-io.c
+++ b/drivers/char/ftape/lowlevel/ftape-io.c
@@ -32,6 +32,7 @@
#include <asm/system.h>
#include <linux/ioctl.h>
#include <linux/mtio.h>
+#include <linux/delay.h>
#include <linux/ftape.h>
#include <linux/qic117.h>
@@ -96,19 +97,12 @@ void ftape_sleep(unsigned int time)
timeout = ticks;
save_flags(flags);
sti();
- set_current_state(TASK_INTERRUPTIBLE);
- do {
- /* Mmm. Isn't current->blocked == 0xffffffff ?
- */
- if (signal_pending(current)) {
- TRACE(ft_t_err,
- "awoken by non-blocked signal :-(");
- break; /* exit on signal */
- }
- while (current->state != TASK_RUNNING) {
- timeout = schedule_timeout(timeout);
- }
- } while (timeout);
+ msleep_interruptible(jiffies_to_msecs(timeout));
+ /* Mmm. Isn't current->blocked == 0xffffffff ?
+ */
+ if (signal_pending(current)) {
+ TRACE(ft_t_err, "awoken by non-blocked signal :-(");
+ }
restore_flags(flags);
}
TRACE_EXIT;
diff --git a/drivers/char/ftape/zftape/zftape-buffers.c b/drivers/char/ftape/zftape/zftape-buffers.c
index 3d6483983423..ec4fdaabe39b 100644
--- a/drivers/char/ftape/zftape/zftape-buffers.c
+++ b/drivers/char/ftape/zftape/zftape-buffers.c
@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/zftape.h>
@@ -119,8 +120,7 @@ void *zft_kmalloc(size_t size)
void *new;
while ((new = kmalloc(size, GFP_KERNEL)) == NULL) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(HZ/10);
+ msleep_interruptible(100);
}
memset(new, 0, size);
used_memory += size;
diff --git a/drivers/char/generic_serial.c b/drivers/char/generic_serial.c
index 45bc1be63669..fd9ed4c3434a 100644
--- a/drivers/char/generic_serial.c
+++ b/drivers/char/generic_serial.c
@@ -26,6 +26,7 @@
#include <linux/mm.h>
#include <linux/generic_serial.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
@@ -399,8 +400,7 @@ static int gs_wait_tx_flushed (void * ptr, int timeout)
gs_dprintk (GS_DEBUG_FLUSH, "Expect to finish in %d jiffies "
"(%d chars).\n", jiffies_to_transmit, charsleft);
- set_current_state (TASK_INTERRUPTIBLE);
- schedule_timeout(jiffies_to_transmit);
+ msleep_interruptible(jiffies_to_msecs(jiffies_to_transmit));
if (signal_pending (current)) {
gs_dprintk (GS_DEBUG_FLUSH, "Signal pending. Bombing out: ");
rv = -EINTR;
@@ -767,8 +767,7 @@ void gs_close(struct tty_struct * tty, struct file * filp)
if (port->blocked_open) {
if (port->close_delay) {
- set_current_state (TASK_INTERRUPTIBLE);
- schedule_timeout(port->close_delay);
+ msleep_interruptible(jiffies_to_msecs(port->close_delay));
}
wake_up_interruptible(&port->open_wait);
}
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 3c0af2cad6ee..8d569ed74bc8 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -37,6 +37,7 @@
#include <linux/tty_flip.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
+#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/hvconsole.h>
#include <asm/vio.h>
@@ -44,7 +45,7 @@
#define HVC_MAJOR 229
#define HVC_MINOR 0
-#define TIMEOUT ((HZ + 99) / 100)
+#define TIMEOUT (10)
/*
* Wait this long per iteration while trying to push buffered data to the
@@ -607,7 +608,7 @@ int khvcd(void *unused)
if (poll_mask == 0)
schedule();
else
- schedule_timeout(TIMEOUT);
+ msleep_interruptible(TIMEOUT);
}
__set_current_state(TASK_RUNNING);
} while (!kthread_should_stop());
diff --git a/drivers/char/ip2main.c b/drivers/char/ip2main.c
index fa3ca6eb5d53..e0cbd7552eb8 100644
--- a/drivers/char/ip2main.c
+++ b/drivers/char/ip2main.c
@@ -1632,8 +1632,7 @@ ip2_close( PTTY tty, struct file *pFile )
if (pCh->wopen) {
if (pCh->ClosingDelay) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(pCh->ClosingDelay);
+ msleep_interruptible(jiffies_to_msecs(pCh->ClosingDelay));
}
wake_up_interruptible(&pCh->open_wait);
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 835d6a431a00..6a943d41093c 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2283,6 +2283,7 @@ void __exit cleanup_one_si(struct smi_info *to_clean)
interface is in a clean state. */
while ((to_clean->curr_msg) || (to_clean->si_state != SI_NORMAL)) {
poll(to_clean);
+ set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
}
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index b454bf2084bf..eb8c18883a9d 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -1124,11 +1124,10 @@ static void isicom_close(struct tty_struct * tty, struct file * filp)
port->tty = NULL;
if (port->blocked_open) {
if (port->close_delay) {
- set_current_state(TASK_INTERRUPTIBLE);
#ifdef ISICOM_DEBUG
printk(KERN_DEBUG "ISICOM: scheduling until time out.\n");
#endif
- schedule_timeout(port->close_delay);
+ msleep_interruptible(jiffies_to_msecs(port->close_delay));
}
wake_up_interruptible(&port->open_wait);
}
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 9460f6d0bf97..832806725c1e 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -691,7 +691,6 @@ static int stli_rawopen(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, i
static int stli_rawclose(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait);
static int stli_waitcarrier(stlibrd_t *brdp, stliport_t *portp, struct file *filp);
static void stli_dohangup(void *arg);
-static void stli_delay(int len);
static int stli_setport(stliport_t *portp);
static int stli_cmdwait(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback);
static void stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback);
@@ -1180,7 +1179,7 @@ static void stli_close(struct tty_struct *tty, struct file *filp)
if (portp->openwaitcnt) {
if (portp->close_delay)
- stli_delay(portp->close_delay);
+ msleep_interruptible(jiffies_to_msecs(portp->close_delay));
wake_up_interruptible(&portp->open_wait);
}
@@ -1478,25 +1477,6 @@ static int stli_setport(stliport_t *portp)
/*****************************************************************************/
/*
- * Wait for a specified delay period, this is not a busy-loop. It will
- * give up the processor while waiting. Unfortunately this has some
- * rather intimate knowledge of the process management stuff.
- */
-
-static void stli_delay(int len)
-{
-#ifdef DEBUG
- printk("stli_delay(len=%d)\n", len);
-#endif
- if (len > 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(len);
- }
-}
-
-/*****************************************************************************/
-
-/*
* Possibly need to wait for carrier (DCD signal) to come high. Say
* maybe because if we are clocal then we don't need to wait...
*/
@@ -2504,7 +2484,7 @@ static void stli_waituntilsent(struct tty_struct *tty, int timeout)
while (test_bit(ST_TXBUSY, &portp->state)) {
if (signal_pending(current))
break;
- stli_delay(2);
+ msleep_interruptible(20);
if (time_after_eq(jiffies, tend))
break;
}
diff --git a/drivers/char/lcd.c b/drivers/char/lcd.c
index 64837783d180..717d812c2526 100644
--- a/drivers/char/lcd.c
+++ b/drivers/char/lcd.c
@@ -24,6 +24,7 @@
#include <linux/mc146818rtc.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -583,8 +584,7 @@ static long lcd_read(struct inode *inode, struct file *file, char *buf,
lcd_waiters++;
while (((buttons_now = (long) button_pressed()) == 0) &&
!(signal_pending(current))) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(2 * HZ);
+ msleep_interruptible(2000);
}
lcd_waiters--;
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index d45d381d444f..11373907f29f 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -625,8 +625,7 @@ static void moxa_close(struct tty_struct *tty, struct file *filp)
ch->tty = NULL;
if (ch->blocked_open) {
if (ch->close_delay) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(ch->close_delay);
+ msleep_interruptible(jiffies_to_msecs(ch->close_delay));
}
wake_up_interruptible(&ch->open_wait);
}
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 147c1ba2efe8..9ed493ed2220 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -59,6 +59,7 @@
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -818,8 +819,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
info->tty = NULL;
if (info->blocked_open) {
if (info->close_delay) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(info->close_delay);
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
}
wake_up_interruptible(&info->open_wait);
}
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 5d7f2154247d..f334dbfcf21f 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2609,8 +2609,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
if (info->blocked_open) {
if (info->close_delay) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(info->close_delay);
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
}
wake_up_interruptible(&info->open_wait);
}
@@ -2665,8 +2664,7 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
if (info->params.mode == MGSL_MODE_HDLC) {
while (info->tx_active) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(char_time);
+ msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
@@ -2675,8 +2673,7 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
} else {
while ((info->tx_count || info->tx_active) &&
info->tx_enabled) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(char_time);
+ msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
@@ -4129,8 +4126,7 @@ BOOLEAN irq_test(MGSLPC_INFO *info)
end_time=100;
while(end_time-- && !info->irq_occurred) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(10));
+ msleep_interruptible(10);
}
info->testing_irq = FALSE;
diff --git a/drivers/char/pcxx.c b/drivers/char/pcxx.c
index 061237bb0c33..97fd9da9c5ef 100644
--- a/drivers/char/pcxx.c
+++ b/drivers/char/pcxx.c
@@ -538,8 +538,7 @@ static void pcxe_close(struct tty_struct * tty, struct file * filp)
info->tty = NULL;
if(info->blocked_open) {
if(info->close_delay) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(info->close_delay);
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
}
wake_up_interruptible(&info->open_wait);
}
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index 0cc02371a845..a8f443e1461c 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -330,8 +330,7 @@ int RIODelay (struct Port *PortP, int njiffies)
func_enter ();
rio_dprintk (RIO_DEBUG_DELAY, "delaying %d jiffies\n", njiffies);
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(njiffies);
+ msleep_interruptible(jiffies_to_msecs(njiffies));
func_exit();
if (signal_pending(current))
@@ -347,8 +346,7 @@ int RIODelay_ni (struct Port *PortP, int njiffies)
func_enter ();
rio_dprintk (RIO_DEBUG_DELAY, "delaying %d jiffies (ni)\n", njiffies);
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(njiffies);
+ msleep(jiffies_to_msecs(njiffies));
func_exit();
return !RIO_FAIL;
}
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index a35ea03a3227..616ecc65a06b 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -45,6 +45,7 @@
#include <linux/fcntl.h>
#include <linux/major.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <asm/uaccess.h>
@@ -1114,8 +1115,7 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
*/
timeout = jiffies+HZ;
while(port->IER & IER_TXEMPTY) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(port->timeout);
+ msleep_interruptible(jiffies_to_msecs(port->timeout));
if (time_after(jiffies, timeout))
break;
}
@@ -1130,8 +1130,7 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
port->tty = NULL;
if (port->blocked_open) {
if (port->close_delay) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(port->close_delay);
+ msleep_interruptible(jiffies_to_msecs(port->close_delay));
}
wake_up_interruptible(&port->open_wait);
}
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 72af1bec91e5..9f13fad3fc11 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -1112,8 +1112,7 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
if (info->blocked_open) {
if (info->close_delay) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(info->close_delay);
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
}
wake_up_interruptible(&info->open_wait);
} else {
@@ -1538,8 +1537,7 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
printk(KERN_INFO "txcnt = %d (jiff=%lu,check=%d)...", txcnt, jiffies, check_time);
#endif
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(check_time);
+ msleep_interruptible(jiffies_to_msecs(check_time));
if (signal_pending(current))
break;
}
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 885621edb4a6..b2538a6b85b0 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -1840,8 +1840,7 @@ cy_close(struct tty_struct * tty, struct file * filp)
info->tty = 0;
if (info->blocked_open) {
if (info->close_delay) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(info->close_delay);
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
}
wake_up_interruptible(&info->open_wait);
}
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index bf82f06d4011..568edc38313c 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -1452,8 +1452,7 @@ static void sx_close(struct tty_struct * tty, struct file * filp)
*/
timeout = jiffies+HZ;
while(port->IER & IER_TXEMPTY) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(port->timeout);
+ msleep_interruptible(jiffies_to_msecs(port->timeout));
if (time_after(jiffies, timeout)) {
printk (KERN_INFO "Timeout waiting for close\n");
break;
@@ -1470,8 +1469,7 @@ static void sx_close(struct tty_struct * tty, struct file * filp)
port->tty = NULL;
if (port->blocked_open) {
if (port->close_delay) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(port->close_delay);
+ msleep_interruptible(jiffies_to_msecs(port->close_delay));
}
wake_up_interruptible(&port->open_wait);
}
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 90018ce17c57..38a7e846b813 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -42,6 +42,7 @@
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/device.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -512,7 +513,6 @@ static int stl_clrportstats(stlport_t *portp, comstats_t __user *cp);
static int stl_getportstruct(stlport_t __user *arg);
static int stl_getbrdstruct(stlbrd_t __user *arg);
static int stl_waitcarrier(stlport_t *portp, struct file *filp);
-static void stl_delay(int len);
static void stl_eiointr(stlbrd_t *brdp);
static void stl_echatintr(stlbrd_t *brdp);
static void stl_echmcaintr(stlbrd_t *brdp);
@@ -1204,7 +1204,7 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
if (portp->openwaitcnt) {
if (portp->close_delay)
- stl_delay(portp->close_delay);
+ msleep_interruptible(jiffies_to_msecs(portp->close_delay));
wake_up_interruptible(&portp->open_wait);
}
@@ -1216,25 +1216,6 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
/*****************************************************************************/
/*
- * Wait for a specified delay period, this is not a busy-loop. It will
- * give up the processor while waiting. Unfortunately this has some
- * rather intimate knowledge of the process management stuff.
- */
-
-static void stl_delay(int len)
-{
-#ifdef DEBUG
- printk("stl_delay(len=%d)\n", len);
-#endif
- if (len > 0) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(len);
- }
-}
-
-/*****************************************************************************/
-
-/*
* Write routine. Take data and stuff it in to the TX ring queue.
* If transmit interrupts are not running then start them.
*/
@@ -1854,7 +1835,7 @@ static void stl_waituntilsent(struct tty_struct *tty, int timeout)
while (stl_datastate(portp)) {
if (signal_pending(current))
break;
- stl_delay(2);
+ msleep_interruptible(20);
if (time_after_eq(jiffies, tend))
break;
}
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index a990eb7aa6b5..3f95c4c6dff6 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -1499,7 +1499,7 @@ static void sx_close (void *ptr)
sx_send_command (port, HS_CLOSE, 0, 0);
while (to-- && (sx_read_channel_byte (port, hi_hstat) != HS_IDLE_CLOSED)) {
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout (1);
if (signal_pending (current))
break;
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 0a1cc1d3630f..edf89fffe4e1 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -82,6 +82,7 @@
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/netdevice.h>
@@ -3259,8 +3260,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
if (info->blocked_open) {
if (info->close_delay) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(info->close_delay);
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
}
wake_up_interruptible(&info->open_wait);
}
@@ -3326,8 +3326,7 @@ static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
if ( info->params.mode == MGSL_MODE_HDLC ||
info->params.mode == MGSL_MODE_RAW ) {
while (info->tx_active) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(char_time);
+ msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
@@ -3336,8 +3335,7 @@ static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
} else {
while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
info->tx_enabled) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(char_time);
+ msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
@@ -7200,8 +7198,7 @@ BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
EndTime=100;
while( EndTime-- && !info->irq_occurred ) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(10));
+ msleep_interruptible(10);
}
spin_lock_irqsave(&info->irq_spinlock,flags);
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 5648ed35562f..9ccce79fdf4e 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -878,8 +878,7 @@ static void close(struct tty_struct *tty, struct file *filp)
if (info->blocked_open) {
if (info->close_delay) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(info->close_delay);
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
}
wake_up_interruptible(&info->open_wait);
}
@@ -1164,8 +1163,7 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
if ( info->params.mode == MGSL_MODE_HDLC ) {
while (info->tx_active) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(char_time);
+ msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
@@ -1175,8 +1173,7 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
//TODO: determine if there is something similar to USC16C32
// TXSTATUS_ALL_SENT status
while ( info->tx_active && info->tx_enabled) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(char_time);
+ msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
@@ -5209,8 +5206,7 @@ int irq_test(SLMP_INFO *info)
timeout=100;
while( timeout-- && !info->irq_occurred ) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(10));
+ msleep_interruptible(10);
}
spin_lock_irqsave(&info->lock,flags);
@@ -5360,8 +5356,7 @@ int loopback_test(SLMP_INFO *info)
/* wait for receive complete */
/* Set a timeout for waiting for interrupt. */
for ( timeout = 100; timeout; --timeout ) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(10));
+ msleep_interruptible(10);
if (rx_get_frame(info)) {
rc = TRUE;
diff --git a/drivers/char/tpqic02.c b/drivers/char/tpqic02.c
index 66881e953399..d812253b4473 100644
--- a/drivers/char/tpqic02.c
+++ b/drivers/char/tpqic02.c
@@ -554,10 +554,9 @@ static int wait_for_ready(time_t timeout)
/* not ready and no exception && timeout not expired yet */
while (((stat = inb_p(QIC02_STAT_PORT) & QIC02_STAT_MASK) == QIC02_STAT_MASK) && time_before(jiffies, spin_t)) {
/* be `nice` to other processes on long operations... */
- current->state = TASK_INTERRUPTIBLE;
/* nap 0.30 sec between checks, */
/* but could be woken up earlier by signals... */
- schedule_timeout(3 * HZ / 10);
+ msleep_interruptible(300);
}
/* don't use jiffies for this test because it may have changed by now */
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 89457cc2ed72..9d5e4ba9dc25 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1981,10 +1981,10 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
static int tioccons(struct file *file)
{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
if (file->f_op->write == redirected_tty_write) {
struct file *f;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
spin_lock(&redirect_lock);
f = redirect;
redirect = NULL;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 31da37da5f40..cffa77b92629 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -3077,6 +3077,10 @@ int con_font_get(int currcons, struct console_font_op *op)
if (rc)
goto out;
+ op->height = font.height;
+ op->width = font.width;
+ op->charcount = font.charcount;
+
if (op->data && copy_to_user(op->data, font.data, c))
rc = -EFAULT;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index cc6b66e34e37..895f245fcced 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1932,6 +1932,8 @@ static ide_startstop_t cdrom_start_write(ide_drive_t *drive, struct request *rq)
info->dma = drive->using_dma ? 1 : 0;
info->cmd = WRITE;
+ info->devinfo.media_written = 1;
+
/* Start sending the write request to the drive. */
return cdrom_start_packet_command(drive, 32768, cdrom_start_write_cont);
}
@@ -1999,7 +2001,7 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
}
CDROM_CONFIG_FLAGS(drive)->seeking = 0;
}
- if (IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap) {
+ if ((rq_data_dir(rq) == READ) && IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap) {
action = cdrom_start_seek(drive, block);
} else {
if (rq_data_dir(rq) == READ)
@@ -2960,8 +2962,10 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
CDROM_CONFIG_FLAGS(drive)->no_eject = 0;
if (cap.cd_r_write)
CDROM_CONFIG_FLAGS(drive)->cd_r = 1;
- if (cap.cd_rw_write)
+ if (cap.cd_rw_write) {
CDROM_CONFIG_FLAGS(drive)->cd_rw = 1;
+ CDROM_CONFIG_FLAGS(drive)->ram = 1;
+ }
if (cap.test_write)
CDROM_CONFIG_FLAGS(drive)->test_write = 1;
if (cap.dvd_ram_read || cap.dvd_r_read || cap.dvd_rom)
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index cc9c999f9a8b..2a9ff6f0229b 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -70,8 +70,7 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
if (!ret)
break;
- set_current_state(TASK_INTERRUPTIBLE);
- if (schedule_timeout (HZ/3))
+ if (msleep_interruptible(334))
return -EINTR;
}
@@ -1496,7 +1495,7 @@ static int nodemgr_host_thread(void *__hi)
* to make sure things settle down. */
for (i = 0; i < 4 ; i++) {
set_current_state(TASK_INTERRUPTIBLE);
- if (schedule_timeout(HZ/16)) {
+ if (msleep_interruptible(63)) {
up(&nodemgr_serialize);
goto caught_signal;
}
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 47de79f21797..aa9ab3378e4c 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -357,8 +357,7 @@ static int sbp2util_down_timeout(atomic_t *done, int timeout)
int i;
for (i = timeout; (i > 0 && atomic_read(done) == 0); i-= HZ/10) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (schedule_timeout(HZ/10)) /* 100ms */
+ if (msleep_interruptible(100)) /* 100ms */
return(1);
}
return ((i > 0) ? 0:1);
@@ -1088,7 +1087,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
scsi_id->query_logins_orb->query_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
SBP2_DEBUG("sbp2_query_logins: query_response_hi/lo initialized");
- scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(QUERY_LOGINS_REQUEST);
+ scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
@@ -1199,7 +1198,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
scsi_id->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
SBP2_DEBUG("sbp2_login_device: login_response_hi/lo initialized");
- scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(LOGIN_REQUEST);
+ scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */
scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login); /* Exclusive access to device */
scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */
@@ -1325,7 +1324,7 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
scsi_id->logout_orb->reserved3 = 0x0;
scsi_id->logout_orb->reserved4 = 0x0;
- scsi_id->logout_orb->login_ID_misc = ORB_SET_FUNCTION(LOGOUT_REQUEST);
+ scsi_id->logout_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_LOGOUT_REQUEST);
scsi_id->logout_orb->login_ID_misc |= ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
/* Notify us when complete */
@@ -1390,7 +1389,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
scsi_id->reconnect_orb->reserved3 = 0x0;
scsi_id->reconnect_orb->reserved4 = 0x0;
- scsi_id->reconnect_orb->login_ID_misc = ORB_SET_FUNCTION(RECONNECT_REQUEST);
+ scsi_id->reconnect_orb->login_ID_misc = ORB_SET_FUNCTION(SBP2_RECONNECT_REQUEST);
scsi_id->reconnect_orb->login_ID_misc |=
ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index d12f8e160a80..94b59f80bf90 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -52,15 +52,15 @@ struct sbp2_command_orb {
u8 cdb[12];
};
-#define LOGIN_REQUEST 0x0
-#define QUERY_LOGINS_REQUEST 0x1
-#define RECONNECT_REQUEST 0x3
-#define SET_PASSWORD_REQUEST 0x4
-#define LOGOUT_REQUEST 0x7
-#define ABORT_TASK_REQUEST 0xb
-#define ABORT_TASK_SET 0xc
-#define LOGICAL_UNIT_RESET 0xe
-#define TARGET_RESET_REQUEST 0xf
+#define SBP2_LOGIN_REQUEST 0x0
+#define SBP2_QUERY_LOGINS_REQUEST 0x1
+#define SBP2_RECONNECT_REQUEST 0x3
+#define SBP2_SET_PASSWORD_REQUEST 0x4
+#define SBP2_LOGOUT_REQUEST 0x7
+#define SBP2_ABORT_TASK_REQUEST 0xb
+#define SBP2_ABORT_TASK_SET 0xc
+#define SBP2_LOGICAL_UNIT_RESET 0xe
+#define SBP2_TARGET_RESET_REQUEST 0xf
#define ORB_SET_LUN(value) (value & 0xffff)
#define ORB_SET_FUNCTION(value) ((value & 0xf) << 16)
diff --git a/drivers/isdn/act2000/act2000_isa.c b/drivers/isdn/act2000/act2000_isa.c
index b363d0976465..df7923c5b843 100644
--- a/drivers/isdn/act2000/act2000_isa.c
+++ b/drivers/isdn/act2000/act2000_isa.c
@@ -18,13 +18,6 @@
static act2000_card *irq2card_map[16];
-static void
-act2000_isa_delay(long t)
-{
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(t);
-}
-
/*
* Reset Controller, then try to read the Card's signature.
+ Return:
@@ -419,7 +412,7 @@ act2000_isa_download(act2000_card * card, act2000_ddef __user * cb)
if (!act2000_isa_reset(card->port))
return -ENXIO;
- act2000_isa_delay(HZ / 2);
+ msleep_interruptible(500);
if(copy_from_user(&cblock, cb, sizeof(cblock)))
return -EFAULT;
length = cblock.length;
@@ -452,6 +445,6 @@ act2000_isa_download(act2000_card * card, act2000_ddef __user * cb)
p += l;
}
kfree(buf);
- act2000_isa_delay(HZ / 2);
+ msleep_interruptible(500);
return (act2000_isa_getid(card));
}
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 8d58cfd45b0e..8cd4dde06a81 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -24,6 +24,7 @@
#include <linux/capi.h>
#include <linux/kernelcapi.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <asm/uaccess.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
@@ -831,8 +832,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
while (card->cardstate != CARD_RUNNING) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/10); /* 0.1 sec */
+ msleep_interruptible(100); /* 0.1 sec */
if (signal_pending(current)) {
capi_ctr_put(card);
@@ -856,8 +856,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
while (card->cardstate > CARD_DETECTED) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/10); /* 0.1 sec */
+ msleep_interruptible(100); /* 0.1 sec */
if (signal_pending(current))
return -EINTR;
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 26bb71d96b38..15b80c844205 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -843,9 +843,8 @@ static int init_card(struct IsdnCardState *cs)
}
while (cnt) {
cs->cardmsg(cs, CARD_INIT, NULL);
- set_current_state(TASK_UNINTERRUPTIBLE);
/* Timeout 10ms */
- schedule_timeout((10 * HZ) / 1000);
+ msleep(10);
printk(KERN_INFO "%s: IRQ %d count %d\n",
CardType[cs->typ], cs->irq, kstat_irqs(cs->irq));
if (kstat_irqs(cs->irq) == irq_cnt) {
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 3b526d704fa6..21271465bd2f 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -691,8 +691,7 @@ Elsa_card_msg(struct IsdnCardState *cs, int mt, void *arg)
byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
byteout(cs->hw.elsa.timer, 0);
spin_unlock_irqrestore(&cs->lock, flags);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout((110*HZ)/1000);
+ msleep(110);
spin_lock_irqsave(&cs->lock, flags);
cs->hw.elsa.ctrl_reg &= ~ELSA_ENA_TIMER_INT;
byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 3946536d112b..c2db52696a86 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1619,8 +1619,7 @@ hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
inithfcpci(cs);
reset_hfcpci(cs);
spin_unlock_irqrestore(&cs->lock, flags);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout((80 * HZ) / 1000); /* Timeout 80ms */
+ msleep(80); /* Timeout 80ms */
/* now switch timer interrupt off */
spin_lock_irqsave(&cs->lock, flags);
cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 07a07aab792b..685fcc2d7256 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -314,8 +314,7 @@ release_io_hfcsx(struct IsdnCardState *cs)
cs->hw.hfcsx.int_m2 = 0; /* interrupt output off ! */
Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2);
Write_hfc(cs, HFCSX_CIRM, HFCSX_RESET); /* Reset On */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout((30 * HZ) / 1000); /* Timeout 30ms */
+ msleep(30); /* Timeout 30ms */
Write_hfc(cs, HFCSX_CIRM, 0); /* Reset Off */
del_timer(&cs->hw.hfcsx.timer);
release_region(cs->hw.hfcsx.base, 2); /* release IO-Block */
@@ -1367,8 +1366,7 @@ hfcsx_card_msg(struct IsdnCardState *cs, int mt, void *arg)
spin_lock_irqsave(&cs->lock, flags);
inithfcsx(cs);
spin_unlock_irqrestore(&cs->lock, flags);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout((80 * HZ) / 1000); /* Timeout 80ms */
+ msleep(80); /* Timeout 80ms */
/* now switch timer interrupt off */
spin_lock_irqsave(&cs->lock, flags);
cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_TIMER;
diff --git a/drivers/isdn/hisax/hfcscard.c b/drivers/isdn/hisax/hfcscard.c
index ea8da997039f..6fc55fea1702 100644
--- a/drivers/isdn/hisax/hfcscard.c
+++ b/drivers/isdn/hisax/hfcscard.c
@@ -125,8 +125,7 @@ hfcs_card_msg(struct IsdnCardState *cs, int mt, void *arg)
init2bds0(cs);
spin_unlock_irqrestore(&cs->lock, flags);
delay = (80*HZ)/1000 +1;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout((80*HZ)/1000);
+ msleep(80);
spin_lock_irqsave(&cs->lock, flags);
cs->hw.hfcD.ctmt |= HFCD_TIM800;
cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt);
diff --git a/drivers/isdn/hysdn/boardergo.c b/drivers/isdn/hysdn/boardergo.c
index 2f2731520d40..e19a01a305a9 100644
--- a/drivers/isdn/hysdn/boardergo.c
+++ b/drivers/isdn/hysdn/boardergo.c
@@ -21,6 +21,7 @@
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include "hysdn_defs.h"
@@ -246,8 +247,7 @@ ergo_writebootimg(struct HYSDN_CARD *card, uchar * buf, ulong offs)
/* the interrupts are still masked */
sti();
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout((20 * HZ) / 1000); /* Timeout 20ms */
+ msleep_interruptible(20); /* Timeout 20ms */
if (((tDpramBootSpooler *) card->dpram)->Len != DPRAM_SPOOLER_DATA_SIZE) {
if (card->debug_flags & LOG_POF_CARD)
@@ -386,8 +386,7 @@ ergo_waitpofready(struct HYSDN_CARD *card)
return (0); /* success */
} /* data has arrived */
sti();
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout((50 * HZ) / 1000); /* Timeout 50ms */
+ msleep_interruptible(50); /* Timeout 50ms */
} /* wait until timeout */
if (card->debug_flags & LOG_POF_CARD)
diff --git a/drivers/isdn/hysdn/hysdn_sched.c b/drivers/isdn/hysdn/hysdn_sched.c
index 1a2015ecfda5..4fa3b01707cd 100644
--- a/drivers/isdn/hysdn/hysdn_sched.c
+++ b/drivers/isdn/hysdn/hysdn_sched.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include "hysdn_defs.h"
@@ -160,8 +161,7 @@ hysdn_tx_cfgline(hysdn_card * card, uchar * line, word chan)
if (card->debug_flags & LOG_SCHED_ASYN)
hysdn_addlog(card, "async tx-cfg delayed");
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout((20 * HZ) / 1000); /* Timeout 20ms */
+ msleep_interruptible(20); /* Timeout 20ms */
if (!--cnt) {
restore_flags(flags);
return (-ERR_ASYNC_TIME); /* timed out */
@@ -190,8 +190,7 @@ hysdn_tx_cfgline(hysdn_card * card, uchar * line, word chan)
if (card->debug_flags & LOG_SCHED_ASYN)
hysdn_addlog(card, "async tx-cfg waiting for tx-ready");
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout((20 * HZ) / 1000); /* Timeout 20ms */
+ msleep_interruptible(20); /* Timeout 20ms */
if (!--cnt) {
restore_flags(flags);
return (-ERR_ASYNC_TIME); /* timed out */
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 921c3c2ab97a..6b6d839930f1 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -13,6 +13,7 @@
#include <linux/config.h>
#include <linux/isdn.h>
+#include <linux/delay.h>
#include "isdn_common.h"
#include "isdn_tty.h"
#ifdef CONFIG_ISDN_AUDIO
@@ -1748,8 +1749,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
tty->closing = 0;
module_put(info->owner);
if (info->blocked_open) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/2);
+ msleep_interruptible(500);
wake_up_interruptible(&info->open_wait);
}
info->flags &= ~(ISDN_ASYNC_NORMAL_ACTIVE | ISDN_ASYNC_CLOSING);
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 0c256d698686..70989aa4c157 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -762,8 +762,7 @@ icn_check_loader(int cardnumber)
#ifdef BOOT_DEBUG
printk(KERN_DEBUG "Loader %d TO?\n", cardnumber);
#endif
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(ICN_BOOT_TIMEOUT1);
+ msleep_interruptible(ICN_BOOT_TIMEOUT1);
} else {
#ifdef BOOT_DEBUG
printk(KERN_DEBUG "Loader %d OK\n", cardnumber);
@@ -788,8 +787,7 @@ icn_check_loader(int cardnumber)
int slsec = sec; \
printk(KERN_DEBUG "SLEEP(%d)\n",slsec); \
while (slsec) { \
- current->state = TASK_INTERRUPTIBLE; \
- schedule_timeout(HZ); \
+ msleep_interruptible(1000); \
slsec--; \
} \
}
@@ -950,7 +948,7 @@ icn_loadproto(u_char __user * buffer, icn_card * card)
icn_maprelease_channel(card, 0);
return -EIO;
}
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(10);
}
}
@@ -974,8 +972,7 @@ icn_loadproto(u_char __user * buffer, icn_card * card)
#ifdef BOOT_DEBUG
printk(KERN_DEBUG "Proto TO?\n");
#endif
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(ICN_BOOT_TIMEOUT1);
+ msleep_interruptible(ICN_BOOT_TIMEOUT1);
} else {
if ((card->secondhalf) || (!card->doubleS0)) {
#ifdef BOOT_DEBUG
@@ -1271,9 +1268,9 @@ icn_command(isdn_ctrl * c, icn_card * card)
if (!card->leased) {
card->leased = 1;
while (card->ptype == ISDN_PTYPE_UNKNOWN) {
- schedule_timeout(ICN_BOOT_TIMEOUT1);
+ msleep_interruptible(ICN_BOOT_TIMEOUT1);
}
- schedule_timeout(ICN_BOOT_TIMEOUT1);
+ msleep_interruptible(ICN_BOOT_TIMEOUT1);
sprintf(cbuf, "00;FV2ON\n01;EAZ%c\n02;EAZ%c\n",
(a & 1)?'1':'C', (a & 2)?'2':'C');
i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
diff --git a/drivers/isdn/icn/icn.h b/drivers/isdn/icn/icn.h
index 4ab15c917ed4..28548318a389 100644
--- a/drivers/isdn/icn/icn.h
+++ b/drivers/isdn/icn/icn.h
@@ -70,8 +70,7 @@ typedef struct icn_cdef {
#define ICN_FLAGS_RUNNING 4 /* Cards driver activated */
#define ICN_FLAGS_RBTIMER 8 /* cyclic scheduling of B-Channel-poll */
-#define ICN_BOOT_TIMEOUT1 (HZ) /* Delay for Boot-download (jiffies) */
-#define ICN_CHANLOCK_DELAY (HZ/10) /* Delay for Channel-mapping (jiffies) */
+#define ICN_BOOT_TIMEOUT1 1000 /* Delay for Boot-download (msecs) */
#define ICN_TIMER_BCREAD (HZ/100) /* B-Channel poll-cycle */
#define ICN_TIMER_DCREAD (HZ/2) /* D-Channel poll-cycle */
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index 544f41b413ed..7f17ab1ac7ee 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/sched.h>
#include "isdnloop.h"
static char *revision = "$Revision: 1.11.6.7 $";
@@ -1161,8 +1162,10 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
if (!card->leased) {
card->leased = 1;
while (card->ptype == ISDN_PTYPE_UNKNOWN) {
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(10);
}
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(10);
sprintf(cbuf, "00;FV2ON\n01;EAZ1\n02;EAZ2\n");
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
diff --git a/drivers/isdn/sc/card.h b/drivers/isdn/sc/card.h
index bdb27fae6f20..8e44928cdf1c 100644
--- a/drivers/isdn/sc/card.h
+++ b/drivers/isdn/sc/card.h
@@ -24,24 +24,25 @@
* We need these if they're not already included
*/
#include <linux/timer.h>
+#include <linux/time.h>
#include <linux/isdnif.h>
#include "message.h"
/*
* Amount of time to wait for a reset to complete
*/
-#define CHECKRESET_TIME milliseconds(4000)
+#define CHECKRESET_TIME msecs_to_jiffies(4000)
/*
* Amount of time between line status checks
*/
-#define CHECKSTAT_TIME milliseconds(8000)
+#define CHECKSTAT_TIME msecs_to_jiffies(8000)
/*
* The maximum amount of time to wait for a message response
* to arrive. Use exclusively by send_and_receive
*/
-#define SAR_TIMEOUT milliseconds(10000)
+#define SAR_TIMEOUT msecs_to_jiffies(10000)
/*
* Macro to determine is a card id is valid
diff --git a/drivers/isdn/sc/hardware.h b/drivers/isdn/sc/hardware.h
index adde8fb14665..9e6d5302bf8e 100644
--- a/drivers/isdn/sc/hardware.h
+++ b/drivers/isdn/sc/hardware.h
@@ -104,9 +104,6 @@
* Some handy macros
*/
-/* Return the number of jiffies in a given number of msecs */
-#define milliseconds(x) (((x)*HZ)/1000)
-
/* Determine if a channel number is valid for the adapter */
#define IS_VALID_CHANNEL(y,x) ((x>0) && (x <= sc_adapter[y]->channels))
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index 930845cff800..a10c6af42a97 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include "includes.h"
#include "hardware.h"
#include "card.h"
@@ -167,8 +168,7 @@ static int __init sc_init(void)
if(do_reset) {
pr_debug("Doing a SAFE probe reset\n");
outb(0xFF, io[b] + RESET_OFFSET);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(milliseconds(10000));
+ msleep_interruptible(10000);
}
pr_debug("RAM Base for board %d is 0x%x, %s probe\n", b, ram[b],
ram[b] == 0 ? "will" : "won't");
@@ -500,8 +500,7 @@ int identify_board(unsigned long rambase, unsigned int iobase)
* Try to identify a PRI card
*/
outb(PRI_BASEPG_VAL, pgport);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ msleep_interruptible(1000);
sig = readl(rambase + SIG_OFFSET);
pr_debug("Looking for a signature, got 0x%x\n", sig);
if(sig == SIGNATURE)
@@ -511,8 +510,7 @@ int identify_board(unsigned long rambase, unsigned int iobase)
* Try to identify a PRI card
*/
outb(BRI_BASEPG_VAL, pgport);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ msleep_interruptible(1000);
sig = readl(rambase + SIG_OFFSET);
pr_debug("Looking for a signature, got 0x%x\n", sig);
if(sig == SIGNATURE)
diff --git a/drivers/isdn/tpam/tpam.h b/drivers/isdn/tpam/tpam.h
index c1456d4b6170..da3319453cd0 100644
--- a/drivers/isdn/tpam/tpam.h
+++ b/drivers/isdn/tpam/tpam.h
@@ -14,6 +14,8 @@
#ifndef _TPAM_PRIV_H_
#define _TPAM_PRIV_H_
+//#define DEBUG /* uncomment if you want debugging output */
+#include <linux/kernel.h>
#include <linux/isdnif.h>
#include <linux/init.h>
#include <linux/workqueue.h>
@@ -224,13 +226,4 @@ extern void hdlc_encode_modem(u8 *, u32, u8 *, u32 *);
extern void hdlc_no_accm_encode(u8 *, u32, u8 *, u32 *);
extern u32 hdlc_no_accm_decode(u8 *, u32);
-/* Define this to enable debug tracing prints */
-#undef DEBUG
-
-#ifdef DEBUG
-#define dprintk printk
-#else
-#define dprintk while(0) printk
-#endif
-
#endif /* _TPAM_H_ */
diff --git a/drivers/isdn/tpam/tpam_commands.c b/drivers/isdn/tpam/tpam_commands.c
index 9cf6ef14fb1d..60f75184a320 100644
--- a/drivers/isdn/tpam/tpam_commands.c
+++ b/drivers/isdn/tpam/tpam_commands.c
@@ -45,7 +45,7 @@ int tpam_command(isdn_ctrl *c) {
tpam_card *card;
unsigned long argp;
- dprintk("TurboPAM(tpam_command) card=%d, command=%d\n",
+ pr_debug("TurboPAM(tpam_command) card=%d, command=%d\n",
c->driver, c->command);
/* search for the board */
@@ -75,7 +75,7 @@ int tpam_command(isdn_ctrl *c) {
return tpam_command_ioctl_loopmode(card,
0);
default:
- dprintk("TurboPAM(tpam_command): "
+ pr_debug("TurboPAM(tpam_command): "
"invalid tpam ioctl %ld\n",
c->arg);
return -EINVAL;
@@ -95,7 +95,7 @@ int tpam_command(isdn_ctrl *c) {
case ISDN_CMD_PROCEED:
return tpam_command_proceed(card, c->arg);
default:
- dprintk("TurboPAM(tpam_command): "
+ pr_debug("TurboPAM(tpam_command): "
"unknown or unused isdn ioctl %d\n",
c->command);
return -EINVAL;
@@ -117,7 +117,7 @@ int tpam_command(isdn_ctrl *c) {
static int tpam_command_ioctl_dspload(tpam_card *card, u32 arg) {
tpam_dsp_ioctl tdl;
- dprintk("TurboPAM(tpam_command_ioctl_dspload): card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_command_ioctl_dspload): card=%d\n", card->id);
/* get the IOCTL parameter from userspace */
if (copy_from_user(&tdl, (void __user *)arg, sizeof(tpam_dsp_ioctl)))
@@ -147,7 +147,7 @@ static int tpam_command_ioctl_dspload(tpam_card *card, u32 arg) {
static int tpam_command_ioctl_dspsave(tpam_card *card, u32 arg) {
tpam_dsp_ioctl tdl;
- dprintk("TurboPAM(tpam_command_ioctl_dspsave): card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_command_ioctl_dspsave): card=%d\n", card->id);
/* get the IOCTL parameter from userspace */
if (copy_from_user(&tdl, (void __user *)arg, sizeof(tpam_dsp_ioctl)))
@@ -178,7 +178,7 @@ static int tpam_command_ioctl_dsprun(tpam_card *card) {
isdn_ctrl ctrl;
struct sk_buff *skb;
- dprintk("TurboPAM(tpam_command_ioctl_dsprun): card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_command_ioctl_dsprun): card=%d\n", card->id);
/* board must _not_ be running */
if (card->running)
@@ -297,7 +297,7 @@ static int tpam_command_dial(tpam_card *card, u32 channel, u8 *phone) {
struct sk_buff *skb;
isdn_ctrl ctrl;
- dprintk("TurboPAM(tpam_command_dial): card=%d, channel=%lu, phone=%s\n",
+ pr_debug("TurboPAM(tpam_command_dial): card=%d, channel=%lu, phone=%s\n",
card->id, (unsigned long)channel, phone);
/* board must be running */
@@ -341,7 +341,7 @@ static int tpam_command_dial(tpam_card *card, u32 channel, u8 *phone) {
*/
static int tpam_command_setl2(tpam_card *card, u32 channel, u8 proto) {
- dprintk("TurboPAM(tpam_command_setl2): card=%d, channel=%lu, proto=%d\n",
+ pr_debug("TurboPAM(tpam_command_setl2): card=%d, channel=%lu, proto=%d\n",
card->id, (unsigned long)channel, proto);
/* board must be running */
@@ -376,7 +376,7 @@ static int tpam_command_acceptd(tpam_card *card, u32 channel) {
isdn_ctrl ctrl;
struct sk_buff *skb;
- dprintk("TurboPAM(tpam_command_acceptd): card=%d, channel=%lu\n",
+ pr_debug("TurboPAM(tpam_command_acceptd): card=%d, channel=%lu\n",
card->id, (unsigned long)channel);
/* board must be running */
@@ -410,7 +410,7 @@ static int tpam_command_acceptd(tpam_card *card, u32 channel) {
static int tpam_command_acceptb(tpam_card *card, u32 channel) {
isdn_ctrl ctrl;
- dprintk("TurboPAM(tpam_command_acceptb): card=%d, channel=%lu\n",
+ pr_debug("TurboPAM(tpam_command_acceptb): card=%d, channel=%lu\n",
card->id, (unsigned long)channel);
/* board must be running */
@@ -437,7 +437,7 @@ static int tpam_command_acceptb(tpam_card *card, u32 channel) {
static int tpam_command_hangup(tpam_card *card, u32 channel) {
struct sk_buff *skb;
- dprintk("TurboPAM(tpam_command_hangup): card=%d, channel=%lu\n",
+ pr_debug("TurboPAM(tpam_command_hangup): card=%d, channel=%lu\n",
card->id, (unsigned long)channel);
/* board must be running */
@@ -464,7 +464,7 @@ static int tpam_command_hangup(tpam_card *card, u32 channel) {
static int tpam_command_proceed(tpam_card *card, u32 channel) {
struct sk_buff *skb;
- dprintk("TurboPAM(tpam_command_proceed): card=%d, channel=%lu\n",
+ pr_debug("TurboPAM(tpam_command_proceed): card=%d, channel=%lu\n",
card->id, (unsigned long)channel);
/* board must be running */
@@ -496,7 +496,7 @@ int tpam_writebuf_skb(int driverId, int channel, int ack, struct sk_buff *skb) {
void *finaldata;
u32 finallen;
- dprintk("TurboPAM(tpam_writebuf_skb): "
+ pr_debug("TurboPAM(tpam_writebuf_skb): "
"card=%d, channel=%ld, ack=%d, data size=%d\n",
driverId, (unsigned long)channel, ack, skb->len);
@@ -569,7 +569,7 @@ void tpam_recv_ACreateNCOCnf(tpam_card *card, struct sk_buff *skb) {
u8 status;
u32 channel;
- dprintk("TurboPAM(tpam_recv_ACreateNCOCnf): card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_recv_ACreateNCOCnf): card=%d\n", card->id);
/* parse the message contents */
if (parse_ACreateNCOCnf(skb, &status, &ncoid))
@@ -614,7 +614,7 @@ void tpam_recv_ADestroyNCOCnf(tpam_card *card, struct sk_buff *skb) {
u8 status;
u32 channel;
- dprintk("TurboPAM(tpam_recv_ADestroyNCOCnf): card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_recv_ADestroyNCOCnf): card=%d\n", card->id);
/* parse the message contents */
if (parse_ADestroyNCOCnf(skb, &status, &ncoid))
@@ -647,7 +647,7 @@ void tpam_recv_CConnectCnf(tpam_card *card, struct sk_buff *skb) {
u32 channel;
isdn_ctrl ctrl;
- dprintk("TurboPAM(tpam_recv_CConnectCnf): card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_recv_CConnectCnf): card=%d\n", card->id);
/* parse the message contents */
if (parse_CConnectCnf(skb, &ncoid))
@@ -685,7 +685,7 @@ void tpam_recv_CConnectInd(tpam_card *card, struct sk_buff *skb) {
isdn_ctrl ctrl;
int status;
- dprintk("TurboPAM(tpam_recv_CConnectInd): card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_recv_CConnectInd): card=%d\n", card->id);
/* parse the message contents */
if (parse_CConnectInd(skb, &ncoid, &hdlc, calling, called, &plan, &screen))
@@ -720,13 +720,13 @@ void tpam_recv_CConnectInd(tpam_card *card, struct sk_buff *skb) {
case 4:
/* call accepted, link layer will send us a ACCEPTD
* command later */
- dprintk("TurboPAM(tpam_recv_CConnectInd): "
+ pr_debug("TurboPAM(tpam_recv_CConnectInd): "
"card=%d, channel=%d, icall waiting, status=%d\n",
card->id, channel, status);
break;
default:
/* call denied, we build and send a CDisconnectReq */
- dprintk("TurboPAM(tpam_recv_CConnectInd): "
+ pr_debug("TurboPAM(tpam_recv_CConnectInd): "
"card=%d, channel=%d, icall denied, status=%d\n",
card->id, channel, status);
skb = build_CDisconnectReq(ncoid);
@@ -749,7 +749,7 @@ void tpam_recv_CDisconnectInd(tpam_card *card, struct sk_buff *skb) {
u32 cause;
isdn_ctrl ctrl;
- dprintk("TurboPAM(tpam_recv_CDisconnectInd): card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_recv_CDisconnectInd): card=%d\n", card->id);
/* parse the message contents */
if (parse_CDisconnectInd(skb, &ncoid, &cause))
@@ -794,7 +794,7 @@ void tpam_recv_CDisconnectCnf(tpam_card *card, struct sk_buff *skb) {
u32 cause;
isdn_ctrl ctrl;
- dprintk("TurboPAM(tpam_recv_CDisconnectCnf): card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_recv_CDisconnectCnf): card=%d\n", card->id);
/* parse the message contents */
if (parse_CDisconnectCnf(skb, &ncoid, &cause))
@@ -835,7 +835,7 @@ void tpam_recv_U3DataInd(tpam_card *card, struct sk_buff *skb) {
u16 len;
struct sk_buff *result;
- dprintk("TurboPAM(tpam_recv_U3DataInd): card=%d, datalen=%d\n",
+ pr_debug("TurboPAM(tpam_recv_U3DataInd): card=%d, datalen=%d\n",
card->id, skb->len);
/* parse the message contents */
@@ -914,7 +914,7 @@ void tpam_recv_U3ReadyToReceiveInd(tpam_card *card, struct sk_buff *skb) {
u32 channel;
u8 ready;
- dprintk("TurboPAM(tpam_recv_U3ReadyToReceiveInd): card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_recv_U3ReadyToReceiveInd): card=%d\n", card->id);
/* parse the message contents */
if (parse_U3ReadyToReceiveInd(skb, &ncoid, &ready))
@@ -943,7 +943,7 @@ void tpam_recv_U3ReadyToReceiveInd(tpam_card *card, struct sk_buff *skb) {
static void tpam_statcallb_run(unsigned long parm) {
tpam_statcallb_data *ds = (tpam_statcallb_data *)parm;
- dprintk("TurboPAM(tpam_statcallb_run)\n");
+ pr_debug("TurboPAM(tpam_statcallb_run)\n");
(* ds->card->interface.statcallb)(&ds->ctrl);
@@ -961,7 +961,7 @@ static void tpam_statcallb(tpam_card *card, isdn_ctrl ctrl) {
struct timer_list *timer;
tpam_statcallb_data *ds;
- dprintk("TurboPAM(tpam_statcallb): card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_statcallb): card=%d\n", card->id);
if (!(timer = (struct timer_list *) kmalloc(sizeof(struct timer_list),
GFP_ATOMIC))) {
diff --git a/drivers/isdn/tpam/tpam_nco.c b/drivers/isdn/tpam/tpam_nco.c
index 69a6bd4ba60e..de4904f35296 100644
--- a/drivers/isdn/tpam/tpam_nco.c
+++ b/drivers/isdn/tpam/tpam_nco.c
@@ -84,7 +84,7 @@ struct sk_buff *build_ACreateNCOReq(const u8 *phone) {
struct sk_buff *skb;
u8 *tlv;
- dprintk("TurboPAM(build_ACreateNCOReq): phone=%s\n", phone);
+ pr_debug("TurboPAM(build_ACreateNCOReq): phone=%s\n", phone);
/* build the NCO packet */
if (!(skb = build_NCOpacket(ID_ACreateNCOReq, 23 + strlen(phone), 0, 0, 0)))
@@ -141,7 +141,7 @@ struct sk_buff *build_ADestroyNCOReq(u32 ncoid) {
struct sk_buff *skb;
u8 *tlv;
- dprintk("TurboPAM(build_ADestroyNCOReq): ncoid=%lu\n",
+ pr_debug("TurboPAM(build_ADestroyNCOReq): ncoid=%lu\n",
(unsigned long)ncoid);
/* build the NCO packet */
@@ -170,7 +170,7 @@ struct sk_buff *build_CConnectReq(u32 ncoid, const u8 *called, u8 hdlc) {
struct sk_buff *skb;
u8 *tlv;
- dprintk("TurboPAM(build_CConnectReq): ncoid=%lu, called=%s, hdlc=%d\n",
+ pr_debug("TurboPAM(build_CConnectReq): ncoid=%lu, called=%s, hdlc=%d\n",
(unsigned long)ncoid, called, hdlc);
/* build the NCO packet */
@@ -220,7 +220,7 @@ struct sk_buff *build_CConnectRsp(u32 ncoid) {
struct sk_buff *skb;
u8 *tlv;
- dprintk("TurboPAM(build_CConnectRsp): ncoid=%lu\n",
+ pr_debug("TurboPAM(build_CConnectRsp): ncoid=%lu\n",
(unsigned long)ncoid);
/* build the NCO packet */
@@ -247,7 +247,7 @@ struct sk_buff *build_CDisconnectReq(u32 ncoid) {
struct sk_buff *skb;
u8 *tlv;
- dprintk("TurboPAM(build_CDisconnectReq): ncoid=%lu\n",
+ pr_debug("TurboPAM(build_CDisconnectReq): ncoid=%lu\n",
(unsigned long)ncoid);
/* build the NCO packet */
@@ -274,7 +274,7 @@ struct sk_buff *build_CDisconnectRsp(u32 ncoid) {
struct sk_buff *skb;
u8 *tlv;
- dprintk("TurboPAM(build_CDisconnectRsp): ncoid=%lu\n",
+ pr_debug("TurboPAM(build_CDisconnectRsp): ncoid=%lu\n",
(unsigned long)ncoid);
/* build the NCO packet */
@@ -307,7 +307,7 @@ struct sk_buff *build_U3DataReq(u32 ncoid, void *data, u16 len,
u8 *tlv;
void *p;
- dprintk("TurboPAM(build_U3DataReq): "
+ pr_debug("TurboPAM(build_U3DataReq): "
"ncoid=%lu, len=%d, ack=%d, ack_size=%d\n",
(unsigned long)ncoid, len, ack, ack_size);
@@ -397,7 +397,7 @@ int parse_ACreateNCOCnf(struct sk_buff *skb, u8 *status, u32 *ncoid) {
}
if (*status) {
- dprintk("TurboPAM(parse_ACreateNCOCnf): status=%d\n", *status);
+ pr_debug("TurboPAM(parse_ACreateNCOCnf): status=%d\n", *status);
return 0;
}
@@ -408,7 +408,7 @@ int parse_ACreateNCOCnf(struct sk_buff *skb, u8 *status, u32 *ncoid) {
return -1;
}
- dprintk("TurboPAM(parse_ACreateNCOCnf): ncoid=%lu, status=%d\n",
+ pr_debug("TurboPAM(parse_ACreateNCOCnf): ncoid=%lu, status=%d\n",
(unsigned long)*ncoid, *status);
return 0;
}
@@ -432,7 +432,7 @@ int parse_ADestroyNCOCnf(struct sk_buff *skb, u8 *status, u32 *ncoid) {
}
if (*status) {
- dprintk("TurboPAM(parse_ADestroyNCOCnf): status=%d\n", *status);
+ pr_debug("TurboPAM(parse_ADestroyNCOCnf): status=%d\n", *status);
return 0;
}
@@ -443,7 +443,7 @@ int parse_ADestroyNCOCnf(struct sk_buff *skb, u8 *status, u32 *ncoid) {
return -1;
}
- dprintk("TurboPAM(parse_ADestroyNCOCnf): ncoid=%lu, status=%d\n",
+ pr_debug("TurboPAM(parse_ADestroyNCOCnf): ncoid=%lu, status=%d\n",
(unsigned long)*ncoid, *status);
return 0;
}
@@ -464,7 +464,7 @@ int parse_CConnectCnf(struct sk_buff *skb, u32 *ncoid) {
"NCOID not found\n");
return -1;
}
- dprintk("TurboPAM(parse_CConnectCnf): ncoid=%lu\n",
+ pr_debug("TurboPAM(parse_CConnectCnf): ncoid=%lu\n",
(unsigned long)*ncoid);
return 0;
}
@@ -522,7 +522,7 @@ int parse_CConnectInd(struct sk_buff *skb, u32 *ncoid, u8 *hdlc,
}
memcpy(called, phone + 2, PHONE_MAXIMUMSIZE);
- dprintk("TurboPAM(parse_CConnectInd): "
+ pr_debug("TurboPAM(parse_CConnectInd): "
"ncoid=%lu, hdlc=%d, plan=%d, scr=%d, calling=%s, called=%s\n",
(unsigned long)*ncoid, *hdlc, *plan, *screen, calling, called);
return 0;
@@ -553,7 +553,7 @@ int parse_CDisconnectCnf(struct sk_buff *skb, u32 *ncoid, u32 *causetopuf) {
return -1;
}
- dprintk("TurboPAM(parse_CDisconnectCnf): ncoid=%lu, causetopuf=%lu\n",
+ pr_debug("TurboPAM(parse_CDisconnectCnf): ncoid=%lu, causetopuf=%lu\n",
(unsigned long)*ncoid, (unsigned long)*causetopuf);
return 0;
}
@@ -583,7 +583,7 @@ int parse_CDisconnectInd(struct sk_buff *skb, u32 *ncoid, u32 *causetopuf) {
return -1;
}
- dprintk("TurboPAM(parse_CDisconnectInd): ncoid=%lu, causetopuf=%lu\n",
+ pr_debug("TurboPAM(parse_CDisconnectInd): ncoid=%lu, causetopuf=%lu\n",
(unsigned long)*ncoid, (unsigned long)*causetopuf);
return 0;
}
@@ -613,7 +613,7 @@ int parse_U3ReadyToReceiveInd(struct sk_buff *skb, u32 *ncoid, u8 *ready) {
return -1;
}
- dprintk("TurboPAM(parse_U3ReadyToReceiveInd): ncoid=%lu, ready=%d\n",
+ pr_debug("TurboPAM(parse_U3ReadyToReceiveInd): ncoid=%lu, ready=%d\n",
(unsigned long)*ncoid, *ready);
return 0;
}
@@ -644,7 +644,7 @@ int parse_U3DataInd(struct sk_buff *skb, u32 *ncoid, u8 **data, u16 *len) {
sizeof(skb_header) + sizeof(pci_mpb) + p->actualBlockTLVSize);
*data = skb->data;
- dprintk("TurboPAM(parse_U3DataInd): ncoid=%lu, datalen=%d\n",
+ pr_debug("TurboPAM(parse_U3DataInd): ncoid=%lu, datalen=%d\n",
(unsigned long)*ncoid, *len);
return 0;
}
diff --git a/drivers/isdn/tpam/tpam_queues.c b/drivers/isdn/tpam/tpam_queues.c
index 9b55684dde0c..a9d8bb08007e 100644
--- a/drivers/isdn/tpam/tpam_queues.c
+++ b/drivers/isdn/tpam/tpam_queues.c
@@ -30,7 +30,7 @@ static int tpam_sendpacket(tpam_card *card, tpam_channel *channel);
*/
void tpam_enqueue(tpam_card *card, struct sk_buff *skb) {
- dprintk("TurboPAM(tpam_enqueue): card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_enqueue): card=%d\n", card->id);
/* queue the sk_buff on the board's send queue */
skb_queue_tail(&card->sendq, skb);
@@ -49,7 +49,7 @@ void tpam_enqueue(tpam_card *card, struct sk_buff *skb) {
*/
void tpam_enqueue_data(tpam_channel *channel, struct sk_buff *skb) {
- dprintk("TurboPAM(tpam_enqueue_data): card=%d, channel=%d\n",
+ pr_debug("TurboPAM(tpam_enqueue_data): card=%d, channel=%d\n",
channel->card->id, channel->num);
/* if existant, queue the sk_buff on the channel's send queue */
@@ -84,7 +84,7 @@ irqreturn_t tpam_irq(int irq, void *dev_id, struct pt_regs *regs)
pci_mpb mpb;
skb_header *skbh;
- dprintk("TurboPAM(tpam_irq): IRQ received, card=%d\n", card->id);
+ pr_debug("TurboPAM(tpam_irq): IRQ received, card=%d\n", card->id);
/* grab the board lock */
spin_lock(&card->lock);
@@ -99,7 +99,7 @@ irqreturn_t tpam_irq(int irq, void *dev_id, struct pt_regs *regs)
if (!ackupload) {
/* it is a new message from the board */
- dprintk("TurboPAM(tpam_irq): message received, card=%d\n",
+ pr_debug("TurboPAM(tpam_irq): message received, card=%d\n",
card->id);
/* get the upload pointer */
@@ -176,7 +176,7 @@ irqreturn_t tpam_irq(int irq, void *dev_id, struct pt_regs *regs)
else {
/* it is a ack from the board */
- dprintk("TurboPAM(tpam_irq): message acknowledged, card=%d\n",
+ pr_debug("TurboPAM(tpam_irq): message acknowledged, card=%d\n",
card->id);
/* board is not busy anymore */
@@ -231,7 +231,7 @@ void tpam_recv_tq(tpam_card *card) {
tpam_recv_U3DataInd(card, skb);
break;
default:
- dprintk("TurboPAM(tpam_recv_tq): "
+ pr_debug("TurboPAM(tpam_recv_tq): "
"unknown messageID %d, card=%d\n",
p->messageID, card->id);
break;
@@ -286,13 +286,13 @@ static int tpam_sendpacket(tpam_card *card, tpam_channel *channel) {
skb_header *skbh;
u32 waiting_too_long;
- dprintk("TurboPAM(tpam_sendpacket), card=%d, channel=%d\n",
+ pr_debug("TurboPAM(tpam_sendpacket), card=%d, channel=%d\n",
card->id, channel ? channel->num : -1);
if (channel) {
/* dequeue a packet from the channel's send queue */
if (!(skb = skb_dequeue(&channel->sendq))) {
- dprintk("TurboPAM(tpam_sendpacket): "
+ pr_debug("TurboPAM(tpam_sendpacket): "
"card=%d, channel=%d, no packet\n",
card->id, channel->num);
return 0;
@@ -301,7 +301,7 @@ static int tpam_sendpacket(tpam_card *card, tpam_channel *channel) {
/* if the channel is not ready to receive, requeue the packet
* and return 0 to give a chance to another channel */
if (!channel->readytoreceive) {
- dprintk("TurboPAM(tpam_sendpacket): "
+ pr_debug("TurboPAM(tpam_sendpacket): "
"card=%d, channel=%d, channel not ready\n",
card->id, channel->num);
skb_queue_head(&channel->sendq, skb);
@@ -314,7 +314,7 @@ static int tpam_sendpacket(tpam_card *card, tpam_channel *channel) {
/* if the board is busy, requeue the packet and return 1 since
* there is no need to try another channel */
if (card->busy) {
- dprintk("TurboPAM(tpam_sendpacket): "
+ pr_debug("TurboPAM(tpam_sendpacket): "
"card=%d, channel=%d, card busy\n",
card->id, channel->num);
skb_queue_head(&channel->sendq, skb);
@@ -325,7 +325,7 @@ static int tpam_sendpacket(tpam_card *card, tpam_channel *channel) {
else {
/* dequeue a packet from the board's send queue */
if (!(skb = skb_dequeue(&card->sendq))) {
- dprintk("TurboPAM(tpam_sendpacket): "
+ pr_debug("TurboPAM(tpam_sendpacket): "
"card=%d, no packet\n", card->id);
return 0;
}
@@ -336,7 +336,7 @@ static int tpam_sendpacket(tpam_card *card, tpam_channel *channel) {
/* if the board is busy, requeue the packet and return 1 since
* there is no need to try another channel */
if (card->busy) {
- dprintk("TurboPAM(tpam_sendpacket): "
+ pr_debug("TurboPAM(tpam_sendpacket): "
"card=%d, card busy\n", card->id);
skb_queue_head(&card->sendq, skb);
spin_unlock_irq(&card->lock);
@@ -357,7 +357,7 @@ static int tpam_sendpacket(tpam_card *card, tpam_channel *channel) {
} while (hpic & 0x00000002);
skbh = (skb_header *)skb->data;
- dprintk("TurboPAM(tpam_sendpacket): "
+ pr_debug("TurboPAM(tpam_sendpacket): "
"card=%d, card ready, sending %d/%d bytes\n",
card->id, skbh->size, skbh->data_size);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 745f7e7bd3ee..10efe7942775 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3468,8 +3468,7 @@ static void md_do_sync(mddev_t *mddev)
if (currspeed > sysctl_speed_limit_min) {
if ((currspeed > sysctl_speed_limit_max) ||
!is_mddev_idle(mddev)) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(HZ/4);
+ msleep_interruptible(250);
goto repeat;
}
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1ed82ea4be8e..553e16da088c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1013,7 +1013,7 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
* put in a delay to throttle resync.
*/
if (!go_faster && waitqueue_active(&conf->wait_resume))
- schedule_timeout(HZ);
+ msleep_interruptible(1000);
device_barrier(conf, sector_nr + RESYNC_SECTORS);
/*
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6c3fde9ba192..0ba3a4e1831c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1358,7 +1358,7 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
* put in a delay to throttle resync.
*/
if (!go_faster && waitqueue_active(&conf->wait_resume))
- schedule_timeout(HZ);
+ msleep_interruptible(1000);
device_barrier(conf, sector_nr + RESYNC_SECTORS);
/* Again, very different code for resync and recovery.
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 31412e8d32db..293173885bd9 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -249,8 +249,7 @@ static int qc_waithand(struct qcam_device *q, int val)
if(runs++>maxpoll)
{
- current->state=TASK_INTERRUPTIBLE;
- schedule_timeout(HZ/200);
+ msleep_interruptible(5);
}
if(runs>(maxpoll+1000)) /* 5 seconds */
return -1;
@@ -269,8 +268,7 @@ static int qc_waithand(struct qcam_device *q, int val)
if(runs++>maxpoll)
{
- current->state=TASK_INTERRUPTIBLE;
- schedule_timeout(HZ/200);
+ msleep_interruptible(5);
}
if(runs++>(maxpoll+1000)) /* 5 seconds */
return -1;
@@ -302,8 +300,7 @@ static unsigned int qc_waithand2(struct qcam_device *q, int val)
if(runs++>maxpoll)
{
- current->state=TASK_INTERRUPTIBLE;
- schedule_timeout(HZ/200);
+ msleep_interruptible(5);
}
if(runs++>(maxpoll+1000)) /* 5 seconds */
return 0;
@@ -669,8 +666,7 @@ long qc_capture(struct qcam_device * q, char __user *buf, unsigned long len)
time will be 240 / 200 = 1.2 seconds. The compile-time
default is to yield every 4 lines. */
if (i >= yield) {
- current->state=TASK_INTERRUPTIBLE;
- schedule_timeout(HZ/200);
+ msleep_interruptible(5);
yield = i + yieldlines;
}
}
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index bd9fef419a88..703c4cba97b6 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -103,8 +103,7 @@ static unsigned int qcam_await_ready1(struct qcam_device *qcam,
{
if (qcam_ready1(qcam) == value)
return 0;
- current->state=TASK_INTERRUPTIBLE;
- schedule_timeout(HZ/10);
+ msleep_interruptible(100);
}
/* Probably somebody pulled the plug out. Not much we can do. */
@@ -129,8 +128,7 @@ static unsigned int qcam_await_ready2(struct qcam_device *qcam, int value)
{
if (qcam_ready2(qcam) == value)
return 0;
- current->state=TASK_INTERRUPTIBLE;
- schedule_timeout(HZ/10);
+ msleep_interruptible(100);
}
/* Probably somebody pulled the plug out. Not much we can do. */
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 21db4b390277..388bc09366bd 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -37,6 +37,7 @@
#include <linux/proc_fs.h>
#include <linux/ctype.h>
#include <linux/pagemap.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/semaphore.h>
@@ -2886,9 +2887,7 @@ static int fetch_frame(void *data)
cond_resched();
/* sleep for 10 ms, hopefully ;) */
- current->state = TASK_INTERRUPTIBLE;
-
- schedule_timeout(10*HZ/1000);
+ msleep_interruptible(10);
if (signal_pending(current))
return -EINTR;
@@ -2951,8 +2950,7 @@ static int fetch_frame(void *data)
CPIA_GRAB_SINGLE, 0, 0, 0);
/* FIXME: Trial & error - need up to 70ms for
the grab mode change to complete ? */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(70*HZ / 1000);
+ msleep_interruptible(70);
if (signal_pending(current))
return -EINTR;
}
@@ -3003,8 +3001,7 @@ static int goto_high_power(struct cam_data *cam)
{
if (do_command(cam, CPIA_COMMAND_GotoHiPower, 0, 0, 0, 0))
return -EIO;
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(40*HZ/1000); /* windows driver does it too */
+ msleep_interruptible(40); /* windows driver does it too */
if(signal_pending(current))
return -EINTR;
if (do_command(cam, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0))
@@ -3074,10 +3071,8 @@ static int set_camera_state(struct cam_data *cam)
/* Wait 6 frames for the sensor to get all settings and
AEC/ACB to settle */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout((6*(cam->params.sensorFps.baserate ? 33 : 40) *
- (1 << cam->params.sensorFps.divisor) + 10) *
- HZ / 1000);
+ msleep_interruptible(6*(cam->params.sensorFps.baserate ? 33 : 40) *
+ (1 << cam->params.sensorFps.divisor) + 10);
if(signal_pending(current))
return -EINTR;
diff --git a/drivers/media/video/ovcamchip/ovcamchip_core.c b/drivers/media/video/ovcamchip/ovcamchip_core.c
index d88956a26aee..67f2fc4a4f9f 100644
--- a/drivers/media/video/ovcamchip/ovcamchip_core.c
+++ b/drivers/media/video/ovcamchip/ovcamchip_core.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include "ovcamchip_priv.h"
#define DRIVER_VERSION "v2.27 for Linux 2.6"
@@ -128,8 +129,7 @@ static int init_camchip(struct i2c_client *c)
ov_write(c, 0x12, 0x80);
/* Wait for it to initialize */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1 + 150 * HZ / 1000);
+ msleep(150);
for (i = 0, success = 0; i < I2C_DETECT_RETRIES && !success; i++) {
if (ov_read(c, GENERIC_REG_ID_HIGH, &high) >= 0) {
@@ -145,8 +145,7 @@ static int init_camchip(struct i2c_client *c)
ov_write(c, 0x12, 0x80);
/* Wait for it to initialize */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1 + 150 * HZ / 1000);
+ msleep(150);
/* Dummy read to sync I2C */
ov_read(c, 0x00, &low);
diff --git a/drivers/media/video/planb.c b/drivers/media/video/planb.c
index ebf130f167e3..3a828546952a 100644
--- a/drivers/media/video/planb.c
+++ b/drivers/media/video/planb.c
@@ -178,8 +178,7 @@ static unsigned char saa_status(int byte, struct planb *pb)
saa_write_reg (SAA7196_STDC, saa_regs[pb->win.norm][SAA7196_STDC]);
/* Let's wait 30msec for this one */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(30 * HZ / 1000);
+ msleep_interruptible(30);
return (unsigned char)in_8 (&planb_regs->saa_status);
}
diff --git a/drivers/media/video/saa5249.c b/drivers/media/video/saa5249.c
index d486fb7faf67..7d5ca87c1619 100644
--- a/drivers/media/video/saa5249.c
+++ b/drivers/media/video/saa5249.c
@@ -273,8 +273,7 @@ static void jdelay(unsigned long delay)
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(delay);
+ msleep_interruptible(jiffies_to_msecs(delay));
spin_lock_irq(&current->sighand->siglock);
current->blocked = oldblocked;
diff --git a/drivers/media/video/tda9887.c b/drivers/media/video/tda9887.c
index e46fbdbc46e6..31977e770b23 100644
--- a/drivers/media/video/tda9887.c
+++ b/drivers/media/video/tda9887.c
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <media/audiochip.h>
#include <media/tuner.h>
@@ -543,8 +544,7 @@ static int tda9887_configure(struct tda9887 *t)
printk(PREFIX "i2c i/o error: rc == %d (should be 4)\n",rc);
if (debug > 2) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ msleep_interruptible(1000);
tda9887_status(t);
}
return 0;
diff --git a/drivers/media/video/videocodec.c b/drivers/media/video/videocodec.c
index 081eb06a4c7a..ae1063e09a01 100644
--- a/drivers/media/video/videocodec.c
+++ b/drivers/media/video/videocodec.c
@@ -43,11 +43,6 @@
#include <asm/uaccess.h>
#endif
-#include <linux/version.h>
-#ifndef KERNEL_VERSION
-#define KERNEL_VERSION(a,b,c) ((a)*65536+(b)*256+(c))
-#endif
-
#include "videocodec.h"
static int debug = 0;
diff --git a/drivers/media/video/zoran_driver.c b/drivers/media/video/zoran_driver.c
index d6294bd7909f..bb2b4201a24d 100644
--- a/drivers/media/video/zoran_driver.c
+++ b/drivers/media/video/zoran_driver.c
@@ -1917,8 +1917,7 @@ zoran_set_norm (struct zoran *zr,
decoder_command(zr, DECODER_SET_NORM, &norm);
/* let changes come into effect */
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(2 * HZ);
+ ssleep(2);
decoder_command(zr, DECODER_GET_STATUS, &status);
if (!(status & DECODER_STATUS_GOOD)) {
@@ -2639,8 +2638,7 @@ zoran_do_ioctl (struct inode *inode,
decoder_command(zr, DECODER_SET_NORM, &norm);
/* sleep 1 second */
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(1 * HZ);
+ ssleep(1);
/* Get status of video decoder */
decoder_command(zr, DECODER_GET_STATUS, &status);
diff --git a/drivers/media/video/zr36120.c b/drivers/media/video/zr36120.c
index 9b55341baf40..e99226434e73 100644
--- a/drivers/media/video/zr36120.c
+++ b/drivers/media/video/zr36120.c
@@ -819,8 +819,7 @@ void zoran_close(struct video_device* dev)
* be sure its safe to free the buffer. We wait 5-6 fields
* which is more than sufficient to be sure.
*/
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(HZ/10); /* Wait 1/10th of a second */
+ msleep(100); /* Wait 1/10th of a second */
/* free the allocated framebuffer */
if (ztv->fbuffer)
@@ -1568,8 +1567,7 @@ void vbi_close(struct video_device *dev)
* be sure its safe to free the buffer. We wait 5-6 fields
* which is more than sufficient to be sure.
*/
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(HZ/10); /* Wait 1/10th of a second */
+ msleep(100); /* Wait 1/10th of a second */
for (item=ztv->readinfo; item!=ztv->readinfo+ZORAN_VBI_BUFFERS; item++)
{
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index c467480b958c..d252c2bda137 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -2195,8 +2195,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
}
if (sleepFlag == CAN_SLEEP) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1 * HZ / 1000);
+ msleep_interruptible(1);
} else {
mdelay (1); /* 1 msec delay */
}
@@ -2565,8 +2564,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
state = mpt_GetIocState(ioc, 1);
while (state != MPI_IOC_STATE_OPERATIONAL && --cntdn) {
if (sleepFlag == CAN_SLEEP) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1 * HZ / 1000);
+ msleep_interruptible(1);
} else {
mdelay(1);
}
@@ -2833,8 +2831,7 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
/* wait 1 msec */
if (sleepFlag == CAN_SLEEP) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1 * HZ / 1000);
+ msleep_interruptible(1);
} else {
mdelay (1);
}
@@ -2851,8 +2848,7 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
}
/* wait 1 sec */
if (sleepFlag == CAN_SLEEP) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1000 * HZ / 1000);
+ msleep_interruptible (1000);
} else {
mdelay (1000);
}
@@ -2952,8 +2948,7 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
return 0;
}
if (sleepFlag == CAN_SLEEP) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(10 * HZ / 1000);
+ msleep_interruptible (10);
} else {
mdelay (10);
}
@@ -3004,8 +2999,7 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
if (sleepFlag == CAN_SLEEP) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1000 * HZ / 1000);
+ msleep_interruptible (1000);
} else {
mdelay (1000);
}
@@ -3027,8 +3021,7 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
return hard_reset_done;
}
if (sleepFlag == CAN_SLEEP) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(10 * HZ / 1000);
+ msleep_interruptible (10);
} else {
mdelay (10);
}
@@ -3099,8 +3092,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
/* wait 100 msec */
if (sleepFlag == CAN_SLEEP) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(100 * HZ / 1000);
+ msleep_interruptible (100);
} else {
mdelay (100);
}
@@ -3207,8 +3199,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
/* wait 1 sec */
if (sleepFlag == CAN_SLEEP) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1000 * HZ / 1000);
+ msleep_interruptible (1000);
} else {
mdelay (1000);
}
@@ -3242,8 +3233,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
/* wait 100 msec */
if (sleepFlag == CAN_SLEEP) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(100 * HZ / 1000);
+ msleep_interruptible (100);
} else {
mdelay (100);
}
@@ -3337,8 +3327,7 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
}
if (sleepFlag == CAN_SLEEP) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1 * HZ / 1000);
+ msleep_interruptible(1);
} else {
mdelay (1); /* 1 msec delay */
}
@@ -3775,8 +3764,7 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
break;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1 * HZ / 1000);
+ msleep_interruptible (1);
count++;
}
} else {
@@ -3825,8 +3813,7 @@ WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
if (intstat & MPI_HIS_DOORBELL_INTERRUPT)
break;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1 * HZ / 1000);
+ msleep_interruptible(1);
count++;
}
} else {
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
index 7227a8a1e0bd..85db8c4c58e0 100644
--- a/drivers/message/i2o/debug.c
+++ b/drivers/message/i2o/debug.c
@@ -54,7 +54,7 @@ void i2o_report_status(const char *severity, const char *str,
if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
return; // No status in this reply
- printk("%s%s: ", severity, str);
+ printk(KERN_DEBUG "%s%s: ", severity, str);
if (cmd < 0x1F) // Utility cmd
i2o_report_util_cmd(cmd);
@@ -62,7 +62,7 @@ void i2o_report_status(const char *severity, const char *str,
else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd
i2o_report_exec_cmd(cmd);
else
- printk("Cmd = %0#2x, ", cmd); // Other cmds
+ printk(KERN_DEBUG "Cmd = %0#2x, ", cmd); // Other cmds
if (msg[0] & MSG_FAIL) {
i2o_report_fail_status(req_status, msg);
@@ -74,7 +74,8 @@ void i2o_report_status(const char *severity, const char *str,
if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
i2o_report_common_dsc(detailed_status);
else
- printk(" / DetailedStatus = %0#4x.\n", detailed_status);
+ printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n",
+ detailed_status);
}
/* Used to dump a message to syslog during debugging */
@@ -129,20 +130,20 @@ void i2o_report_controller_unit(struct i2o_controller *c, struct i2o_device *d)
printk(KERN_INFO " Class: ");
//sprintf(str, "%-21s", i2o_get_class_name(d->lct_data.class_id));
- printk("%s\n", str);
+ printk(KERN_DEBUG "%s\n", str);
printk(KERN_INFO " Subclass: 0x%04X\n", d->lct_data.sub_class);
printk(KERN_INFO " Flags: ");
if (d->lct_data.device_flags & (1 << 0))
- printk("C"); // ConfigDialog requested
+ printk(KERN_DEBUG "C"); // ConfigDialog requested
if (d->lct_data.device_flags & (1 << 1))
- printk("U"); // Multi-user capable
+ printk(KERN_DEBUG "U"); // Multi-user capable
if (!(d->lct_data.device_flags & (1 << 4)))
- printk("P"); // Peer service enabled!
+ printk(KERN_DEBUG "P"); // Peer service enabled!
if (!(d->lct_data.device_flags & (1 << 5)))
- printk("M"); // Mgmt service enabled!
- printk("\n");
+ printk(KERN_DEBUG "M"); // Mgmt service enabled!
+ printk(KERN_DEBUG "\n");
}
/*
@@ -177,9 +178,11 @@ void i2o_report_fail_status(u8 req_status, u32 * msg)
};
if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
- printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x)\n.", req_status);
+ printk(KERN_DEBUG "TRANSPORT_UNKNOWN_FAILURE (%0#2x)\n.",
+ req_status);
else
- printk("TRANSPORT_%s.\n", FAIL_STATUS[req_status & 0x0F]);
+ printk(KERN_DEBUG "TRANSPORT_%s.\n",
+ FAIL_STATUS[req_status & 0x0F]);
/* Dump some details */
@@ -192,16 +195,17 @@ void i2o_report_fail_status(u8 req_status, u32 * msg)
printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF);
if (msg[4] & (1 << 16))
- printk("(FormatError), "
+ printk(KERN_DEBUG "(FormatError), "
"this msg can never be delivered/processed.\n");
if (msg[4] & (1 << 17))
- printk("(PathError), "
+ printk(KERN_DEBUG "(PathError), "
"this msg can no longer be delivered/processed.\n");
if (msg[4] & (1 << 18))
- printk("(PathState), "
+ printk(KERN_DEBUG "(PathState), "
"the system state does not allow delivery.\n");
if (msg[4] & (1 << 19))
- printk("(Congestion), resources temporarily not available;"
+ printk(KERN_DEBUG
+ "(Congestion), resources temporarily not available;"
"do not retry immediately.\n");
}
@@ -227,9 +231,9 @@ void i2o_report_common_status(u8 req_status)
};
if (req_status >= ARRAY_SIZE(REPLY_STATUS))
- printk("RequestStatus = %0#2x", req_status);
+ printk(KERN_DEBUG "RequestStatus = %0#2x", req_status);
else
- printk("%s", REPLY_STATUS[req_status]);
+ printk(KERN_DEBUG "%s", REPLY_STATUS[req_status]);
}
/*
@@ -272,9 +276,10 @@ static void i2o_report_common_dsc(u16 detailed_status)
};
if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
- printk(" / DetailedStatus = %0#4x.\n", detailed_status);
+ printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n",
+ detailed_status);
else
- printk(" / %s.\n", COMMON_DSC[detailed_status]);
+ printk(KERN_DEBUG " / %s.\n", COMMON_DSC[detailed_status]);
}
/*
@@ -284,49 +289,49 @@ static void i2o_report_util_cmd(u8 cmd)
{
switch (cmd) {
case I2O_CMD_UTIL_NOP:
- printk("UTIL_NOP, ");
+ printk(KERN_DEBUG "UTIL_NOP, ");
break;
case I2O_CMD_UTIL_ABORT:
- printk("UTIL_ABORT, ");
+ printk(KERN_DEBUG "UTIL_ABORT, ");
break;
case I2O_CMD_UTIL_CLAIM:
- printk("UTIL_CLAIM, ");
+ printk(KERN_DEBUG "UTIL_CLAIM, ");
break;
case I2O_CMD_UTIL_RELEASE:
- printk("UTIL_CLAIM_RELEASE, ");
+ printk(KERN_DEBUG "UTIL_CLAIM_RELEASE, ");
break;
case I2O_CMD_UTIL_CONFIG_DIALOG:
- printk("UTIL_CONFIG_DIALOG, ");
+ printk(KERN_DEBUG "UTIL_CONFIG_DIALOG, ");
break;
case I2O_CMD_UTIL_DEVICE_RESERVE:
- printk("UTIL_DEVICE_RESERVE, ");
+ printk(KERN_DEBUG "UTIL_DEVICE_RESERVE, ");
break;
case I2O_CMD_UTIL_DEVICE_RELEASE:
- printk("UTIL_DEVICE_RELEASE, ");
+ printk(KERN_DEBUG "UTIL_DEVICE_RELEASE, ");
break;
case I2O_CMD_UTIL_EVT_ACK:
- printk("UTIL_EVENT_ACKNOWLEDGE, ");
+ printk(KERN_DEBUG "UTIL_EVENT_ACKNOWLEDGE, ");
break;
case I2O_CMD_UTIL_EVT_REGISTER:
- printk("UTIL_EVENT_REGISTER, ");
+ printk(KERN_DEBUG "UTIL_EVENT_REGISTER, ");
break;
case I2O_CMD_UTIL_LOCK:
- printk("UTIL_LOCK, ");
+ printk(KERN_DEBUG "UTIL_LOCK, ");
break;
case I2O_CMD_UTIL_LOCK_RELEASE:
- printk("UTIL_LOCK_RELEASE, ");
+ printk(KERN_DEBUG "UTIL_LOCK_RELEASE, ");
break;
case I2O_CMD_UTIL_PARAMS_GET:
- printk("UTIL_PARAMS_GET, ");
+ printk(KERN_DEBUG "UTIL_PARAMS_GET, ");
break;
case I2O_CMD_UTIL_PARAMS_SET:
- printk("UTIL_PARAMS_SET, ");
+ printk(KERN_DEBUG "UTIL_PARAMS_SET, ");
break;
case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
- printk("UTIL_REPLY_FAULT_NOTIFY, ");
+ printk(KERN_DEBUG "UTIL_REPLY_FAULT_NOTIFY, ");
break;
default:
- printk("Cmd = %0#2x, ", cmd);
+ printk(KERN_DEBUG "Cmd = %0#2x, ", cmd);
}
}
@@ -337,106 +342,106 @@ static void i2o_report_exec_cmd(u8 cmd)
{
switch (cmd) {
case I2O_CMD_ADAPTER_ASSIGN:
- printk("EXEC_ADAPTER_ASSIGN, ");
+ printk(KERN_DEBUG "EXEC_ADAPTER_ASSIGN, ");
break;
case I2O_CMD_ADAPTER_READ:
- printk("EXEC_ADAPTER_READ, ");
+ printk(KERN_DEBUG "EXEC_ADAPTER_READ, ");
break;
case I2O_CMD_ADAPTER_RELEASE:
- printk("EXEC_ADAPTER_RELEASE, ");
+ printk(KERN_DEBUG "EXEC_ADAPTER_RELEASE, ");
break;
case I2O_CMD_BIOS_INFO_SET:
- printk("EXEC_BIOS_INFO_SET, ");
+ printk(KERN_DEBUG "EXEC_BIOS_INFO_SET, ");
break;
case I2O_CMD_BOOT_DEVICE_SET:
- printk("EXEC_BOOT_DEVICE_SET, ");
+ printk(KERN_DEBUG "EXEC_BOOT_DEVICE_SET, ");
break;
case I2O_CMD_CONFIG_VALIDATE:
- printk("EXEC_CONFIG_VALIDATE, ");
+ printk(KERN_DEBUG "EXEC_CONFIG_VALIDATE, ");
break;
case I2O_CMD_CONN_SETUP:
- printk("EXEC_CONN_SETUP, ");
+ printk(KERN_DEBUG "EXEC_CONN_SETUP, ");
break;
case I2O_CMD_DDM_DESTROY:
- printk("EXEC_DDM_DESTROY, ");
+ printk(KERN_DEBUG "EXEC_DDM_DESTROY, ");
break;
case I2O_CMD_DDM_ENABLE:
- printk("EXEC_DDM_ENABLE, ");
+ printk(KERN_DEBUG "EXEC_DDM_ENABLE, ");
break;
case I2O_CMD_DDM_QUIESCE:
- printk("EXEC_DDM_QUIESCE, ");
+ printk(KERN_DEBUG "EXEC_DDM_QUIESCE, ");
break;
case I2O_CMD_DDM_RESET:
- printk("EXEC_DDM_RESET, ");
+ printk(KERN_DEBUG "EXEC_DDM_RESET, ");
break;
case I2O_CMD_DDM_SUSPEND:
- printk("EXEC_DDM_SUSPEND, ");
+ printk(KERN_DEBUG "EXEC_DDM_SUSPEND, ");
break;
case I2O_CMD_DEVICE_ASSIGN:
- printk("EXEC_DEVICE_ASSIGN, ");
+ printk(KERN_DEBUG "EXEC_DEVICE_ASSIGN, ");
break;
case I2O_CMD_DEVICE_RELEASE:
- printk("EXEC_DEVICE_RELEASE, ");
+ printk(KERN_DEBUG "EXEC_DEVICE_RELEASE, ");
break;
case I2O_CMD_HRT_GET:
- printk("EXEC_HRT_GET, ");
+ printk(KERN_DEBUG "EXEC_HRT_GET, ");
break;
case I2O_CMD_ADAPTER_CLEAR:
- printk("EXEC_IOP_CLEAR, ");
+ printk(KERN_DEBUG "EXEC_IOP_CLEAR, ");
break;
case I2O_CMD_ADAPTER_CONNECT:
- printk("EXEC_IOP_CONNECT, ");
+ printk(KERN_DEBUG "EXEC_IOP_CONNECT, ");
break;
case I2O_CMD_ADAPTER_RESET:
- printk("EXEC_IOP_RESET, ");
+ printk(KERN_DEBUG "EXEC_IOP_RESET, ");
break;
case I2O_CMD_LCT_NOTIFY:
- printk("EXEC_LCT_NOTIFY, ");
+ printk(KERN_DEBUG "EXEC_LCT_NOTIFY, ");
break;
case I2O_CMD_OUTBOUND_INIT:
- printk("EXEC_OUTBOUND_INIT, ");
+ printk(KERN_DEBUG "EXEC_OUTBOUND_INIT, ");
break;
case I2O_CMD_PATH_ENABLE:
- printk("EXEC_PATH_ENABLE, ");
+ printk(KERN_DEBUG "EXEC_PATH_ENABLE, ");
break;
case I2O_CMD_PATH_QUIESCE:
- printk("EXEC_PATH_QUIESCE, ");
+ printk(KERN_DEBUG "EXEC_PATH_QUIESCE, ");
break;
case I2O_CMD_PATH_RESET:
- printk("EXEC_PATH_RESET, ");
+ printk(KERN_DEBUG "EXEC_PATH_RESET, ");
break;
case I2O_CMD_STATIC_MF_CREATE:
- printk("EXEC_STATIC_MF_CREATE, ");
+ printk(KERN_DEBUG "EXEC_STATIC_MF_CREATE, ");
break;
case I2O_CMD_STATIC_MF_RELEASE:
- printk("EXEC_STATIC_MF_RELEASE, ");
+ printk(KERN_DEBUG "EXEC_STATIC_MF_RELEASE, ");
break;
case I2O_CMD_STATUS_GET:
- printk("EXEC_STATUS_GET, ");
+ printk(KERN_DEBUG "EXEC_STATUS_GET, ");
break;
case I2O_CMD_SW_DOWNLOAD:
- printk("EXEC_SW_DOWNLOAD, ");
+ printk(KERN_DEBUG "EXEC_SW_DOWNLOAD, ");
break;
case I2O_CMD_SW_UPLOAD:
- printk("EXEC_SW_UPLOAD, ");
+ printk(KERN_DEBUG "EXEC_SW_UPLOAD, ");
break;
case I2O_CMD_SW_REMOVE:
- printk("EXEC_SW_REMOVE, ");
+ printk(KERN_DEBUG "EXEC_SW_REMOVE, ");
break;
case I2O_CMD_SYS_ENABLE:
- printk("EXEC_SYS_ENABLE, ");
+ printk(KERN_DEBUG "EXEC_SYS_ENABLE, ");
break;
case I2O_CMD_SYS_MODIFY:
- printk("EXEC_SYS_MODIFY, ");
+ printk(KERN_DEBUG "EXEC_SYS_MODIFY, ");
break;
case I2O_CMD_SYS_QUIESCE:
- printk("EXEC_SYS_QUIESCE, ");
+ printk(KERN_DEBUG "EXEC_SYS_QUIESCE, ");
break;
case I2O_CMD_SYS_TAB_SET:
- printk("EXEC_SYS_TAB_SET, ");
+ printk(KERN_DEBUG "EXEC_SYS_TAB_SET, ");
break;
default:
- printk("Cmd = %#02x, ", cmd);
+ printk(KERN_DEBUG "Cmd = %#02x, ", cmd);
}
}
@@ -445,28 +450,28 @@ void i2o_debug_state(struct i2o_controller *c)
printk(KERN_INFO "%s: State = ", c->name);
switch (((i2o_status_block *) c->status_block.virt)->iop_state) {
case 0x01:
- printk("INIT\n");
+ printk(KERN_DEBUG "INIT\n");
break;
case 0x02:
- printk("RESET\n");
+ printk(KERN_DEBUG "RESET\n");
break;
case 0x04:
- printk("HOLD\n");
+ printk(KERN_DEBUG "HOLD\n");
break;
case 0x05:
- printk("READY\n");
+ printk(KERN_DEBUG "READY\n");
break;
case 0x08:
- printk("OPERATIONAL\n");
+ printk(KERN_DEBUG "OPERATIONAL\n");
break;
case 0x10:
- printk("FAILED\n");
+ printk(KERN_DEBUG "FAILED\n");
break;
case 0x11:
- printk("FAULTED\n");
+ printk(KERN_DEBUG "FAULTED\n");
break;
default:
- printk("%x (unknown !!)\n",
+ printk(KERN_DEBUG "%x (unknown !!)\n",
((i2o_status_block *) c->status_block.virt)->iop_state);
}
};
@@ -516,53 +521,58 @@ void i2o_dump_hrt(struct i2o_controller *c)
d = (u8 *) (rows + 2);
state = p[1] << 8 | p[0];
- printk("TID %04X:[", state & 0xFFF);
+ printk(KERN_DEBUG "TID %04X:[", state & 0xFFF);
state >>= 12;
if (state & (1 << 0))
- printk("H"); /* Hidden */
+ printk(KERN_DEBUG "H"); /* Hidden */
if (state & (1 << 2)) {
- printk("P"); /* Present */
+ printk(KERN_DEBUG "P"); /* Present */
if (state & (1 << 1))
- printk("C"); /* Controlled */
+ printk(KERN_DEBUG "C"); /* Controlled */
}
if (state > 9)
- printk("*"); /* Hard */
+ printk(KERN_DEBUG "*"); /* Hard */
- printk("]:");
+ printk(KERN_DEBUG "]:");
switch (p[3] & 0xFFFF) {
case 0:
/* Adapter private bus - easy */
- printk("Local bus %d: I/O at 0x%04X Mem 0x%08X",
- p[2], d[1] << 8 | d[0], *(u32 *) (d + 4));
+ printk(KERN_DEBUG
+ "Local bus %d: I/O at 0x%04X Mem 0x%08X", p[2],
+ d[1] << 8 | d[0], *(u32 *) (d + 4));
break;
case 1:
/* ISA bus */
- printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X",
- p[2], d[2], d[1] << 8 | d[0], *(u32 *) (d + 4));
+ printk(KERN_DEBUG
+ "ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X", p[2],
+ d[2], d[1] << 8 | d[0], *(u32 *) (d + 4));
break;
case 2: /* EISA bus */
- printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
+ printk(KERN_DEBUG
+ "EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
p[2], d[3], d[1] << 8 | d[0], *(u32 *) (d + 4));
break;
case 3: /* MCA bus */
- printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
- p[2], d[3], d[1] << 8 | d[0], *(u32 *) (d + 4));
+ printk(KERN_DEBUG
+ "MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X", p[2],
+ d[3], d[1] << 8 | d[0], *(u32 *) (d + 4));
break;
case 4: /* PCI bus */
- printk("PCI %d: Bus %d Device %d Function %d",
- p[2], d[2], d[1], d[0]);
+ printk(KERN_DEBUG
+ "PCI %d: Bus %d Device %d Function %d", p[2],
+ d[2], d[1], d[0]);
break;
case 0x80: /* Other */
default:
- printk("Unsupported bus type.");
+ printk(KERN_DEBUG "Unsupported bus type.");
break;
}
- printk("\n");
+ printk(KERN_DEBUG "\n");
rows += length;
}
}
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index ff4822ed4df8..def6ea54416e 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/i2o.h>
+#include <linux/delay.h>
/* Exec OSM functions */
extern struct bus_type i2o_bus_type;
@@ -106,8 +107,7 @@ int i2o_device_claim_release(struct i2o_device *dev)
if (!rc)
break;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
+ ssleep(1);
}
if (!rc)
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index bc69d66c2623..1102013ab768 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -18,7 +18,6 @@
#include <linux/rwsem.h>
#include <linux/i2o.h>
-
/* max_drivers - Maximum I2O drivers (OSMs) which could be registered */
unsigned int i2o_max_drivers = I2O_MAX_DRIVERS;
module_param_named(max_drivers, i2o_max_drivers, uint, 0);
@@ -146,7 +145,7 @@ void i2o_driver_unregister(struct i2o_driver *drv)
struct i2o_device *i2o_dev;
list_for_each_entry(i2o_dev, &c->devices, list)
- i2o_driver_notify_device_remove(drv, i2o_dev);
+ i2o_driver_notify_device_remove(drv, i2o_dev);
i2o_driver_notify_controller_remove(drv, c);
}
@@ -246,14 +245,15 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m,
* Send notifications to all registered drivers that a new controller was
* added.
*/
-void i2o_driver_notify_controller_add_all(struct i2o_controller *c) {
+void i2o_driver_notify_controller_add_all(struct i2o_controller *c)
+{
int i;
struct i2o_driver *drv;
- for(i = 0; i < I2O_MAX_DRIVERS; i ++) {
+ for (i = 0; i < I2O_MAX_DRIVERS; i++) {
drv = i2o_drivers[i];
- if(drv)
+ if (drv)
i2o_driver_notify_controller_add(drv, c);
}
}
@@ -265,14 +265,15 @@ void i2o_driver_notify_controller_add_all(struct i2o_controller *c) {
* Send notifications to all registered drivers that a controller was
* removed.
*/
-void i2o_driver_notify_controller_remove_all(struct i2o_controller *c) {
+void i2o_driver_notify_controller_remove_all(struct i2o_controller *c)
+{
int i;
struct i2o_driver *drv;
- for(i = 0; i < I2O_MAX_DRIVERS; i ++) {
+ for (i = 0; i < I2O_MAX_DRIVERS; i++) {
drv = i2o_drivers[i];
- if(drv)
+ if (drv)
i2o_driver_notify_controller_remove(drv, c);
}
}
@@ -283,14 +284,15 @@ void i2o_driver_notify_controller_remove_all(struct i2o_controller *c) {
*
* Send notifications to all registered drivers that a device was added.
*/
-void i2o_driver_notify_device_add_all(struct i2o_device *i2o_dev) {
+void i2o_driver_notify_device_add_all(struct i2o_device *i2o_dev)
+{
int i;
struct i2o_driver *drv;
- for(i = 0; i < I2O_MAX_DRIVERS; i ++) {
+ for (i = 0; i < I2O_MAX_DRIVERS; i++) {
drv = i2o_drivers[i];
- if(drv)
+ if (drv)
i2o_driver_notify_device_add(drv, i2o_dev);
}
}
@@ -301,14 +303,15 @@ void i2o_driver_notify_device_add_all(struct i2o_device *i2o_dev) {
*
* Send notifications to all registered drivers that a device was removed.
*/
-void i2o_driver_notify_device_remove_all(struct i2o_device *i2o_dev) {
+void i2o_driver_notify_device_remove_all(struct i2o_device *i2o_dev)
+{
int i;
struct i2o_driver *drv;
- for(i = 0; i < I2O_MAX_DRIVERS; i ++) {
+ for (i = 0; i < I2O_MAX_DRIVERS; i++) {
drv = i2o_drivers[i];
- if(drv)
+ if (drv)
i2o_driver_notify_device_remove(drv, i2o_dev);
}
}
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 117f26106491..6d865f89642f 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/i2o.h>
+#include <linux/delay.h>
struct i2o_driver i2o_exec_driver;
@@ -151,7 +152,7 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
if (!iwait->complete)
- schedule_timeout(timeout * HZ);
+ msleep_interruptible(timeout * 1000);
finish_wait(&wq, &wait);
@@ -322,13 +323,13 @@ static void i2o_exec_lct_modified(struct i2o_controller *c)
static int i2o_exec_reply(struct i2o_controller *c, u32 m,
struct i2o_message *msg)
{
- if (readl(&msg->u.head[0]) & MSG_FAIL) { // Fail bit is set
+ if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { // Fail bit is set
struct i2o_message *pmsg; /* preserved message */
u32 pm;
- pm = readl(&msg->body[3]);
+ pm = le32_to_cpu(msg->body[3]);
- pmsg = c->in_queue.virt + pm;
+ pmsg = i2o_msg_in_to_virt(c, pm);
i2o_report_status(KERN_INFO, "i2o_core", msg);
@@ -339,10 +340,10 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
return -1;
}
- if (readl(&msg->u.s.tcntxt) & 0x80000000)
+ if (le32_to_cpu(msg->u.s.tcntxt) & 0x80000000)
return i2o_msg_post_wait_complete(c, m, msg);
- if ((readl(&msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) {
+ if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) {
struct work_struct *work;
pr_debug("%s: LCT notify received\n", c->name);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 102fb83164c0..04080c80ba37 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -416,11 +416,10 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
unsigned long flags;
/* FAILed message */
- if (unlikely(readl(&msg->u.head[0]) & (1 << 13))) {
+ if (unlikely(le32_to_cpu(msg->u.head[0]) & (1 << 13))) {
struct i2o_message *pmsg;
u32 pm;
- printk(KERN_WARNING "FAIL");
/*
* FAILed message from controller
* We increment the error count and abort it
@@ -431,10 +430,10 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
* better be on the safe side since no one really follows
* the spec to the book :)
*/
- pm = readl(&msg->body[3]);
- pmsg = c->in_queue.virt + pm;
+ pm = le32_to_cpu(msg->body[3]);
+ pmsg = i2o_msg_in_to_virt(c, pm);
- req = i2o_cntxt_list_get(c, readl(&pmsg->u.s.tcntxt));
+ req = i2o_cntxt_list_get(c, le32_to_cpu(pmsg->u.s.tcntxt));
if (unlikely(!req)) {
printk(KERN_ERR "block-osm: NULL reply received!\n");
return -1;
@@ -449,7 +448,7 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
spin_lock_irqsave(q->queue_lock, flags);
while (end_that_request_chunk(req, !req->errors,
- readl(&pmsg->body[1]))) ;
+ le32_to_cpu(pmsg->body[1]))) ;
end_that_request_last(req);
dev->open_queue_depth--;
@@ -464,7 +463,7 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
return -1;
}
- req = i2o_cntxt_list_get(c, readl(&msg->u.s.tcntxt));
+ req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
if (unlikely(!req)) {
printk(KERN_ERR "block-osm: NULL reply received!\n");
return -1;
@@ -487,7 +486,7 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
"I2O Block: Data transfer to deleted device!\n");
spin_lock_irqsave(q->queue_lock, flags);
while (end_that_request_chunk
- (req, !req->errors, readl(&msg->body[1]))) ;
+ (req, !req->errors, le32_to_cpu(msg->body[1]))) ;
end_that_request_last(req);
dev->open_queue_depth--;
@@ -503,7 +502,7 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
* request in the context.
*/
- st = readl(&msg->body[0]) >> 24;
+ st = le32_to_cpu(msg->body[0]) >> 24;
if (st != 0) {
int err;
@@ -524,7 +523,7 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
"Volume has changed, waiting for acknowledgement"
};
- err = readl(&msg->body[0]) & 0xffff;
+ err = le32_to_cpu(msg->body[0]) & 0xffff;
/*
* Device not ready means two things. One is that the
@@ -538,17 +537,18 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
* Don't stick a supertrak100 into cache aggressive modes
*/
- printk(KERN_ERR "\n/dev/%s error: %s", dev->gd->disk_name,
- bsa_errors[readl(&msg->body[0]) & 0xffff]);
- if (readl(&msg->body[0]) & 0x00ff0000)
- printk(" - DDM attempted %d retries",
- (readl(&msg->body[0]) >> 16) & 0x00ff);
- printk(".\n");
+ printk(KERN_ERR "/dev/%s error: %s", dev->gd->disk_name,
+ bsa_errors[le32_to_cpu(msg->body[0]) & 0xffff]);
+ if (le32_to_cpu(msg->body[0]) & 0x00ff0000)
+ printk(KERN_ERR " - DDM attempted %d retries",
+ (le32_to_cpu(msg->body[0]) >> 16) & 0x00ff);
+ printk(KERN_ERR ".\n");
req->errors++;
} else
req->errors = 0;
- if (!end_that_request_chunk(req, !req->errors, readl(&msg->body[1]))) {
+ if (!end_that_request_chunk
+ (req, !req->errors, le32_to_cpu(msg->body[1]))) {
add_disk_randomness(req->rq_disk);
spin_lock_irqsave(q->queue_lock, flags);
@@ -563,7 +563,7 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
i2o_block_sglist_free(ireq);
i2o_block_request_free(ireq);
} else
- printk(KERN_ERR "still remaining chunks\n");
+ printk(KERN_ERR "i2o_block: still remaining chunks\n");
return 1;
};
@@ -573,174 +573,6 @@ static void i2o_block_event(struct i2o_event *evt)
printk(KERN_INFO "block-osm: event received\n");
};
-#if 0
-static int i2o_block_event(void *dummy)
-{
- unsigned int evt;
- unsigned long flags;
- struct i2o_block_device *dev;
- int unit;
- //The only event that has data is the SCSI_SMART event.
- struct i2o_reply {
- u32 header[4];
- u32 evt_indicator;
- u8 ASC;
- u8 ASCQ;
- u16 pad;
- u8 data[16];
- } *evt_local;
-
- daemonize("i2oblock");
- allow_signal(SIGKILL);
-
- evt_running = 1;
-
- while (1) {
- if (down_interruptible(&i2ob_evt_sem)) {
- evt_running = 0;
- printk("exiting...");
- break;
- }
-
- /*
- * Keep another CPU/interrupt from overwriting the
- * message while we're reading it
- *
- * We stuffed the unit in the TxContext and grab the event mask
- * None of the BSA we care about events have EventData
- */
- spin_lock_irqsave(&i2ob_evt_lock, flags);
- evt_local = (struct i2o_reply *)evt_msg;
- spin_unlock_irqrestore(&i2ob_evt_lock, flags);
-
- unit = le32_to_cpu(evt_local->header[3]);
- evt = le32_to_cpu(evt_local->evt_indicator);
-
- dev = &i2o_blk_dev[unit];
- switch (evt) {
- /*
- * New volume loaded on same TID, so we just re-install.
- * The TID/controller don't change as it is the same
- * I2O device. It's just new media that we have to
- * rescan.
- */
- case I2O_EVT_IND_BSA_VOLUME_LOAD:
- {
- i2ob_install_device(dev->i2o_device->iop,
- dev->i2o_device, unit);
- add_disk(dev->gendisk);
- break;
- }
-
- /*
- * No media, so set all parameters to 0 and set the media
- * change flag. The I2O device is still valid, just doesn't
- * have media, so we don't want to clear the controller or
- * device pointer.
- */
- case I2O_EVT_IND_BSA_VOLUME_UNLOAD:
- {
- struct gendisk *p = dev->gendisk;
- blk_queue_max_sectors(dev->gendisk->queue, 0);
- del_gendisk(p);
- put_disk(p);
- dev->gendisk = NULL;
- dev->media_change_flag = 1;
- break;
- }
-
- case I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ:
- printk(KERN_WARNING
- "%s: Attempt to eject locked media\n",
- dev->i2o_device->dev_name);
- break;
-
- /*
- * The capacity has changed and we are going to be
- * updating the max_sectors and other information
- * about this disk. We try a revalidate first. If
- * the block device is in use, we don't want to
- * do that as there may be I/Os bound for the disk
- * at the moment. In that case we read the size
- * from the device and update the information ourselves
- * and the user can later force a partition table
- * update through an ioctl.
- */
- case I2O_EVT_IND_BSA_CAPACITY_CHANGE:
- {
- u64 size;
-
- if (i2ob_query_device(dev, 0x0004, 0, &size, 8)
- != 0)
- i2ob_query_device(dev, 0x0000, 4, &size,
- 8);
-
- spin_lock_irqsave(dev->req_queue->queue_lock,
- flags);
- set_capacity(dev->gendisk, size >> 9);
- spin_unlock_irqrestore(dev->req_queue->
- queue_lock, flags);
- break;
- }
-
- /*
- * We got a SCSI SMART event, we just log the relevant
- * information and let the user decide what they want
- * to do with the information.
- */
- case I2O_EVT_IND_BSA_SCSI_SMART:
- {
- char buf[16];
- printk(KERN_INFO
- "I2O Block: %s received a SCSI SMART Event\n",
- dev->i2o_device->dev_name);
- evt_local->data[16] = '\0';
- sprintf(buf, "%s", &evt_local->data[0]);
- printk(KERN_INFO " Disk Serial#:%s\n",
- buf);
- printk(KERN_INFO " ASC 0x%02x \n",
- evt_local->ASC);
- printk(KERN_INFO " ASCQ 0x%02x \n",
- evt_local->ASCQ);
- break;
- }
-
- /*
- * Non event
- */
-
- case 0:
- break;
-
- /*
- * An event we didn't ask for. Call the card manufacturer
- * and tell them to fix their firmware :)
- */
-
- case 0x20:
- /*
- * If a promise card reports 0x20 event then the brown stuff
- * hit the fan big time. The card seems to recover but loses
- * the pending writes. Deeply ungood except for testing fsck
- */
- if (dev->i2o_device->iop->promise)
- panic
- ("I2O controller firmware failed. Reboot and force a filesystem check.\n");
- default:
- printk(KERN_INFO
- "%s: Received event 0x%X we didn't register for\n"
- KERN_INFO
- " Blame the I2O card manufacturer 8)\n",
- dev->i2o_device->dev_name, evt);
- break;
- }
- };
-
- complete_and_exit(&i2ob_thread_dead, 0);
- return 0;
-}
-#endif
-
/*
* SCSI-CAM for ioctl geometry mapping
* Duplicated with SCSI - this should be moved into somewhere common
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 4c240102a50e..353762860251 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -74,96 +74,6 @@ struct i2o_cfg_info {
static struct i2o_cfg_info *open_files = NULL;
static ulong i2o_cfg_info_id = 0;
-#if 0
-/*
- * This is the callback for any message we have posted. The message itself
- * will be returned to the message pool when we return from the IRQ
- *
- * This runs in irq context so be short and sweet.
- */
-static void i2o_cfg_reply(struct i2o_handler *h, struct i2o_controller *c,
- struct i2o_message *m)
-{
- u32 *msg = (u32 *) m;
-
- if (msg[0] & MSG_FAIL) {
- u32 *preserved_msg = (u32 *) (c->msg_virt + msg[7]);
-
- printk(KERN_ERR "i2o_config: IOP failed to process the msg.\n");
-
- /* Release the preserved msg frame by resubmitting it as a NOP */
-
- preserved_msg[0] = THREE_WORD_MSG_SIZE | SGL_OFFSET_0;
- preserved_msg[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0;
- preserved_msg[2] = 0;
- i2o_post_message(c, msg[7]);
- }
-
- if (msg[4] >> 24) // ReqStatus != SUCCESS
- i2o_report_status(KERN_INFO, "i2o_config", msg);
-
- if (m->function == I2O_CMD_UTIL_EVT_REGISTER) {
- struct i2o_cfg_info *inf;
-
- for (inf = open_files; inf; inf = inf->next)
- if (inf->q_id == i2o_cntxt_list_get(c, msg[3]))
- break;
-
- //
- // If this is the case, it means that we're getting
- // events for a file descriptor that's been close()'d
- // w/o the user unregistering for events first.
- // The code currently assumes that the user will
- // take care of unregistering for events before closing
- // a file.
- //
- // TODO:
- // Should we track event registartion and deregister
- // for events when a file is close()'d so this doesn't
- // happen? That would get rid of the search through
- // the linked list since file->private_data could point
- // directly to the i2o_config_info data structure...but
- // it would mean having all sorts of tables to track
- // what each file is registered for...I think the
- // current method is simpler. - DS
- //
- if (!inf)
- return;
-
- inf->event_q[inf->q_in].id.iop = c->unit;
- inf->event_q[inf->q_in].id.tid = m->target_tid;
- inf->event_q[inf->q_in].id.evt_mask = msg[4];
-
- //
- // Data size = msg size - reply header
- //
- inf->event_q[inf->q_in].data_size = (m->size - 5) * 4;
- if (inf->event_q[inf->q_in].data_size)
- memcpy(inf->event_q[inf->q_in].evt_data,
- (unsigned char *)(msg + 5),
- inf->event_q[inf->q_in].data_size);
-
- spin_lock(&i2o_config_lock);
- MODINC(inf->q_in, I2O_EVT_Q_LEN);
- if (inf->q_len == I2O_EVT_Q_LEN) {
- MODINC(inf->q_out, I2O_EVT_Q_LEN);
- inf->q_lost++;
- } else {
- // Keep I2OEVTGET on another CPU from touching this
- inf->q_len++;
- }
- spin_unlock(&i2o_config_lock);
-
-// printk(KERN_INFO "File %p w/id %d has %d events\n",
-// inf->fp, inf->q_id, inf->q_len);
-
- kill_fasync(&inf->fasync, SIGIO, POLL_IN);
- }
-
- return;
-}
-#endif
-
/*
* Each of these describes an i2o message handler. They are
* multiplexed by the i2o_core code
@@ -388,7 +298,7 @@ static int i2o_cfg_swdl(unsigned long arg)
writel(0xD0000000 | fragsize, &msg->body[3]);
writel(buffer.phys, &msg->body[4]);
-// printk("i2o_config: swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
+// printk(KERN_INFO "i2o_config: swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
if (status != -ETIMEDOUT)
@@ -461,7 +371,7 @@ static int i2o_cfg_swul(unsigned long arg)
writel(0xD0000000 | fragsize, &msg->body[3]);
writel(buffer.phys, &msg->body[4]);
-// printk("i2o_config: swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
+// printk(KERN_INFO "i2o_config: swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
if (status != I2O_POST_WAIT_OK) {
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index a535c7a1f66f..25e9e3df3075 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -938,11 +938,6 @@ int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
seq_printf(seq, " ");
}
-#if 0
- if (c->i2oversion == 0x02)
- seq_printf(seq, "%-d", dst->module_state);
-#endif
-
seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
seq_printf(seq, "%-#8x", dst->module_id);
seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
@@ -950,10 +945,6 @@ int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
seq_printf(seq, "%8d ", dst->module_size);
seq_printf(seq, "%8d ", dst->mpb_size);
seq_printf(seq, "0x%04x", dst->module_flags);
-#if 0
- if (c->i2oversion == 0x02)
- seq_printf(seq, "%d", dst->notification_level);
-#endif
seq_printf(seq, "\n");
}
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 9be68f380b1b..7186d004ec1d 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -274,53 +274,6 @@ static const char *i2o_scsi_info(struct Scsi_Host *SChost)
return hostdata->iop->name;
}
-#if 0
-/**
- * i2o_retry_run - retry on timeout
- * @f: unused
- *
- * Retry congested frames. This actually needs pushing down into
- * i2o core. We should only bother the OSM with this when we can't
- * queue and retry the frame. Or perhaps we should call the OSM
- * and its default handler should be this in the core, and this
- * call a 2nd "I give up" handler in the OSM ?
- */
-
-static void i2o_retry_run(unsigned long f)
-{
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&retry_lock, flags);
- for (i = 0; i < retry_ct; i++)
- i2o_post_message(retry_ctrl[i], virt_to_bus(retry[i]));
- retry_ct = 0;
- spin_unlock_irqrestore(&retry_lock, flags);
-}
-
-/**
- * flush_pending - empty the retry queue
- *
- * Turn each of the pending commands into a NOP and post it back
- * to the controller to clear it.
- */
-
-static void flush_pending(void)
-{
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&retry_lock, flags);
- for (i = 0; i < retry_ct; i++) {
- retry[i][0] &= ~0xFFFFFF;
- retry[i][0] |= I2O_CMD_UTIL_NOP << 24;
- i2o_post_message(retry_ctrl[i], virt_to_bus(retry[i]));
- }
- retry_ct = 0;
- spin_unlock_irqrestore(&retry_lock, flags);
-}
-#endif
-
/**
* i2o_scsi_reply - SCSI OSM message reply handler
* @c: controller issuing the reply
@@ -343,38 +296,41 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
struct device *dev;
u8 as, ds, st;
- cmd = i2o_cntxt_list_get(c, readl(&msg->u.s.tcntxt));
+ cmd = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
if (msg->u.head[0] & (1 << 13)) {
struct i2o_message *pmsg; /* preserved message */
u32 pm;
+ int err = DID_ERROR;
- pm = readl(&msg->body[3]);
+ pm = le32_to_cpu(msg->body[3]);
- pmsg = c->in_queue.virt + pm;
+ pmsg = i2o_msg_in_to_virt(c, pm);
- printk("IOP fail.\n");
- printk("From %d To %d Cmd %d.\n",
+ printk(KERN_ERR "IOP fail.\n");
+ printk(KERN_ERR "From %d To %d Cmd %d.\n",
(msg->u.head[1] >> 12) & 0xFFF,
msg->u.head[1] & 0xFFF, msg->u.head[1] >> 24);
- printk("Failure Code %d.\n", msg->body[0] >> 24);
+ printk(KERN_ERR "Failure Code %d.\n", msg->body[0] >> 24);
if (msg->body[0] & (1 << 16))
- printk("Format error.\n");
+ printk(KERN_ERR "Format error.\n");
if (msg->body[0] & (1 << 17))
- printk("Path error.\n");
+ printk(KERN_ERR "Path error.\n");
if (msg->body[0] & (1 << 18))
- printk("Path State.\n");
+ printk(KERN_ERR "Path State.\n");
if (msg->body[0] & (1 << 18))
- printk("Congestion.\n");
+ {
+ printk(KERN_ERR "Congestion.\n");
+ err = DID_BUS_BUSY;
+ }
- printk("Failing message is %p.\n", pmsg);
+ printk(KERN_DEBUG "Failing message is %p.\n", pmsg);
cmd = i2o_cntxt_list_get(c, readl(&pmsg->u.s.tcntxt));
if (!cmd)
return 1;
- printk("Aborted %ld\n", cmd->serial_number);
- cmd->result = DID_ERROR << 16;
+ cmd->result = err << 16;
cmd->scsi_done(cmd);
/* Now flush the message by making it a NOP */
@@ -387,9 +343,9 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
* Low byte is device status, next is adapter status,
* (then one byte reserved), then request status.
*/
- ds = (u8) readl(&msg->body[0]);
- as = (u8) (readl(&msg->body[0]) >> 8);
- st = (u8) (readl(&msg->body[0]) >> 24);
+ ds = (u8) le32_to_cpu(msg->body[0]);
+ as = (u8) (le32_to_cpu(msg->body[0]) >> 8);
+ st = (u8) (le32_to_cpu(msg->body[0]) >> 24);
/*
* Is this a control request coming back - eg an abort ?
@@ -398,7 +354,7 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
if (!cmd) {
if (st)
printk(KERN_WARNING "SCSI abort: %08X",
- readl(&msg->body[0]));
+ le32_to_cpu(msg->body[0]));
printk(KERN_INFO "SCSI abort completed.\n");
return -EFAULT;
}
@@ -411,21 +367,22 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
switch (st) {
case 0x06:
- count = readl(&msg->body[1]);
+ count = le32_to_cpu(msg->body[1]);
if (count < cmd->underflow) {
int i;
printk(KERN_ERR "SCSI: underflow 0x%08X 0x%08X"
"\n", count, cmd->underflow);
- printk("Cmd: ");
+ printk(KERN_DEBUG "Cmd: ");
for (i = 0; i < 15; i++)
- printk("%02X ", cmd->cmnd[i]);
- printk(".\n");
+ printk(KERN_DEBUG "%02X ",
+ cmd->cmnd[i]);
+ printk(KERN_DEBUG ".\n");
cmd->result = (DID_ERROR << 16);
}
break;
default:
- error = readl(&msg->body[0]);
+ error = le32_to_cpu(msg->body[0]);
printk(KERN_ERR "scsi-osm: SCSI error %08x\n", error);
@@ -517,8 +474,7 @@ void i2o_scsi_notify_controller_add(struct i2o_controller *c)
rc = scsi_add_host(i2o_shost->scsi_host, &c->device);
if (rc) {
- printk(KERN_ERR "scsi-osm: Could not add SCSI "
- "host\n");
+ printk(KERN_ERR "scsi-osm: Could not add SCSI " "host\n");
scsi_host_put(i2o_shost->scsi_host);
return;
}
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 699723e3a3c3..46fa7dfd6eb1 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/i2o.h>
+#include <linux/delay.h>
/* global I2O controller list */
LIST_HEAD(i2o_controllers);
@@ -117,7 +118,7 @@ u32 i2o_msg_get_wait(struct i2o_controller *c, struct i2o_message **msg,
*
* Returns context id > 0 on success or 0 on failure.
*/
-u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
+u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
{
struct i2o_context_list_element *entry;
unsigned long flags;
@@ -162,7 +163,7 @@ u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
*
* Returns context id on succes or 0 on failure.
*/
-u32 i2o_cntxt_list_remove(struct i2o_controller *c, void *ptr)
+u32 i2o_cntxt_list_remove(struct i2o_controller * c, void *ptr)
{
struct i2o_context_list_element *entry;
u32 context = 0;
@@ -470,7 +471,7 @@ static int i2o_iop_reset(struct i2o_controller *c)
if (m == I2O_QUEUE_EMPTY)
return -ETIMEDOUT;
- memset(status, 0, 4);
+ memset(status, 0, 8);
/* Quiesce all IOPs first */
i2o_iop_quiesce_all();
@@ -495,6 +496,13 @@ static int i2o_iop_reset(struct i2o_controller *c)
rc = -ETIMEDOUT;
goto exit;
}
+
+ /* Promise bug */
+ if (status[1] || status[4]) {
+ *status = 0;
+ break;
+ }
+
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
@@ -605,6 +613,7 @@ int i2o_iop_init_outbound_queue(struct i2o_controller *c)
/* Post frames */
for (i = 0; i < NMBR_MSG_FRAMES; i++) {
i2o_flush_reply(c, m);
+ udelay(1); /* Promise */
m += MSG_FRAME_SIZE * 4;
}
@@ -612,6 +621,23 @@ int i2o_iop_init_outbound_queue(struct i2o_controller *c)
}
/**
+ * i2o_iop_send_nop - send a core NOP message
+ * @c: controller
+ *
+ * Send a no-operation message with a reply set to cause no
+ * action either. Needed for bringing up promise controllers.
+ */
+static int i2o_iop_send_nop(struct i2o_controller *c)
+{
+ struct i2o_message *msg;
+ u32 m = i2o_msg_get_wait(c, &msg, HZ);
+ if (m == I2O_QUEUE_EMPTY)
+ return -ETIMEDOUT;
+ i2o_msg_nop(c, m);
+ return 0;
+}
+
+/**
* i2o_iop_activate - Bring controller up to HOLD
* @c: controller
*
@@ -622,8 +648,27 @@ int i2o_iop_init_outbound_queue(struct i2o_controller *c)
*/
static int i2o_iop_activate(struct i2o_controller *c)
{
+ struct pci_dev *i960 = NULL;
i2o_status_block *sb = c->status_block.virt;
int rc;
+
+ if (c->promise) {
+ /* Beat up the hardware first of all */
+ i960 =
+ pci_find_slot(c->pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0));
+ if (i960)
+ pci_write_config_word(i960, 0x42, 0);
+
+ /* Follow this sequence precisely or the controller
+ ceases to perform useful functions until reboot */
+ if ((rc = i2o_iop_send_nop(c)))
+ return rc;
+
+ if ((rc = i2o_iop_reset(c)))
+ return rc;
+ }
+
/* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */
/* In READY state, Get status */
@@ -659,13 +704,22 @@ static int i2o_iop_activate(struct i2o_controller *c)
if (rc)
return rc;
+ if (c->promise) {
+ if ((rc = i2o_iop_send_nop(c)))
+ return rc;
+
+ if ((rc = i2o_status_get(c)))
+ return rc;
+
+ if (i960)
+ pci_write_config_word(i960, 0x42, 0x3FF);
+ }
+
/* In HOLD state */
rc = i2o_hrt_get(c);
- if (rc)
- return rc;
- return 0;
+ return rc;
};
/**
@@ -691,10 +745,11 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
res->flags = IORESOURCE_MEM;
res->start = 0;
res->end = 0;
- printk("%s: requires private memory resources.\n", c->name);
+ printk(KERN_INFO "%s: requires private memory resources.\n",
+ c->name);
root = pci_find_parent_resource(c->pdev, res);
if (root == NULL)
- printk("Can't find parent resource!\n");
+ printk(KERN_WARNING "Can't find parent resource!\n");
if (root && allocate_resource(root, res, sb->desired_mem_size, sb->desired_mem_size, sb->desired_mem_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
NULL, NULL) >= 0) {
c->mem_alloc = 1;
@@ -712,10 +767,11 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
res->flags = IORESOURCE_IO;
res->start = 0;
res->end = 0;
- printk("%s: requires private memory resources.\n", c->name);
+ printk(KERN_INFO "%s: requires private memory resources.\n",
+ c->name);
root = pci_find_parent_resource(c->pdev, res);
if (root == NULL)
- printk("Can't find parent resource!\n");
+ printk(KERN_WARNING "Can't find parent resource!\n");
if (root && allocate_resource(root, res, sb->desired_io_size, sb->desired_io_size, sb->desired_io_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
NULL, NULL) >= 0) {
c->io_alloc = 1;
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index 9ee58b6cf55b..f98849abbe2a 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -138,13 +138,13 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c)
* If we know what card it is, set the size
* correctly. Code is taken from dpt_i2o.c
*/
- if(pdev->device == 0xa501) {
- if(pdev->subsystem_device >= 0xc032 &&
- pdev->subsystem_device <= 0xc03b) {
- if(c->base.len > 0x400000)
+ if (pdev->device == 0xa501) {
+ if (pdev->subsystem_device >= 0xc032 &&
+ pdev->subsystem_device <= 0xc03b) {
+ if (c->base.len > 0x400000)
c->base.len = 0x400000;
} else {
- if(c->base.len > 0x100000)
+ if (c->base.len > 0x100000)
c->base.len = 0x100000;
}
}
@@ -231,7 +231,7 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c)
}
#endif
- if (i2o_dma_alloc(dev, &c->status, 4, GFP_KERNEL)) {
+ if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) {
i2o_pci_free(c);
return -ENOMEM;
}
@@ -277,7 +277,6 @@ static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
struct device *dev = &c->pdev->dev;
struct i2o_message *m;
u32 mv;
- u32 *msg;
/*
* Old 960 steppings had a bug in the I2O unit that caused
@@ -298,11 +297,7 @@ static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
* Because bus_to_virt is deprecated, we have calculate the
* location by ourself!
*/
- m = (struct i2o_message *)(mv -
- (unsigned long)c->out_queue.phys +
- (unsigned long)c->out_queue.virt);
-
- msg = (u32 *) m;
+ m = i2o_msg_out_to_virt(c, mv);
/*
* Ensure this message is seen coherently but cachably by
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c
index 409b75442bbf..3d88ad622bdb 100644
--- a/drivers/net/bsd_comp.c
+++ b/drivers/net/bsd_comp.c
@@ -1160,7 +1160,7 @@ static struct compressor ppp_bsd_compress = {
* Module support routines
*************************************************************/
-int __init bsdcomp_init(void)
+static int __init bsdcomp_init(void)
{
int answer = ppp_register_compressor(&ppp_bsd_compress);
if (answer == 0)
@@ -1168,7 +1168,7 @@ int __init bsdcomp_init(void)
return answer;
}
-void __exit bsdcomp_cleanup(void)
+static void __exit bsdcomp_cleanup(void)
{
ppp_unregister_compressor(&ppp_bsd_compress);
}
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 4beb7ac2a861..94191fdb8e13 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -37,7 +37,6 @@
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
-#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/dma.h>
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 81282d94f91d..5f26d9ff30e9 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -168,6 +168,7 @@ enum StirTestMask {
struct stir_cb {
struct usb_device *usbdev; /* init: probe_irda */
+ struct usb_interface *usbintf;
struct net_device *netdev; /* network layer */
struct irlap_cb *irlap; /* The link layer we are binded to */
struct net_device_stats stats; /* network statistics */
@@ -508,6 +509,7 @@ static int change_speed(struct stir_cb *stir, unsigned speed)
{
int i, err;
__u8 mode;
+ int rc;
for (i = 0; i < ARRAY_SIZE(stir_modes); ++i) {
if (speed == stir_modes[i].speed)
@@ -521,7 +523,14 @@ static int change_speed(struct stir_cb *stir, unsigned speed)
pr_debug("speed change from %d to %d\n", stir->speed, speed);
/* sometimes needed to get chip out of stuck state */
+ rc = usb_lock_device_for_reset(stir->usbdev, stir->usbintf);
+ if (rc < 0) {
+ err = rc;
+ goto out;
+ }
err = usb_reset_device(stir->usbdev);
+ if (rc)
+ usb_unlock_device(stir->usbdev);
if (err)
goto out;
@@ -1066,6 +1075,7 @@ static int stir_probe(struct usb_interface *intf,
stir = net->priv;
stir->netdev = net;
stir->usbdev = dev;
+ stir->usbintf = intf;
ret = usb_reset_configuration(dev);
if (ret != 0) {
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 8c08255899ea..f65b0db111b8 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -98,6 +98,7 @@ static char *version =
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/delay.h>
#include <asm/system.h>
#include <asm/bitops.h>
@@ -308,8 +309,7 @@ void __init reset_chip(struct net_device *dev)
writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
/* wait 30 ms */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(30*HZ/1000);
+ msleep_interruptible(30);
/* Wait until the chip is reset */
reset_start_time = jiffies;
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index c51291a3c40a..df75c94e1d0c 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -636,7 +636,7 @@ struct compressor ppp_deflate_draft = {
.owner = THIS_MODULE
};
-int __init deflate_init(void)
+static int __init deflate_init(void)
{
int answer = ppp_register_compressor(&ppp_deflate);
if (answer == 0)
@@ -646,7 +646,7 @@ int __init deflate_init(void)
return answer;
}
-void __exit deflate_cleanup(void)
+static void __exit deflate_cleanup(void)
{
ppp_unregister_compressor(&ppp_deflate);
ppp_unregister_compressor(&ppp_deflate_draft);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index faaf5d04c05b..cf2b3ee2ce3f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -827,6 +827,236 @@ static void __init quirk_sis_96x_smbus(struct pci_dev *dev)
pci_read_config_byte(dev, 0x77, &val);
}
+
+#define UHCI_USBLEGSUP 0xc0 /* legacy support */
+#define UHCI_USBCMD 0 /* command register */
+#define UHCI_USBSTS 2 /* status register */
+#define UHCI_USBINTR 4 /* interrupt register */
+#define UHCI_USBLEGSUP_DEFAULT 0x2000 /* only PIRQ enable set */
+#define UHCI_USBCMD_RUN (1 << 0) /* RUN/STOP bit */
+#define UHCI_USBCMD_GRESET (1 << 2) /* Global reset */
+#define UHCI_USBCMD_CONFIGURE (1 << 6) /* config semaphore */
+#define UHCI_USBSTS_HALTED (1 << 5) /* HCHalted bit */
+
+#define OHCI_CONTROL 0x04
+#define OHCI_CMDSTATUS 0x08
+#define OHCI_INTRSTATUS 0x0c
+#define OHCI_INTRENABLE 0x10
+#define OHCI_INTRDISABLE 0x14
+#define OHCI_OCR (1 << 3) /* ownership change request */
+#define OHCI_CTRL_IR (1 << 8) /* interrupt routing */
+#define OHCI_INTR_OC (1 << 30) /* ownership change */
+
+#define EHCI_HCC_PARAMS 0x08 /* extended capabilities */
+#define EHCI_USBCMD 0 /* command register */
+#define EHCI_USBCMD_RUN (1 << 0) /* RUN/STOP bit */
+#define EHCI_USBSTS 4 /* status register */
+#define EHCI_USBSTS_HALTED (1 << 12) /* HCHalted bit */
+#define EHCI_USBINTR 8 /* interrupt register */
+#define EHCI_USBLEGSUP 0 /* legacy support register */
+#define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */
+#define EHCI_USBLEGSUP_OS (1 << 24) /* OS semaphore */
+#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */
+#define EHCI_USBLEGCTLSTS_SOOE (1 << 13) /* SMI on ownership change */
+
+int usb_early_handoff __initdata = 0;
+static int __init usb_handoff_early(char *str)
+{
+ usb_early_handoff = 1;
+ return 0;
+}
+__setup("usb-handoff", usb_handoff_early);
+
+static void __devinit quirk_usb_handoff_uhci(struct pci_dev *pdev)
+{
+ unsigned long base = 0;
+ int wait_time, delta;
+ u16 val, sts;
+ int i;
+
+ for (i = 0; i < PCI_ROM_RESOURCE; i++)
+ if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
+ base = pci_resource_start(pdev, i);
+ break;
+ }
+
+ if (!base)
+ return;
+
+ /*
+ * stop controller
+ */
+ sts = inw(base + UHCI_USBSTS);
+ val = inw(base + UHCI_USBCMD);
+ val &= ~(u16)(UHCI_USBCMD_RUN | UHCI_USBCMD_CONFIGURE);
+ outw(val, base + UHCI_USBCMD);
+
+ /*
+ * wait while it stops if it was running
+ */
+ if ((sts & UHCI_USBSTS_HALTED) == 0)
+ {
+ wait_time = 1000;
+ delta = 100;
+
+ do {
+ outw(0x1f, base + UHCI_USBSTS);
+ udelay(delta);
+ wait_time -= delta;
+ val = inw(base + UHCI_USBSTS);
+ if (val & UHCI_USBSTS_HALTED)
+ break;
+ } while (wait_time > 0);
+ }
+
+ /*
+ * disable interrupts & legacy support
+ */
+ outw(0, base + UHCI_USBINTR);
+ outw(0x1f, base + UHCI_USBSTS);
+ pci_read_config_word(pdev, UHCI_USBLEGSUP, &val);
+ if (val & 0xbf)
+ pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_DEFAULT);
+
+}
+
+static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
+{
+ void __iomem *base;
+ int wait_time;
+
+ base = ioremap_nocache(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (base == NULL) return;
+
+ if (readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
+ wait_time = 500; /* 0.5 seconds */
+ writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
+ writel(OHCI_OCR, base + OHCI_CMDSTATUS);
+ while (wait_time > 0 &&
+ readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
+ wait_time -= 10;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout((HZ*10 + 999) / 1000);
+ }
+ }
+
+ /*
+ * disable interrupts
+ */
+ writel(~(u32)0, base + OHCI_INTRDISABLE);
+ writel(~(u32)0, base + OHCI_INTRSTATUS);
+
+ iounmap(base);
+}
+
+static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
+{
+ int wait_time, delta;
+ void __iomem *base, *op_reg_base;
+ u32 hcc_params, val, temp;
+ u8 cap_length;
+
+ base = ioremap_nocache(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (base == NULL) return;
+
+ cap_length = readb(base);
+ op_reg_base = base + cap_length;
+ hcc_params = readl(base + EHCI_HCC_PARAMS);
+ hcc_params = (hcc_params >> 8) & 0xff;
+ if (hcc_params) {
+ pci_read_config_dword(pdev,
+ hcc_params + EHCI_USBLEGSUP,
+ &val);
+ if (((val & 0xff) == 1) && (val & EHCI_USBLEGSUP_BIOS)) {
+ /*
+ * Ok, BIOS is in smm mode, try to hand off...
+ */
+ pci_read_config_dword(pdev,
+ hcc_params + EHCI_USBLEGCTLSTS,
+ &temp);
+ pci_write_config_dword(pdev,
+ hcc_params + EHCI_USBLEGCTLSTS,
+ temp | EHCI_USBLEGCTLSTS_SOOE);
+ val |= EHCI_USBLEGSUP_OS;
+ pci_write_config_dword(pdev,
+ hcc_params + EHCI_USBLEGSUP,
+ val);
+
+ wait_time = 500;
+ do {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout((HZ*10+999)/1000);
+ wait_time -= 10;
+ pci_read_config_dword(pdev,
+ hcc_params + EHCI_USBLEGSUP,
+ &val);
+ } while (wait_time && (val & EHCI_USBLEGSUP_BIOS));
+ if (!wait_time) {
+ /*
+ * well, possibly buggy BIOS...
+ */
+ printk(KERN_WARNING "EHCI early BIOS handoff "
+ "failed (BIOS bug ?)\n");
+ pci_write_config_dword(pdev,
+ hcc_params + EHCI_USBLEGSUP,
+ EHCI_USBLEGSUP_OS);
+ pci_write_config_dword(pdev,
+ hcc_params + EHCI_USBLEGCTLSTS,
+ 0);
+ }
+ }
+ }
+
+ /*
+ * halt EHCI & disable its interrupts in any case
+ */
+ val = readl(op_reg_base + EHCI_USBSTS);
+ if ((val & EHCI_USBSTS_HALTED) == 0) {
+ val = readl(op_reg_base + EHCI_USBCMD);
+ val &= ~EHCI_USBCMD_RUN;
+ writel(val, op_reg_base + EHCI_USBCMD);
+
+ wait_time = 2000;
+ delta = 100;
+ do {
+ writel(0x3f, op_reg_base + EHCI_USBSTS);
+ udelay(delta);
+ wait_time -= delta;
+ val = readl(op_reg_base + EHCI_USBSTS);
+ if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
+ break;
+ }
+ } while (wait_time > 0);
+ }
+ writel(0, op_reg_base + EHCI_USBINTR);
+ writel(0x3f, op_reg_base + EHCI_USBSTS);
+
+ iounmap(base);
+
+ return;
+}
+
+
+
+static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
+{
+ if (!usb_early_handoff)
+ return;
+
+ if (pdev->class == ((PCI_CLASS_SERIAL_USB << 8) | 0x00)) { /* UHCI */
+ quirk_usb_handoff_uhci(pdev);
+ } else if (pdev->class == ((PCI_CLASS_SERIAL_USB << 8) | 0x10)) { /* OHCI */
+ quirk_usb_handoff_ohci(pdev);
+ } else if (pdev->class == ((PCI_CLASS_SERIAL_USB << 8) | 0x20)) { /* EHCI */
+ quirk_usb_disable_ehci(pdev);
+ }
+
+ return;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
+
/*
* ... This is further complicated by the fact that some SiS96x south
* bridges pretend to be 85C503/5513 instead. In that case see if we
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 2249b78487bd..b3714fbd0083 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1595,8 +1595,8 @@ dasd_alloc_queue(struct dasd_device * device)
device->request_queue->queuedata = device;
#if 0
- elevator_exit(device->request_queue);
- rc = elevator_init(device->request_queue, &elevator_noop);
+ elevator_exit(device->request_queue->elevator);
+ rc = elevator_init(device->request_queue, "noop");
if (rc) {
blk_cleanup_queue(device->request_queue);
return rc;
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index b7f4e7b8be74..1efc9f21229e 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -225,8 +225,8 @@ tapeblock_setup_device(struct tape_device * device)
if (!blkdat->request_queue)
return -ENOMEM;
- elevator_exit(blkdat->request_queue);
- rc = elevator_init(blkdat->request_queue, &elevator_noop);
+ elevator_exit(blkdat->request_queue->elevator);
+ rc = elevator_init(blkdat->request_queue, "noop");
if (rc)
goto cleanup_queue;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index b6c76b5680a2..da06adf48834 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -377,6 +377,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
return 0;
SCpnt->cmnd[0] = WRITE_10;
SCpnt->sc_data_direction = DMA_TO_DEVICE;
+ cd->cdi.media_written = 1;
} else if (rq_data_dir(SCpnt->request) == READ) {
SCpnt->cmnd[0] = READ_10;
SCpnt->sc_data_direction = DMA_FROM_DEVICE;
@@ -875,10 +876,10 @@ static void get_capabilities(struct scsi_cd *cd)
cd->cdi.mask |= CDC_CLOSE_TRAY; */
/*
- * if DVD-RAM of MRW-W, we are randomly writeable
+ * if DVD-RAM, MRW-W or CD-RW, we are randomly writable
*/
- if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM)) !=
- (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM)) {
+ if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) !=
+ (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) {
cd->device->writeable = 1;
}
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 0e805e4f3aa1..7bb3fa4c38b6 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -7,7 +7,7 @@ menu "USB support"
# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
config USB
tristate "Support for Host-side USB"
- depends on PCI || SA1111 || ARCH_OMAP1510 || ARCH_OMAP1610 || ARCH_LH7A404
+ depends on PCI || SA1111 || ARCH_OMAP1510 || ARCH_OMAP1610 || ARCH_LH7A404 || PXA27x
---help---
Universal Serial Bus (USB) is a specification for a serial bus
subsystem which offers higher speeds and more features than the
@@ -91,6 +91,8 @@ source "drivers/usb/serial/Kconfig"
source "drivers/usb/misc/Kconfig"
+source "drivers/usb/atm/Kconfig"
+
source "drivers/usb/gadget/Kconfig"
endmenu
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index e3787f35cd21..63a7f2d80f83 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -62,8 +62,10 @@ obj-$(CONFIG_USB_LCD) += misc/
obj-$(CONFIG_USB_LED) += misc/
obj-$(CONFIG_USB_LEGOTOWER) += misc/
obj-$(CONFIG_USB_RIO500) += misc/
-obj-$(CONFIG_USB_SPEEDTOUCH) += misc/
obj-$(CONFIG_USB_TEST) += misc/
obj-$(CONFIG_USB_TIGL) += misc/
obj-$(CONFIG_USB_USS720) += misc/
obj-$(CONFIG_USB_PHIDGETSERVO) += misc/
+
+obj-$(CONFIG_USB_ATM) += atm/
+obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
diff --git a/drivers/usb/atm/Kconfig b/drivers/usb/atm/Kconfig
new file mode 100644
index 000000000000..0d9f5379b8cf
--- /dev/null
+++ b/drivers/usb/atm/Kconfig
@@ -0,0 +1,30 @@
+#
+# USB ATM driver configuration
+#
+comment "USB ATM/DSL drivers"
+ depends on USB
+
+config USB_ATM
+ tristate "Generic USB ATM/DSL core I/O support"
+ depends on USB && ATM
+ select CRC32
+ default n
+ help
+ This provides a library which is used for packet I/O by USB DSL
+ modems, such as the SpeedTouch driver below.
+
+ To compile this driver as a module, choose M here: the
+ module will be called usb_atm.
+
+config USB_SPEEDTOUCH
+ tristate "Alcatel Speedtouch USB support"
+ depends on USB && ATM
+ select USB_ATM
+ help
+ Say Y here if you have an Alcatel SpeedTouch USB or SpeedTouch 330
+ modem. In order to use your modem you will need to install the
+ two parts of the firmware, extracted by the user space tools; see
+ <http://www.linux-usb.org/SpeedTouch/> for details.
+
+ To compile this driver as a module, choose M here: the
+ module will be called speedtch.
diff --git a/drivers/usb/atm/Makefile b/drivers/usb/atm/Makefile
new file mode 100644
index 000000000000..9213b8b97587
--- /dev/null
+++ b/drivers/usb/atm/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the USB ATM/DSL drivers
+# (the generic ATM core and the SpeedTouch driver)
+#
+
+obj-$(CONFIG_USB_ATM) += usb_atm.o
+obj-$(CONFIG_USB_SPEEDTOUCH) += speedtch.o
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
new file mode 100644
index 000000000000..f17e576d12ec
--- /dev/null
+++ b/drivers/usb/atm/speedtch.c
@@ -0,0 +1,866 @@
+/******************************************************************************
+ * speedtch.c - Alcatel SpeedTouch USB xDSL modem driver
+ *
+ * Copyright (C) 2001, Alcatel
+ * Copyright (C) 2003, Duncan Sands
+ * Copyright (C) 2004, David Woodhouse
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/atm.h>
+#include <linux/atmdev.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+
+#include "usb_atm.h"
+
+/*
+#define DEBUG
+#define VERBOSE_DEBUG
+*/
+
+#if !defined (DEBUG) && defined (CONFIG_USB_DEBUG)
+# define DEBUG
+#endif
+
+#include <linux/usb.h>
+
+#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
+# define USE_FW_LOADER
+#endif
+
+#ifdef VERBOSE_DEBUG
+static int udsl_print_packet(const unsigned char *data, int len);
+#define PACKETDEBUG(arg...) udsl_print_packet (arg)
+#define vdbg(arg...) dbg (arg)
+#else
+#define PACKETDEBUG(arg...)
+#define vdbg(arg...)
+#endif
+
+#define DRIVER_AUTHOR "Johan Verrept, Duncan Sands <duncan.sands@free.fr>"
+#define DRIVER_VERSION "1.8"
+#define DRIVER_DESC "Alcatel SpeedTouch USB driver version " DRIVER_VERSION
+
+static const char speedtch_driver_name[] = "speedtch";
+
+#define SPEEDTOUCH_VENDORID 0x06b9
+#define SPEEDTOUCH_PRODUCTID 0x4061
+
+/* Timeout in jiffies */
+#define CTRL_TIMEOUT (2*HZ)
+#define DATA_TIMEOUT (2*HZ)
+
+#define OFFSET_7 0 /* size 1 */
+#define OFFSET_b 1 /* size 8 */
+#define OFFSET_d 9 /* size 4 */
+#define OFFSET_e 13 /* size 1 */
+#define OFFSET_f 14 /* size 1 */
+#define TOTAL 15
+
+#define SIZE_7 1
+#define SIZE_b 8
+#define SIZE_d 4
+#define SIZE_e 1
+#define SIZE_f 1
+
+static int dl_512_first = 0;
+static int sw_buffering = 0;
+
+module_param(dl_512_first, bool, 0444);
+MODULE_PARM_DESC(dl_512_first, "Read 512 bytes before sending firmware");
+
+module_param(sw_buffering, uint, 0444);
+MODULE_PARM_DESC(sw_buffering, "Enable software buffering");
+
+#define UDSL_IOCTL_LINE_UP 1
+#define UDSL_IOCTL_LINE_DOWN 2
+
+#define SPEEDTCH_ENDPOINT_INT 0x81
+#define SPEEDTCH_ENDPOINT_DATA 0x07
+#define SPEEDTCH_ENDPOINT_FIRMWARE 0x05
+
+#define hex2int(c) ( (c >= '0') && (c <= '9') ? (c - '0') : ((c & 0xf) + 9) )
+
+static struct usb_device_id speedtch_usb_ids[] = {
+ {USB_DEVICE(SPEEDTOUCH_VENDORID, SPEEDTOUCH_PRODUCTID)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, speedtch_usb_ids);
+
+struct speedtch_instance_data {
+ struct udsl_instance_data u;
+
+ /* Status */
+ struct urb *int_urb;
+ unsigned char int_data[16];
+ struct work_struct poll_work;
+ struct timer_list poll_timer;
+};
+/* USB */
+
+static int speedtch_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id);
+static void speedtch_usb_disconnect(struct usb_interface *intf);
+static int speedtch_usb_ioctl(struct usb_interface *intf, unsigned int code,
+ void *user_data);
+static void speedtch_handle_int(struct urb *urb, struct pt_regs *regs);
+static void speedtch_poll_status(struct speedtch_instance_data *instance);
+
+static struct usb_driver speedtch_usb_driver = {
+ .owner = THIS_MODULE,
+ .name = speedtch_driver_name,
+ .probe = speedtch_usb_probe,
+ .disconnect = speedtch_usb_disconnect,
+ .ioctl = speedtch_usb_ioctl,
+ .id_table = speedtch_usb_ids,
+};
+
+/***************
+** firmware **
+***************/
+
+static void speedtch_got_firmware(struct speedtch_instance_data *instance,
+ int got_it)
+{
+ int err;
+ struct usb_interface *intf;
+
+ down(&instance->u.serialize); /* vs self, speedtch_firmware_start */
+ if (instance->u.status == UDSL_LOADED_FIRMWARE)
+ goto out;
+ if (!got_it) {
+ instance->u.status = UDSL_NO_FIRMWARE;
+ goto out;
+ }
+ if ((err = usb_set_interface(instance->u.usb_dev, 1, 1)) < 0) {
+ dbg("speedtch_got_firmware: usb_set_interface returned %d!", err);
+ instance->u.status = UDSL_NO_FIRMWARE;
+ goto out;
+ }
+
+ /* Set up interrupt endpoint */
+ intf = usb_ifnum_to_if(instance->u.usb_dev, 0);
+ if (intf && !usb_driver_claim_interface(&speedtch_usb_driver, intf, NULL)) {
+
+ instance->int_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (instance->int_urb) {
+
+ usb_fill_int_urb(instance->int_urb, instance->u.usb_dev,
+ usb_rcvintpipe(instance->u.usb_dev, SPEEDTCH_ENDPOINT_INT),
+ instance->int_data,
+ sizeof(instance->int_data),
+ speedtch_handle_int, instance, 50);
+ err = usb_submit_urb(instance->int_urb, GFP_KERNEL);
+ if (err) {
+ /* Doesn't matter; we'll poll anyway */
+ dbg("speedtch_got_firmware: Submission of interrupt URB failed %d", err);
+ usb_free_urb(instance->int_urb);
+ instance->int_urb = NULL;
+ usb_driver_release_interface(&speedtch_usb_driver, intf);
+ }
+ }
+ }
+ /* Start status polling */
+ mod_timer(&instance->poll_timer, jiffies + (1 * HZ));
+
+ instance->u.status = UDSL_LOADED_FIRMWARE;
+ tasklet_schedule(&instance->u.receive_tasklet);
+ out:
+ up(&instance->u.serialize);
+ wake_up_interruptible(&instance->u.firmware_waiters);
+}
+
+static int speedtch_set_swbuff(struct speedtch_instance_data *instance,
+ int state)
+{
+ struct usb_device *dev = instance->u.usb_dev;
+ int ret;
+
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x32, 0x40, state ? 0x01 : 0x00,
+ 0x00, NULL, 0, 100);
+ if (ret < 0) {
+ printk("Warning: %sabling SW buffering: usb_control_msg returned %d\n",
+ state ? "En" : "Dis", ret);
+ return ret;
+ }
+
+ dbg("speedtch_set_swbuff: %sbled SW buffering", state ? "En" : "Dis");
+ return 0;
+}
+
+static void speedtch_test_sequence(struct speedtch_instance_data *instance)
+{
+ struct usb_device *dev = instance->u.usb_dev;
+ unsigned char buf[10];
+ int ret;
+
+ /* URB 147 */
+ buf[0] = 0x1c;
+ buf[1] = 0x50;
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x01, 0x40, 0x0b, 0x00, buf, 2, 100);
+ if (ret < 0)
+ printk(KERN_WARNING "%s failed on URB147: %d\n", __func__, ret);
+
+ /* URB 148 */
+ buf[0] = 0x32;
+ buf[1] = 0x00;
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x01, 0x40, 0x02, 0x00, buf, 2, 100);
+ if (ret < 0)
+ printk(KERN_WARNING "%s failed on URB148: %d\n", __func__, ret);
+
+ /* URB 149 */
+ buf[0] = 0x01;
+ buf[1] = 0x00;
+ buf[2] = 0x01;
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x01, 0x40, 0x03, 0x00, buf, 3, 100);
+ if (ret < 0)
+ printk(KERN_WARNING "%s failed on URB149: %d\n", __func__, ret);
+
+ /* URB 150 */
+ buf[0] = 0x01;
+ buf[1] = 0x00;
+ buf[2] = 0x01;
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x01, 0x40, 0x04, 0x00, buf, 3, 100);
+ if (ret < 0)
+ printk(KERN_WARNING "%s failed on URB150: %d\n", __func__, ret);
+}
+
+static int speedtch_start_synchro(struct speedtch_instance_data *instance)
+{
+ struct usb_device *dev = instance->u.usb_dev;
+ unsigned char buf[2];
+ int ret;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x12, 0xc0, 0x04, 0x00,
+ buf, sizeof(buf), CTRL_TIMEOUT);
+ if (ret < 0) {
+ printk(KERN_WARNING "SpeedTouch: Failed to start ADSL synchronisation: %d\n", ret);
+ return ret;
+ }
+
+ dbg("speedtch_start_synchro: modem prodded. %d Bytes returned: %02x %02x", ret, buf[0], buf[1]);
+ return 0;
+}
+
+static void speedtch_handle_int(struct urb *urb, struct pt_regs *regs)
+{
+ struct speedtch_instance_data *instance = urb->context;
+ unsigned int count = urb->actual_length;
+ int ret;
+
+ /* The magic interrupt for "up state" */
+ const static unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 };
+ /* The magic interrupt for "down state" */
+ const static unsigned char down_int[6] = { 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated; clean up */
+ dbg("%s - urb shutting down with status: %d", __func__, urb->status);
+ return;
+ default:
+ dbg("%s - nonzero urb status received: %d", __func__, urb->status);
+ goto exit;
+ }
+
+ if (count < 6) {
+ dbg("%s - int packet too short", __func__);
+ goto exit;
+ }
+
+ if (!memcmp(up_int, instance->int_data, 6)) {
+ del_timer(&instance->poll_timer);
+ printk(KERN_NOTICE "DSL line goes up\n");
+ } else if (!memcmp(down_int, instance->int_data, 6)) {
+ printk(KERN_NOTICE "DSL line goes down\n");
+ } else {
+ int i;
+
+ printk(KERN_DEBUG "Unknown interrupt packet of %d bytes:", count);
+ for (i = 0; i < count; i++)
+ printk(" %02x", instance->int_data[i]);
+ printk("\n");
+ }
+ schedule_work(&instance->poll_work);
+
+ exit:
+ rmb();
+ if (!instance->int_urb)
+ return;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ err("%s - usb_submit_urb failed with result %d", __func__, ret);
+}
+
+static int speedtch_get_status(struct speedtch_instance_data *instance,
+ unsigned char *buf)
+{
+ struct usb_device *dev = instance->u.usb_dev;
+ int ret;
+
+ memset(buf, 0, TOTAL);
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x12, 0xc0, 0x07, 0x00, buf + OFFSET_7, SIZE_7,
+ CTRL_TIMEOUT);
+ if (ret < 0) {
+ dbg("MSG 7 failed");
+ return ret;
+ }
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x12, 0xc0, 0x0b, 0x00, buf + OFFSET_b, SIZE_b,
+ CTRL_TIMEOUT);
+ if (ret < 0) {
+ dbg("MSG B failed");
+ return ret;
+ }
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x12, 0xc0, 0x0d, 0x00, buf + OFFSET_d, SIZE_d,
+ CTRL_TIMEOUT);
+ if (ret < 0) {
+ dbg("MSG D failed");
+ return ret;
+ }
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x01, 0xc0, 0x0e, 0x00, buf + OFFSET_e, SIZE_e,
+ CTRL_TIMEOUT);
+ if (ret < 0) {
+ dbg("MSG E failed");
+ return ret;
+ }
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x01, 0xc0, 0x0f, 0x00, buf + OFFSET_f, SIZE_f,
+ CTRL_TIMEOUT);
+ if (ret < 0) {
+ dbg("MSG F failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void speedtch_poll_status(struct speedtch_instance_data *instance)
+{
+ unsigned char buf[TOTAL];
+ int ret;
+
+ ret = speedtch_get_status(instance, buf);
+ if (ret) {
+ printk(KERN_WARNING
+ "SpeedTouch: Error %d fetching device status\n", ret);
+ return;
+ }
+
+ dbg("Line state %02x", buf[OFFSET_7]);
+
+ switch (buf[OFFSET_7]) {
+ case 0:
+ if (instance->u.atm_dev->signal != ATM_PHY_SIG_LOST) {
+ instance->u.atm_dev->signal = ATM_PHY_SIG_LOST;
+ printk(KERN_NOTICE "ADSL line is down\n");
+ }
+ break;
+
+ case 0x08:
+ if (instance->u.atm_dev->signal != ATM_PHY_SIG_UNKNOWN) {
+ instance->u.atm_dev->signal = ATM_PHY_SIG_UNKNOWN;
+ printk(KERN_NOTICE "ADSL line is blocked?\n");
+ }
+ break;
+
+ case 0x10:
+ if (instance->u.atm_dev->signal != ATM_PHY_SIG_LOST) {
+ instance->u.atm_dev->signal = ATM_PHY_SIG_LOST;
+ printk(KERN_NOTICE "ADSL line is synchronising\n");
+ }
+ break;
+
+ case 0x20:
+ if (instance->u.atm_dev->signal != ATM_PHY_SIG_FOUND) {
+ int down_speed = buf[OFFSET_b] | (buf[OFFSET_b + 1] << 8)
+ | (buf[OFFSET_b + 2] << 16) | (buf[OFFSET_b + 3] << 24);
+ int up_speed = buf[OFFSET_b + 4] | (buf[OFFSET_b + 5] << 8)
+ | (buf[OFFSET_b + 6] << 16) | (buf[OFFSET_b + 7] << 24);
+
+ if (!(down_speed & 0x0000ffff) &&
+ !(up_speed & 0x0000ffff)) {
+ down_speed >>= 16;
+ up_speed >>= 16;
+ }
+ instance->u.atm_dev->link_rate = down_speed * 1000 / 424;
+ instance->u.atm_dev->signal = ATM_PHY_SIG_FOUND;
+
+ printk(KERN_NOTICE
+ "ADSL line is up (%d Kib/s down | %d Kib/s up)\n",
+ down_speed, up_speed);
+ }
+ break;
+
+ default:
+ if (instance->u.atm_dev->signal != ATM_PHY_SIG_UNKNOWN) {
+ instance->u.atm_dev->signal = ATM_PHY_SIG_UNKNOWN;
+ printk(KERN_NOTICE "Unknown line state %02x\n", buf[OFFSET_7]);
+ }
+ break;
+ }
+}
+
+static void speedtch_timer_poll(unsigned long data)
+{
+ struct speedtch_instance_data *instance = (void *)data;
+
+ schedule_work(&instance->poll_work);
+ mod_timer(&instance->poll_timer, jiffies + (5 * HZ));
+}
+
+#ifdef USE_FW_LOADER
+static void speedtch_upload_firmware(struct speedtch_instance_data *instance,
+ const struct firmware *fw1,
+ const struct firmware *fw2)
+{
+ unsigned char *buffer;
+ struct usb_device *usb_dev = instance->u.usb_dev;
+ struct usb_interface *intf;
+ int actual_length, ret;
+ int offset;
+
+ dbg("speedtch_upload_firmware");
+
+ if (!(intf = usb_ifnum_to_if(usb_dev, 2))) {
+ dbg("speedtch_upload_firmware: interface not found!");
+ goto fail;
+ }
+
+ if (!(buffer = (unsigned char *)__get_free_page(GFP_KERNEL))) {
+ dbg("speedtch_upload_firmware: no memory for buffer!");
+ goto fail;
+ }
+
+ /* A user-space firmware loader may already have claimed interface #2 */
+ if ((ret =
+ usb_driver_claim_interface(&speedtch_usb_driver, intf, NULL)) < 0) {
+ dbg("speedtch_upload_firmware: interface in use (%d)!", ret);
+ goto fail_free;
+ }
+
+ /* URB 7 */
+ if (dl_512_first) { /* some modems need a read before writing the firmware */
+ ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, SPEEDTCH_ENDPOINT_FIRMWARE),
+ buffer, 0x200, &actual_length, 2 * HZ);
+
+ if (ret < 0 && ret != -ETIMEDOUT)
+ dbg("speedtch_upload_firmware: read BLOCK0 from modem failed (%d)!", ret);
+ else
+ dbg("speedtch_upload_firmware: BLOCK0 downloaded (%d bytes)", ret);
+ }
+
+ /* URB 8 : both leds are static green */
+ for (offset = 0; offset < fw1->size; offset += PAGE_SIZE) {
+ int thislen = min_t(int, PAGE_SIZE, fw1->size - offset);
+ memcpy(buffer, fw1->data + offset, thislen);
+
+ ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, SPEEDTCH_ENDPOINT_FIRMWARE),
+ buffer, thislen, &actual_length, DATA_TIMEOUT);
+
+ if (ret < 0) {
+ dbg("speedtch_upload_firmware: write BLOCK1 to modem failed (%d)!", ret);
+ goto fail_release;
+ }
+ dbg("speedtch_upload_firmware: BLOCK1 uploaded (%d bytes)", fw1->size);
+ }
+
+ /* USB led blinking green, ADSL led off */
+
+ /* URB 11 */
+ ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, SPEEDTCH_ENDPOINT_FIRMWARE),
+ buffer, 0x200, &actual_length, DATA_TIMEOUT);
+
+ if (ret < 0) {
+ dbg("speedtch_upload_firmware: read BLOCK2 from modem failed (%d)!", ret);
+ goto fail_release;
+ }
+ dbg("speedtch_upload_firmware: BLOCK2 downloaded (%d bytes)", actual_length);
+
+ /* URBs 12 to 139 - USB led blinking green, ADSL led off */
+ for (offset = 0; offset < fw2->size; offset += PAGE_SIZE) {
+ int thislen = min_t(int, PAGE_SIZE, fw2->size - offset);
+ memcpy(buffer, fw2->data + offset, thislen);
+
+ ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, SPEEDTCH_ENDPOINT_FIRMWARE),
+ buffer, thislen, &actual_length, DATA_TIMEOUT);
+
+ if (ret < 0) {
+ dbg("speedtch_upload_firmware: write BLOCK3 to modem failed (%d)!", ret);
+ goto fail_release;
+ }
+ }
+ dbg("speedtch_upload_firmware: BLOCK3 uploaded (%d bytes)", fw2->size);
+
+ /* USB led static green, ADSL led static red */
+
+ /* URB 142 */
+ ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, SPEEDTCH_ENDPOINT_FIRMWARE),
+ buffer, 0x200, &actual_length, DATA_TIMEOUT);
+
+ if (ret < 0) {
+ dbg("speedtch_upload_firmware: read BLOCK4 from modem failed (%d)!", ret);
+ goto fail_release;
+ }
+
+ /* success */
+ dbg("speedtch_upload_firmware: BLOCK4 downloaded (%d bytes)", actual_length);
+
+ /* Delay to allow firmware to start up. We can do this here
+ because we're in our own kernel thread anyway. */
+ msleep(1000);
+
+ /* Enable software buffering, if requested */
+ if (sw_buffering)
+ speedtch_set_swbuff(instance, 1);
+
+ /* Magic spell; don't ask us what this does */
+ speedtch_test_sequence(instance);
+
+ /* Start modem synchronisation */
+ if (speedtch_start_synchro(instance))
+ dbg("speedtch_start_synchro: failed");
+
+ speedtch_got_firmware(instance, 1);
+
+ free_page((unsigned long)buffer);
+ return;
+
+ fail_release:
+ /* Only release interface #2 if uploading failed; we don't release it
+ if we succeeded. This prevents the userspace tools from trying to load
+ the firmware themselves */
+ usb_driver_release_interface(&speedtch_usb_driver, intf);
+ fail_free:
+ free_page((unsigned long)buffer);
+ fail:
+ speedtch_got_firmware(instance, 0);
+}
+
+static int speedtch_find_firmware(struct speedtch_instance_data
+ *instance, int phase,
+ const struct firmware **fw_p)
+{
+ char buf[24];
+ const u16 bcdDevice = instance->u.usb_dev->descriptor.bcdDevice;
+ const u8 major_revision = bcdDevice >> 8;
+ const u8 minor_revision = bcdDevice & 0xff;
+
+ sprintf(buf, "speedtch-%d.bin.%x.%02x", phase, major_revision, minor_revision);
+ dbg("speedtch_find_firmware: looking for %s", buf);
+
+ if (request_firmware(fw_p, buf, &instance->u.usb_dev->dev)) {
+ sprintf(buf, "speedtch-%d.bin.%x", phase, major_revision);
+ dbg("speedtch_find_firmware: looking for %s", buf);
+
+ if (request_firmware(fw_p, buf, &instance->u.usb_dev->dev)) {
+ sprintf(buf, "speedtch-%d.bin", phase);
+ dbg("speedtch_find_firmware: looking for %s", buf);
+
+ if (request_firmware(fw_p, buf, &instance->u.usb_dev->dev)) {
+ dev_warn(&instance->u.usb_dev->dev, "no stage %d firmware found!", phase);
+ return -ENOENT;
+ }
+ }
+ }
+
+ dev_info(&instance->u.usb_dev->dev, "found stage %d firmware %s\n", phase, buf);
+
+ return 0;
+}
+
+static int speedtch_load_firmware(void *arg)
+{
+ const struct firmware *fw1, *fw2;
+ struct speedtch_instance_data *instance = arg;
+
+ BUG_ON(!instance);
+
+ daemonize("firmware/speedtch");
+
+ if (!speedtch_find_firmware(instance, 1, &fw1)) {
+ if (!speedtch_find_firmware(instance, 2, &fw2)) {
+ speedtch_upload_firmware(instance, fw1, fw2);
+ release_firmware(fw2);
+ }
+ release_firmware(fw1);
+ }
+
+ /* In case we failed, set state back to NO_FIRMWARE so that
+ another later attempt may work. Otherwise, we never actually
+ manage to recover if, for example, the firmware is on /usr and
+ we look for it too early. */
+ speedtch_got_firmware(instance, 0);
+
+ module_put(THIS_MODULE);
+ udsl_put_instance(&instance->u);
+ return 0;
+}
+#endif /* USE_FW_LOADER */
+
+static void speedtch_firmware_start(struct speedtch_instance_data *instance)
+{
+#ifdef USE_FW_LOADER
+ int ret;
+#endif
+
+ dbg("speedtch_firmware_start");
+
+ down(&instance->u.serialize); /* vs self, speedtch_got_firmware */
+
+ if (instance->u.status >= UDSL_LOADING_FIRMWARE) {
+ up(&instance->u.serialize);
+ return;
+ }
+
+ instance->u.status = UDSL_LOADING_FIRMWARE;
+ up(&instance->u.serialize);
+
+#ifdef USE_FW_LOADER
+ udsl_get_instance(&instance->u);
+ try_module_get(THIS_MODULE);
+
+ ret = kernel_thread(speedtch_load_firmware, instance,
+ CLONE_FS | CLONE_FILES);
+
+ if (ret >= 0)
+ return; /* OK */
+
+ dbg("speedtch_firmware_start: kernel_thread failed (%d)!", ret);
+
+ module_put(THIS_MODULE);
+ udsl_put_instance(&instance->u);
+ /* Just pretend it never happened... hope modem_run happens */
+#endif /* USE_FW_LOADER */
+
+ speedtch_got_firmware(instance, 0);
+}
+
+static int speedtch_firmware_wait(struct udsl_instance_data *instance)
+{
+ speedtch_firmware_start((void *)instance);
+
+ if (wait_event_interruptible(instance->firmware_waiters, instance->status != UDSL_LOADING_FIRMWARE) < 0)
+ return -ERESTARTSYS;
+
+ return (instance->status == UDSL_LOADED_FIRMWARE) ? 0 : -EAGAIN;
+}
+
+/**********
+** USB **
+**********/
+
+static int speedtch_usb_ioctl(struct usb_interface *intf, unsigned int code,
+ void *user_data)
+{
+ struct speedtch_instance_data *instance = usb_get_intfdata(intf);
+
+ dbg("speedtch_usb_ioctl entered");
+
+ if (!instance) {
+ dbg("speedtch_usb_ioctl: NULL instance!");
+ return -ENODEV;
+ }
+
+ switch (code) {
+ case UDSL_IOCTL_LINE_UP:
+ instance->u.atm_dev->signal = ATM_PHY_SIG_FOUND;
+ speedtch_got_firmware(instance, 1);
+ return (instance->u.status == UDSL_LOADED_FIRMWARE) ? 0 : -EIO;
+ case UDSL_IOCTL_LINE_DOWN:
+ instance->u.atm_dev->signal = ATM_PHY_SIG_LOST;
+ return 0;
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int speedtch_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ int ifnum = intf->altsetting->desc.bInterfaceNumber;
+ struct speedtch_instance_data *instance;
+ unsigned char mac_str[13];
+ int ret, i;
+ char buf7[SIZE_7];
+
+ dbg("speedtch_usb_probe: trying device with vendor=0x%x, product=0x%x, ifnum %d", dev->descriptor.idVendor, dev->descriptor.idProduct, ifnum);
+
+ if ((dev->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) ||
+ (dev->descriptor.idVendor != SPEEDTOUCH_VENDORID) ||
+ (dev->descriptor.idProduct != SPEEDTOUCH_PRODUCTID) || (ifnum != 1))
+ return -ENODEV;
+
+ dbg("speedtch_usb_probe: device accepted");
+
+ /* instance init */
+ instance = kmalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance) {
+ dbg("speedtch_usb_probe: no memory for instance data!");
+ return -ENOMEM;
+ }
+
+ memset(instance, 0, sizeof(struct speedtch_instance_data));
+
+ if ((ret = usb_set_interface(dev, 0, 0)) < 0)
+ goto fail;
+
+ if ((ret = usb_set_interface(dev, 2, 0)) < 0)
+ goto fail;
+
+ instance->u.data_endpoint = SPEEDTCH_ENDPOINT_DATA;
+ instance->u.firmware_wait = speedtch_firmware_wait;
+ instance->u.driver_name = speedtch_driver_name;
+
+ ret = udsl_instance_setup(dev, &instance->u);
+ if (ret)
+ goto fail;
+
+ init_timer(&instance->poll_timer);
+ instance->poll_timer.function = speedtch_timer_poll;
+ instance->poll_timer.data = (unsigned long)instance;
+
+ INIT_WORK(&instance->poll_work, (void *)speedtch_poll_status, instance);
+
+ /* set MAC address, it is stored in the serial number */
+ memset(instance->u.atm_dev->esi, 0, sizeof(instance->u.atm_dev->esi));
+ if (usb_string(dev, dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) == 12) {
+ for (i = 0; i < 6; i++)
+ instance->u.atm_dev->esi[i] =
+ (hex2int(mac_str[i * 2]) * 16) + (hex2int(mac_str[i * 2 + 1]));
+ }
+
+ /* First check whether the modem already seems to be alive */
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x12, 0xc0, 0x07, 0x00, buf7, SIZE_7, HZ / 2);
+
+ if (ret == SIZE_7) {
+ dbg("firmware appears to be already loaded");
+ speedtch_got_firmware(instance, 1);
+ speedtch_poll_status(instance);
+ } else {
+ speedtch_firmware_start(instance);
+ }
+
+ usb_set_intfdata(intf, instance);
+
+ return 0;
+
+ fail:
+ kfree(instance);
+
+ return -ENOMEM;
+}
+
+static void speedtch_usb_disconnect(struct usb_interface *intf)
+{
+ struct speedtch_instance_data *instance = usb_get_intfdata(intf);
+
+ dbg("speedtch_usb_disconnect entered");
+
+ if (!instance) {
+ dbg("speedtch_usb_disconnect: NULL instance!");
+ return;
+ }
+
+/*QQ need to handle disconnects on interface #2 while uploading firmware */
+/*QQ and what about interface #1? */
+
+ if (instance->int_urb) {
+ struct urb *int_urb = instance->int_urb;
+ instance->int_urb = NULL;
+ wmb();
+ usb_unlink_urb(int_urb);
+ usb_free_urb(int_urb);
+ }
+
+ instance->int_data[0] = 1;
+ del_timer_sync(&instance->poll_timer);
+ wmb();
+ flush_scheduled_work();
+
+ udsl_instance_disconnect(&instance->u);
+
+ /* clean up */
+ usb_set_intfdata(intf, NULL);
+ udsl_put_instance(&instance->u);
+}
+
+/***********
+** init **
+***********/
+
+static int __init speedtch_usb_init(void)
+{
+ dbg("speedtch_usb_init: driver version " DRIVER_VERSION);
+
+ return usb_register(&speedtch_usb_driver);
+}
+
+static void __exit speedtch_usb_cleanup(void)
+{
+ dbg("speedtch_usb_cleanup entered");
+
+ usb_deregister(&speedtch_usb_driver);
+}
+
+module_init(speedtch_usb_init);
+module_exit(speedtch_usb_cleanup);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/usb/atm/usb_atm.c b/drivers/usb/atm/usb_atm.c
new file mode 100644
index 000000000000..8d1d365846a1
--- /dev/null
+++ b/drivers/usb/atm/usb_atm.c
@@ -0,0 +1,1205 @@
+/******************************************************************************
+ * usb_atm.c - Generic USB xDSL driver core
+ *
+ * Copyright (C) 2001, Alcatel
+ * Copyright (C) 2003, Duncan Sands, SolNegro, Josep Comas
+ * Copyright (C) 2004, David Woodhouse
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+/*
+ * Written by Johan Verrept, maintained by Duncan Sands (duncan.sands@free.fr)
+ *
+ * 1.7+: - See the check-in logs
+ *
+ * 1.6: - No longer opens a connection if the firmware is not loaded
+ * - Added support for the speedtouch 330
+ * - Removed the limit on the number of devices
+ * - Module now autoloads on device plugin
+ * - Merged relevant parts of sarlib
+ * - Replaced the kernel thread with a tasklet
+ * - New packet transmission code
+ * - Changed proc file contents
+ * - Fixed all known SMP races
+ * - Many fixes and cleanups
+ * - Various fixes by Oliver Neukum (oliver@neukum.name)
+ *
+ * 1.5A: - Version for inclusion in 2.5 series kernel
+ * - Modifications by Richard Purdie (rpurdie@rpsys.net)
+ * - made compatible with kernel 2.5.6 onwards by changing
+ * udsl_usb_send_data_context->urb to a pointer and adding code
+ * to alloc and free it
+ * - remove_wait_queue() added to udsl_atm_processqueue_thread()
+ *
+ * 1.5: - fixed memory leak when atmsar_decode_aal5 returned NULL.
+ * (reported by stephen.robinson@zen.co.uk)
+ *
+ * 1.4: - changed the spin_lock() under interrupt to spin_lock_irqsave()
+ * - unlink all active send urbs of a vcc that is being closed.
+ *
+ * 1.3.1: - added the version number
+ *
+ * 1.3: - Added multiple send urb support
+ * - fixed memory leak and vcc->tx_inuse starvation bug
+ * when not enough memory left in vcc.
+ *
+ * 1.2: - Fixed race condition in udsl_usb_send_data()
+ * 1.1: - Turned off packet debugging
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <asm/uaccess.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/atm.h>
+#include <linux/atmdev.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+
+#include "usb_atm.h"
+
+/*
+#define DEBUG
+#define VERBOSE_DEBUG
+*/
+
+#if !defined (DEBUG) && defined (CONFIG_USB_DEBUG)
+# define DEBUG
+#endif
+
+#include <linux/usb.h>
+
+#ifdef DEBUG
+#define UDSL_ASSERT(x) BUG_ON(!(x))
+#else
+#define UDSL_ASSERT(x) do { if (!(x)) warn("failed assertion '" #x "' at line %d", __LINE__); } while(0)
+#endif
+
+#ifdef VERBOSE_DEBUG
+static int udsl_print_packet(const unsigned char *data, int len);
+#define PACKETDEBUG(arg...) udsl_print_packet (arg)
+#define vdbg(arg...) dbg (arg)
+#else
+#define PACKETDEBUG(arg...)
+#define vdbg(arg...)
+#endif
+
+#define DRIVER_AUTHOR "Johan Verrept, Duncan Sands <duncan.sands@free.fr>"
+#define DRIVER_VERSION "1.8"
+#define DRIVER_DESC "Alcatel SpeedTouch USB driver version " DRIVER_VERSION
+
+static unsigned int num_rcv_urbs = UDSL_DEFAULT_RCV_URBS;
+static unsigned int num_snd_urbs = UDSL_DEFAULT_SND_URBS;
+static unsigned int num_rcv_bufs = UDSL_DEFAULT_RCV_BUFS;
+static unsigned int num_snd_bufs = UDSL_DEFAULT_SND_BUFS;
+static unsigned int rcv_buf_size = UDSL_DEFAULT_RCV_BUF_SIZE;
+static unsigned int snd_buf_size = UDSL_DEFAULT_SND_BUF_SIZE;
+
+module_param(num_rcv_urbs, uint, 0444);
+MODULE_PARM_DESC(num_rcv_urbs,
+ "Number of urbs used for reception (range: 0-"
+ __MODULE_STRING(UDSL_MAX_RCV_URBS) ", default: "
+ __MODULE_STRING(UDSL_DEFAULT_RCV_URBS) ")");
+
+module_param(num_snd_urbs, uint, 0444);
+MODULE_PARM_DESC(num_snd_urbs,
+ "Number of urbs used for transmission (range: 0-"
+ __MODULE_STRING(UDSL_MAX_SND_URBS) ", default: "
+ __MODULE_STRING(UDSL_DEFAULT_SND_URBS) ")");
+
+module_param(num_rcv_bufs, uint, 0444);
+MODULE_PARM_DESC(num_rcv_bufs,
+ "Number of buffers used for reception (range: 0-"
+ __MODULE_STRING(UDSL_MAX_RCV_BUFS) ", default: "
+ __MODULE_STRING(UDSL_DEFAULT_RCV_BUFS) ")");
+
+module_param(num_snd_bufs, uint, 0444);
+MODULE_PARM_DESC(num_snd_bufs,
+ "Number of buffers used for transmission (range: 0-"
+ __MODULE_STRING(UDSL_MAX_SND_BUFS) ", default: "
+ __MODULE_STRING(UDSL_DEFAULT_SND_BUFS) ")");
+
+module_param(rcv_buf_size, uint, 0444);
+MODULE_PARM_DESC(rcv_buf_size,
+ "Size of the buffers used for reception (range: 0-"
+ __MODULE_STRING(UDSL_MAX_RCV_BUF_SIZE) ", default: "
+ __MODULE_STRING(UDSL_DEFAULT_RCV_BUF_SIZE) ")");
+
+module_param(snd_buf_size, uint, 0444);
+MODULE_PARM_DESC(snd_buf_size,
+ "Size of the buffers used for transmission (range: 0-"
+ __MODULE_STRING(UDSL_MAX_SND_BUF_SIZE) ", default: "
+ __MODULE_STRING(UDSL_DEFAULT_SND_BUF_SIZE) ")");
+
+/* ATM */
+
+static void udsl_atm_dev_close(struct atm_dev *dev);
+static int udsl_atm_open(struct atm_vcc *vcc);
+static void udsl_atm_close(struct atm_vcc *vcc);
+static int udsl_atm_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
+static int udsl_atm_send(struct atm_vcc *vcc, struct sk_buff *skb);
+static int udsl_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *page);
+
+static struct atmdev_ops udsl_atm_devops = {
+ .dev_close = udsl_atm_dev_close,
+ .open = udsl_atm_open,
+ .close = udsl_atm_close,
+ .ioctl = udsl_atm_ioctl,
+ .send = udsl_atm_send,
+ .proc_read = udsl_atm_proc_read,
+ .owner = THIS_MODULE,
+};
+
+/***********
+** misc **
+***********/
+
+/*
+ * Return a finished skb to its owner: use the vcc's pop callback when the
+ * ATM layer installed one, otherwise free the skb ourselves.
+ */
+static inline void udsl_pop(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+	if (vcc->pop)
+		vcc->pop(vcc, skb);
+	else
+		dev_kfree_skb(skb);
+}
+
+/*************
+** decode **
+*************/
+
+/*
+ * Linear search of the instance's open-vcc list for a matching vpi/vci pair.
+ * Returns NULL if no such channel is open.  Caller must hold whatever
+ * protection the list needs (receive tasklet disabled or instance->serialize).
+ */
+static inline struct udsl_vcc_data *udsl_find_vcc(struct udsl_instance_data *instance,
+						  short vpi, int vci)
+{
+	struct udsl_vcc_data *vcc;
+
+	list_for_each_entry(vcc, &instance->vcc_list, list)
+		if ((vcc->vci == vci) && (vcc->vpi == vpi))
+			return vcc;
+	return NULL;
+}
+
+/*
+ * Reassemble AAL5 PDUs from a buffer of 'howmany' raw ATM cells.
+ *
+ * For each 53-byte cell (plus per-device rcv_padding) the 5-byte header is
+ * parsed for vpi/vci/pti; the 48-byte payload is appended to the matching
+ * vcc's SAR buffer.  When the end-of-PDU bit (pti) is set, the trailer
+ * length and CRC-32 are validated and the payload is pushed up the ATM
+ * stack as a new skb.  Cells for unknown vpi/vci pairs are dropped.
+ * A one-entry (vcc, vpi, vci) cache avoids re-searching the vcc list for
+ * runs of cells on the same channel.
+ */
+static void udsl_extract_cells(struct udsl_instance_data *instance,
+			       unsigned char *source, unsigned int howmany)
+{
+	struct udsl_vcc_data *cached_vcc = NULL;
+	struct atm_vcc *vcc;
+	struct sk_buff *sarb;
+	struct udsl_vcc_data *vcc_data;
+	int cached_vci = 0;
+	unsigned int i;
+	int pti;
+	int vci;
+	short cached_vpi = 0;
+	short vpi;
+
+	for (i = 0; i < howmany;
+	     i++, source += ATM_CELL_SIZE + instance->rcv_padding) {
+		/* ATM cell header: 8-bit VPI (UNI), 16-bit VCI, PT bit 1 = last cell of PDU */
+		vpi = ((source[0] & 0x0f) << 4) | (source[1] >> 4);
+		vci = ((source[1] & 0x0f) << 12) | (source[2] << 4) | (source[3] >> 4);
+		pti = (source[3] & 0x2) != 0;
+
+		vdbg("udsl_extract_cells: vpi %hd, vci %d, pti %d", vpi, vci, pti);
+
+		if (cached_vcc && (vci == cached_vci) && (vpi == cached_vpi))
+			vcc_data = cached_vcc;
+		else if ((vcc_data = udsl_find_vcc(instance, vpi, vci))) {
+			cached_vcc = vcc_data;
+			cached_vpi = vpi;
+			cached_vci = vci;
+		} else {
+			dbg("udsl_extract_cells: unknown vpi/vci (%hd/%d)!", vpi, vci);
+			continue;
+		}
+
+		vcc = vcc_data->vcc;
+		sarb = vcc_data->sarb;
+
+		if (sarb->tail + ATM_CELL_PAYLOAD > sarb->end) {
+			dbg("udsl_extract_cells: buffer overrun (sarb->len %u, vcc: 0x%p)!", sarb->len, vcc);
+			/* discard cells already received */
+			skb_trim(sarb, 0);
+		}
+
+		memcpy(sarb->tail, source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD);
+		__skb_put(sarb, ATM_CELL_PAYLOAD);
+
+		if (pti) {
+			struct sk_buff *skb;
+			unsigned int length;
+			unsigned int pdu_length;
+
+			/* AAL5 trailer: 16-bit PDU length at bytes -6/-5 of the last cell */
+			length = (source[ATM_CELL_SIZE - 6] << 8) + source[ATM_CELL_SIZE - 5];
+
+			/* guard against overflow */
+			if (length > ATM_MAX_AAL5_PDU) {
+				dbg("udsl_extract_cells: bogus length %u (vcc: 0x%p)!", length, vcc);
+				atomic_inc(&vcc->stats->rx_err);
+				goto out;
+			}
+
+			pdu_length = UDSL_NUM_CELLS(length) * ATM_CELL_PAYLOAD;
+
+			if (sarb->len < pdu_length) {
+				dbg("udsl_extract_cells: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!", pdu_length, sarb->len, vcc);
+				atomic_inc(&vcc->stats->rx_err);
+				goto out;
+			}
+
+			/* 0xc704dd7b is the residual of a correct CRC-32 over data+trailer */
+			if (crc32_be(~0, sarb->tail - pdu_length, pdu_length) != 0xc704dd7b) {
+				dbg("udsl_extract_cells: packet failed crc check (vcc: 0x%p)!", vcc);
+				atomic_inc(&vcc->stats->rx_err);
+				goto out;
+			}
+
+			vdbg("udsl_extract_cells: got packet (length: %u, pdu_length: %u, vcc: 0x%p)", length, pdu_length, vcc);
+
+			if (!(skb = dev_alloc_skb(length))) {
+				dbg("udsl_extract_cells: no memory for skb (length: %u)!", length);
+				atomic_inc(&vcc->stats->rx_drop);
+				goto out;
+			}
+
+			vdbg("udsl_extract_cells: allocated new sk_buff (skb: 0x%p, skb->truesize: %u)", skb, skb->truesize);
+
+			if (!atm_charge(vcc, skb->truesize)) {
+				dbg("udsl_extract_cells: failed atm_charge (skb->truesize: %u)!", skb->truesize);
+				dev_kfree_skb(skb);
+				goto out;	/* atm_charge increments rx_drop */
+			}
+
+			memcpy(skb->data, sarb->tail - pdu_length, length);
+			__skb_put(skb, length);
+
+			vdbg("udsl_extract_cells: sending skb 0x%p, skb->len %u, skb->truesize %u", skb, skb->len, skb->truesize);
+
+			PACKETDEBUG(skb->data, skb->len);
+
+			vcc->push(vcc, skb);
+
+			atomic_inc(&vcc->stats->rx);
+		      out:
+			/* PDU consumed (or rejected): restart SAR buffer for the next one */
+			skb_trim(sarb, 0);
+		}
+	}
+}
+
+/*************
+** encode **
+*************/
+
+/*
+ * Write a 5-byte ATM cell header for this vcc into 'target'.
+ * Byte 4 (0xec) carries the fixed PT/CLP/HEC filler used for data cells;
+ * the end-of-PDU bit is patched in later by udsl_write_cells.
+ */
+static inline void udsl_fill_cell_header(unsigned char *target, struct atm_vcc *vcc)
+{
+	target[0] = vcc->vpi >> 4;
+	target[1] = (vcc->vpi << 4) | (vcc->vci >> 12);
+	target[2] = vcc->vci >> 4;
+	target[3] = vcc->vci << 4;
+	target[4] = 0xec;
+}
+
+static const unsigned char zeros[ATM_CELL_PAYLOAD];
+
+/*
+ * Precompute the AAL5 transmit metadata for an skb and stash it in the
+ * skb control block (UDSL_SKB): total cell count, count of cells filled
+ * entirely by payload, the zero padding needed in the final cell, and the
+ * 8-byte AAL5 trailer (UU, CPI, length, CRC-32 over payload+padding+trailer).
+ */
+static void udsl_groom_skb(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+	struct udsl_control *ctrl = UDSL_SKB(skb);
+	unsigned int zero_padding;
+	u32 crc;
+
+	ctrl->atm_data.vcc = vcc;
+
+	ctrl->num_cells = UDSL_NUM_CELLS(skb->len);
+	ctrl->num_entire = skb->len / ATM_CELL_PAYLOAD;
+
+	zero_padding = ctrl->num_cells * ATM_CELL_PAYLOAD - skb->len - ATM_AAL5_TRAILER;
+
+	/* If the trailer gets a cell of its own, that cell absorbs part of the padding. */
+	if (ctrl->num_entire + 1 < ctrl->num_cells)
+		ctrl->pdu_padding = zero_padding - (ATM_CELL_PAYLOAD - ATM_AAL5_TRAILER);
+	else
+		ctrl->pdu_padding = zero_padding;
+
+	ctrl->aal5_trailer[0] = 0;	/* UU = 0 */
+	ctrl->aal5_trailer[1] = 0;	/* CPI = 0 */
+	ctrl->aal5_trailer[2] = skb->len >> 8;
+	ctrl->aal5_trailer[3] = skb->len;
+
+	/* CRC covers payload, zero padding and the first 4 trailer bytes. */
+	crc = crc32_be(~0, skb->data, skb->len);
+	crc = crc32_be(crc, zeros, zero_padding);
+	crc = crc32_be(crc, ctrl->aal5_trailer, 4);
+	crc = ~crc;
+
+	ctrl->aal5_trailer[4] = crc >> 24;
+	ctrl->aal5_trailer[5] = crc >> 16;
+	ctrl->aal5_trailer[6] = crc >> 8;
+	ctrl->aal5_trailer[7] = crc;
+}
+
+/*
+ * Serialize up to 'howmany' cells of 'skb' into the send buffer at
+ * *target_p, consuming the skb as it goes (__skb_pull) and advancing
+ * *target_p past what was written.  Handles, in order: the cells filled
+ * entirely with payload, the partially-filled cell plus PDU padding, an
+ * optional all-padding cell, and finally the AAL5 trailer with the
+ * end-of-PDU bit set in the last cell's header.  May stop early when the
+ * buffer runs out of room; state in ctrl lets the next call resume.
+ * Returns the number of cells actually written.
+ */
+static unsigned int udsl_write_cells(struct udsl_instance_data *instance,
+				     unsigned int howmany, struct sk_buff *skb,
+				     unsigned char **target_p)
+{
+	struct udsl_control *ctrl = UDSL_SKB(skb);
+	unsigned char *target = *target_p;
+	unsigned int nc, ne, i;
+
+	vdbg("udsl_write_cells: howmany=%u, skb->len=%d, num_cells=%u, num_entire=%u, pdu_padding=%u", howmany, skb->len, ctrl->num_cells, ctrl->num_entire, ctrl->pdu_padding);
+
+	nc = ctrl->num_cells;
+	ne = min(howmany, ctrl->num_entire);
+
+	/* Phase 1: cells completely filled with payload. */
+	for (i = 0; i < ne; i++) {
+		udsl_fill_cell_header(target, ctrl->atm_data.vcc);
+		target += ATM_CELL_HEADER;
+		memcpy(target, skb->data, ATM_CELL_PAYLOAD);
+		target += ATM_CELL_PAYLOAD;
+		if (instance->snd_padding) {
+			memset(target, 0, instance->snd_padding);
+			target += instance->snd_padding;
+		}
+		__skb_pull(skb, ATM_CELL_PAYLOAD);
+	}
+
+	ctrl->num_entire -= ne;
+
+	if (!(ctrl->num_cells -= ne) || !(howmany -= ne))
+		goto out;
+
+	/* Phase 2: the cell holding the payload remainder plus padding. */
+	if (instance->snd_padding) {
+		memset(target, 0, instance->snd_padding);
+		target += instance->snd_padding;
+	}
+	udsl_fill_cell_header(target, ctrl->atm_data.vcc);
+	target += ATM_CELL_HEADER;
+	memcpy(target, skb->data, skb->len);
+	target += skb->len;
+	__skb_pull(skb, skb->len);
+	memset(target, 0, ctrl->pdu_padding);
+	target += ctrl->pdu_padding;
+
+	if (--ctrl->num_cells) {
+		if (!--howmany) {
+			/* Ran out of room: the remaining cell is pure padding next time. */
+			ctrl->pdu_padding = ATM_CELL_PAYLOAD - ATM_AAL5_TRAILER;
+			goto out;
+		}
+
+		/* Phase 3: an extra cell of zero padding before the trailer. */
+		udsl_fill_cell_header(target, ctrl->atm_data.vcc);
+		target += ATM_CELL_HEADER;
+		memset(target, 0, ATM_CELL_PAYLOAD - ATM_AAL5_TRAILER);
+		target += ATM_CELL_PAYLOAD - ATM_AAL5_TRAILER;
+
+		--ctrl->num_cells;
+		UDSL_ASSERT(!ctrl->num_cells);
+	}
+
+	/* Phase 4: AAL5 trailer, then flag this cell as end-of-PDU. */
+	memcpy(target, ctrl->aal5_trailer, ATM_AAL5_TRAILER);
+	target += ATM_AAL5_TRAILER;
+	/* set pti bit in last cell */
+	*(target + 3 - ATM_CELL_SIZE) |= 0x2;
+	if (instance->snd_padding) {
+		memset(target, 0, instance->snd_padding);
+		target += instance->snd_padding;
+	}
+      out:
+	*target_p = target;
+	return nc - ctrl->num_cells;
+}
+
+/**************
+** receive **
+**************/
+
+/*
+ * URB completion handler for bulk-in transfers.  Computes how many whole
+ * cells arrived, recycles the receiver, queues the filled buffer, and
+ * (on success) kicks the receive tasklet to process it.
+ */
+static void udsl_complete_receive(struct urb *urb, struct pt_regs *regs)
+{
+	struct udsl_receive_buffer *buf;
+	struct udsl_instance_data *instance;
+	struct udsl_receiver *rcv;
+	unsigned long flags;
+
+	if (!urb || !(rcv = urb->context)) {
+		dbg("udsl_complete_receive: bad urb!");
+		return;
+	}
+
+	instance = rcv->instance;
+	buf = rcv->buffer;
+
+	buf->filled_cells = urb->actual_length / (ATM_CELL_SIZE + instance->rcv_padding);
+
+	vdbg("udsl_complete_receive: urb 0x%p, status %d, actual_length %d, filled_cells %u, rcv 0x%p, buf 0x%p", urb, urb->status, urb->actual_length, buf->filled_cells, rcv, buf);
+
+	UDSL_ASSERT(buf->filled_cells <= rcv_buf_size);
+
+	/* may not be in_interrupt() */
+	spin_lock_irqsave(&instance->receive_lock, flags);
+	list_add(&rcv->list, &instance->spare_receivers);
+	list_add_tail(&buf->list, &instance->filled_receive_buffers);
+	if (likely(!urb->status))
+		tasklet_schedule(&instance->receive_tasklet);
+	spin_unlock_irqrestore(&instance->receive_lock, flags);
+}
+
+/*
+ * Receive tasklet body.  Loops: (1) pair every spare buffer with a spare
+ * receiver and submit a bulk-in urb for it; (2) take one filled buffer,
+ * run cell extraction on it, and return it to the spare pool; repeat until
+ * no filled buffers remain.  'data' is the udsl_instance_data pointer.
+ */
+static void udsl_process_receive(unsigned long data)
+{
+	struct udsl_receive_buffer *buf;
+	struct udsl_instance_data *instance = (struct udsl_instance_data *)data;
+	struct udsl_receiver *rcv;
+	int err;
+
+ made_progress:
+	while (!list_empty(&instance->spare_receive_buffers)) {
+		spin_lock_irq(&instance->receive_lock);
+		if (list_empty(&instance->spare_receivers)) {
+			spin_unlock_irq(&instance->receive_lock);
+			break;
+		}
+		rcv = list_entry(instance->spare_receivers.next,
+				 struct udsl_receiver, list);
+		list_del(&rcv->list);
+		spin_unlock_irq(&instance->receive_lock);
+
+		buf = list_entry(instance->spare_receive_buffers.next,
+				 struct udsl_receive_buffer, list);
+		list_del(&buf->list);
+
+		rcv->buffer = buf;
+
+		usb_fill_bulk_urb(rcv->urb, instance->usb_dev,
+				  usb_rcvbulkpipe(instance->usb_dev, instance->data_endpoint),
+				  buf->base,
+				  rcv_buf_size * (ATM_CELL_SIZE + instance->rcv_padding),
+				  udsl_complete_receive, rcv);
+
+		vdbg("udsl_process_receive: sending urb 0x%p, rcv 0x%p, buf 0x%p",
+		     rcv->urb, rcv, buf);
+
+		if ((err = usb_submit_urb(rcv->urb, GFP_ATOMIC)) < 0) {
+			dbg("udsl_process_receive: urb submission failed (%d)!", err);
+			/* Put both resources back and stop trying to submit. */
+			list_add(&buf->list, &instance->spare_receive_buffers);
+			spin_lock_irq(&instance->receive_lock);
+			list_add(&rcv->list, &instance->spare_receivers);
+			spin_unlock_irq(&instance->receive_lock);
+			break;
+		}
+	}
+
+	spin_lock_irq(&instance->receive_lock);
+	if (list_empty(&instance->filled_receive_buffers)) {
+		spin_unlock_irq(&instance->receive_lock);
+		return;		/* done - no more buffers */
+	}
+	buf = list_entry(instance->filled_receive_buffers.next,
+			 struct udsl_receive_buffer, list);
+	list_del(&buf->list);
+	spin_unlock_irq(&instance->receive_lock);
+
+	vdbg("udsl_process_receive: processing buf 0x%p", buf);
+	udsl_extract_cells(instance, buf->base, buf->filled_cells);
+	list_add(&buf->list, &instance->spare_receive_buffers);
+	goto made_progress;
+}
+
+/***********
+** send **
+***********/
+
+/*
+ * URB completion handler for bulk-out transfers: recycle the sender and
+ * its buffer to the spare pools and reschedule the send tasklet so more
+ * queued data can go out.
+ */
+static void udsl_complete_send(struct urb *urb, struct pt_regs *regs)
+{
+	struct udsl_instance_data *instance;
+	struct udsl_sender *snd;
+	unsigned long flags;
+
+	if (!urb || !(snd = urb->context) || !(instance = snd->instance)) {
+		dbg("udsl_complete_send: bad urb!");
+		return;
+	}
+
+	vdbg("udsl_complete_send: urb 0x%p, status %d, snd 0x%p, buf 0x%p", urb,
+	     urb->status, snd, snd->buffer);
+
+	/* may not be in_interrupt() */
+	spin_lock_irqsave(&instance->send_lock, flags);
+	list_add(&snd->list, &instance->spare_senders);
+	list_add(&snd->buffer->list, &instance->spare_send_buffers);
+	tasklet_schedule(&instance->send_tasklet);
+	spin_unlock_irqrestore(&instance->send_lock, flags);
+}
+
+/*
+ * Send tasklet body.  Loops: (1) submit a bulk-out urb for every full (or
+ * current partially-filled) buffer while spare senders exist; (2) pull the
+ * next skb off the send queue, serialize its cells into the current buffer
+ * via udsl_write_cells, and pop the skb once fully written; repeat until
+ * out of skbs, buffers, or senders.  'data' is the udsl_instance_data.
+ */
+static void udsl_process_send(unsigned long data)
+{
+	struct udsl_send_buffer *buf;
+	struct udsl_instance_data *instance = (struct udsl_instance_data *)data;
+	struct sk_buff *skb;
+	struct udsl_sender *snd;
+	int err;
+	unsigned int num_written;
+
+ made_progress:
+	spin_lock_irq(&instance->send_lock);
+	while (!list_empty(&instance->spare_senders)) {
+		/* Prefer completely filled buffers; fall back to the in-progress one. */
+		if (!list_empty(&instance->filled_send_buffers)) {
+			buf = list_entry(instance->filled_send_buffers.next,
+					 struct udsl_send_buffer, list);
+			list_del(&buf->list);
+		} else if ((buf = instance->current_buffer)) {
+			instance->current_buffer = NULL;
+		} else		/* all buffers empty */
+			break;
+
+		snd = list_entry(instance->spare_senders.next,
+				 struct udsl_sender, list);
+		list_del(&snd->list);
+		spin_unlock_irq(&instance->send_lock);
+
+		snd->buffer = buf;
+		usb_fill_bulk_urb(snd->urb, instance->usb_dev,
+				  usb_sndbulkpipe(instance->usb_dev, instance->data_endpoint),
+				  buf->base,
+				  (snd_buf_size - buf->free_cells) * (ATM_CELL_SIZE + instance->snd_padding),
+				  udsl_complete_send, snd);
+
+		vdbg("udsl_process_send: submitting urb 0x%p (%d cells), snd 0x%p, buf 0x%p",
+		     snd->urb, snd_buf_size - buf->free_cells, snd, buf);
+
+		if ((err = usb_submit_urb(snd->urb, GFP_ATOMIC)) < 0) {
+			dbg("udsl_process_send: urb submission failed (%d)!", err);
+			spin_lock_irq(&instance->send_lock);
+			list_add(&snd->list, &instance->spare_senders);
+			spin_unlock_irq(&instance->send_lock);
+			list_add(&buf->list, &instance->filled_send_buffers);
+			return;	/* bail out */
+		}
+
+		spin_lock_irq(&instance->send_lock);
+	}			/* while */
+	spin_unlock_irq(&instance->send_lock);
+
+	if (!instance->current_skb)
+		instance->current_skb = skb_dequeue(&instance->sndqueue);
+	if (!instance->current_skb)
+		return;		/* done - no more skbs */
+
+	skb = instance->current_skb;
+
+	if (!(buf = instance->current_buffer)) {
+		spin_lock_irq(&instance->send_lock);
+		if (list_empty(&instance->spare_send_buffers)) {
+			instance->current_buffer = NULL;
+			spin_unlock_irq(&instance->send_lock);
+			return;	/* done - no more buffers */
+		}
+		buf = list_entry(instance->spare_send_buffers.next,
+				 struct udsl_send_buffer, list);
+		list_del(&buf->list);
+		spin_unlock_irq(&instance->send_lock);
+
+		buf->free_start = buf->base;
+		buf->free_cells = snd_buf_size;
+
+		instance->current_buffer = buf;
+	}
+
+	num_written = udsl_write_cells(instance, buf->free_cells, skb, &buf->free_start);
+
+	vdbg("udsl_process_send: wrote %u cells from skb 0x%p to buffer 0x%p",
+	     num_written, skb, buf);
+
+	if (!(buf->free_cells -= num_written)) {
+		list_add_tail(&buf->list, &instance->filled_send_buffers);
+		instance->current_buffer = NULL;
+	}
+
+	vdbg("udsl_process_send: buffer contains %d cells, %d left",
+	     snd_buf_size - buf->free_cells, buf->free_cells);
+
+	/* num_cells reaching zero means the skb was fully serialized. */
+	if (!UDSL_SKB(skb)->num_cells) {
+		struct atm_vcc *vcc = UDSL_SKB(skb)->atm_data.vcc;
+
+		udsl_pop(vcc, skb);
+		instance->current_skb = NULL;
+
+		atomic_inc(&vcc->stats->tx);
+	}
+
+	goto made_progress;
+}
+
+/*
+ * Drop all queued (and the in-progress) transmit skbs belonging to 'vcc'.
+ * Called when a vcc is being closed.  The send tasklet is disabled while
+ * the current skb is examined to avoid racing with udsl_process_send.
+ */
+static void udsl_cancel_send(struct udsl_instance_data *instance,
+			     struct atm_vcc *vcc)
+{
+	struct sk_buff *skb, *n;
+
+	dbg("udsl_cancel_send entered");
+	spin_lock_irq(&instance->sndqueue.lock);
+	for (skb = instance->sndqueue.next, n = skb->next;
+	     skb != (struct sk_buff *)&instance->sndqueue;
+	     skb = n, n = skb->next)
+		if (UDSL_SKB(skb)->atm_data.vcc == vcc) {
+			dbg("udsl_cancel_send: popping skb 0x%p", skb);
+			__skb_unlink(skb, &instance->sndqueue);
+			udsl_pop(vcc, skb);
+		}
+	spin_unlock_irq(&instance->sndqueue.lock);
+
+	tasklet_disable(&instance->send_tasklet);
+	if ((skb = instance->current_skb) && (UDSL_SKB(skb)->atm_data.vcc == vcc)) {
+		dbg("udsl_cancel_send: popping current skb (0x%p)", skb);
+		instance->current_skb = NULL;
+		udsl_pop(vcc, skb);
+	}
+	tasklet_enable(&instance->send_tasklet);
+	dbg("udsl_cancel_send done");
+}
+
+/*
+ * atmdev_ops.send: validate the skb (AAL5 only, within max PDU size),
+ * precompute its AAL5 metadata, queue it, and kick the send tasklet.
+ * On any failure the skb is popped back to its owner and a negative
+ * errno is returned.
+ */
+static int udsl_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+	struct udsl_instance_data *instance = vcc->dev->dev_data;
+	int err;
+
+	vdbg("udsl_atm_send called (skb 0x%p, len %u)", skb, skb->len);
+
+	if (!instance) {
+		dbg("udsl_atm_send: NULL data!");
+		err = -ENODEV;
+		goto fail;
+	}
+
+	if (vcc->qos.aal != ATM_AAL5) {
+		dbg("udsl_atm_send: unsupported ATM type %d!", vcc->qos.aal);
+		err = -EINVAL;
+		goto fail;
+	}
+
+	if (skb->len > ATM_MAX_AAL5_PDU) {
+		dbg("udsl_atm_send: packet too long (%d vs %d)!", skb->len,
+		    ATM_MAX_AAL5_PDU);
+		err = -EINVAL;
+		goto fail;
+	}
+
+	PACKETDEBUG(skb->data, skb->len);
+
+	udsl_groom_skb(vcc, skb);
+	skb_queue_tail(&instance->sndqueue, skb);
+	tasklet_schedule(&instance->send_tasklet);
+
+	return 0;
+
+ fail:
+	udsl_pop(vcc, skb);
+	return err;
+}
+
+/********************
+** bean counting **
+********************/
+
+/*
+ * kref release callback: final teardown once both the USB and ATM sides
+ * have dropped their references.
+ */
+static void udsl_destroy_instance(struct kref *kref)
+{
+	struct udsl_instance_data *instance =
+	    container_of(kref, struct udsl_instance_data, refcount);
+
+	tasklet_kill(&instance->receive_tasklet);
+	tasklet_kill(&instance->send_tasklet);
+	usb_put_dev(instance->usb_dev);
+	kfree(instance);
+}
+
+/* Take a reference on the instance. */
+void udsl_get_instance(struct udsl_instance_data *instance)
+{
+	kref_get(&instance->refcount);
+}
+
+/* Drop a reference; udsl_destroy_instance runs when the count hits zero. */
+void udsl_put_instance(struct udsl_instance_data *instance)
+{
+	kref_put(&instance->refcount, udsl_destroy_instance);
+}
+
+/**********
+** ATM **
+**********/
+
+/* atmdev_ops.dev_close: detach the instance from the ATM device and drop the ATM-side reference. */
+static void udsl_atm_dev_close(struct atm_dev *dev)
+{
+	struct udsl_instance_data *instance = dev->dev_data;
+
+	dev->dev_data = NULL;
+	udsl_put_instance(instance);
+}
+
+/*
+ * atmdev_ops.proc_read: emit one line of /proc output per call, selected
+ * by *pos (description, MAC/ESI, AAL5 stats, line/firmware state).
+ * Returns the number of bytes written, or 0 when past the last line.
+ */
+static int udsl_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *page)
+{
+	struct udsl_instance_data *instance = atm_dev->dev_data;
+	int left = *pos;
+
+	if (!instance) {
+		dbg("udsl_atm_proc_read: NULL instance!");
+		return -ENODEV;
+	}
+
+	if (!left--)
+		return sprintf(page, "%s\n", instance->description);
+
+	if (!left--)
+		return sprintf(page, "MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
+			       atm_dev->esi[0], atm_dev->esi[1],
+			       atm_dev->esi[2], atm_dev->esi[3],
+			       atm_dev->esi[4], atm_dev->esi[5]);
+
+	if (!left--)
+		return sprintf(page,
+			       "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
+			       atomic_read(&atm_dev->stats.aal5.tx),
+			       atomic_read(&atm_dev->stats.aal5.tx_err),
+			       atomic_read(&atm_dev->stats.aal5.rx),
+			       atomic_read(&atm_dev->stats.aal5.rx_err),
+			       atomic_read(&atm_dev->stats.aal5.rx_drop));
+
+	if (!left--) {
+		switch (atm_dev->signal) {
+		case ATM_PHY_SIG_FOUND:
+			sprintf(page, "Line up");
+			break;
+		case ATM_PHY_SIG_LOST:
+			sprintf(page, "Line down");
+			break;
+		default:
+			sprintf(page, "Line state unknown");
+			break;
+		}
+
+		if (instance->usb_dev->state == USB_STATE_NOTATTACHED)
+			strcat(page, ", disconnected\n");
+		else {
+			if (instance->status == UDSL_LOADED_FIRMWARE)
+				strcat(page, ", firmware loaded\n");
+			else if (instance->status == UDSL_LOADING_FIRMWARE)
+				strcat(page, ", firmware loading\n");
+			else
+				strcat(page, ", no firmware\n");
+		}
+
+		return strlen(page);
+	}
+
+	return 0;
+}
+
+/*
+ * atmdev_ops.open: open an AAL5 vcc.  Optionally waits for firmware,
+ * rejects duplicate vpi/vci pairs, allocates per-vcc state plus a SAR
+ * reassembly buffer sized for the negotiated max SDU, links it into the
+ * instance's vcc list (with the receive tasklet disabled to keep
+ * udsl_find_vcc safe), and marks the vcc ready.
+ */
+static int udsl_atm_open(struct atm_vcc *vcc)
+{
+	struct udsl_instance_data *instance = vcc->dev->dev_data;
+	struct udsl_vcc_data *new;
+	unsigned int max_pdu;
+	int vci = vcc->vci;
+	short vpi = vcc->vpi;
+	int err;
+
+	dbg("udsl_atm_open: vpi %hd, vci %d", vpi, vci);
+
+	if (!instance) {
+		dbg("udsl_atm_open: NULL data!");
+		return -ENODEV;
+	}
+
+	/* only support AAL5 */
+	if ((vcc->qos.aal != ATM_AAL5) || (vcc->qos.rxtp.max_sdu < 0)
+	    || (vcc->qos.rxtp.max_sdu > ATM_MAX_AAL5_PDU)) {
+		dbg("udsl_atm_open: unsupported ATM type %d!", vcc->qos.aal);
+		return -EINVAL;
+	}
+
+	/* Subdrivers may supply a hook that blocks until firmware is up. */
+	if (instance->firmware_wait &&
+	    (err = instance->firmware_wait(instance)) < 0) {
+		dbg("udsl_atm_open: firmware not loaded (%d)!", err);
+		return err;
+	}
+
+	down(&instance->serialize);	/* vs self, udsl_atm_close */
+
+	if (udsl_find_vcc(instance, vpi, vci)) {
+		dbg("udsl_atm_open: %hd/%d already in use!", vpi, vci);
+		up(&instance->serialize);
+		return -EADDRINUSE;
+	}
+
+	if (!(new = kmalloc(sizeof(struct udsl_vcc_data), GFP_KERNEL))) {
+		dbg("udsl_atm_open: no memory for vcc_data!");
+		up(&instance->serialize);
+		return -ENOMEM;
+	}
+
+	memset(new, 0, sizeof(struct udsl_vcc_data));
+	new->vcc = vcc;
+	new->vpi = vpi;
+	new->vci = vci;
+
+	/* udsl_extract_cells requires at least one cell */
+	max_pdu = max(1, UDSL_NUM_CELLS(vcc->qos.rxtp.max_sdu)) * ATM_CELL_PAYLOAD;
+	if (!(new->sarb = alloc_skb(max_pdu, GFP_KERNEL))) {
+		dbg("udsl_atm_open: no memory for SAR buffer!");
+		kfree(new);
+		up(&instance->serialize);
+		return -ENOMEM;
+	}
+
+	vcc->dev_data = new;
+
+	tasklet_disable(&instance->receive_tasklet);
+	list_add(&new->list, &instance->vcc_list);
+	tasklet_enable(&instance->receive_tasklet);
+
+	set_bit(ATM_VF_ADDR, &vcc->flags);
+	set_bit(ATM_VF_PARTIAL, &vcc->flags);
+	set_bit(ATM_VF_READY, &vcc->flags);
+
+	up(&instance->serialize);
+
+	tasklet_schedule(&instance->receive_tasklet);
+
+	dbg("udsl_atm_open: allocated vcc data 0x%p (max_pdu: %u)", new, max_pdu);
+
+	return 0;
+}
+
+/*
+ * atmdev_ops.close: cancel pending transmit skbs for this vcc, unlink it
+ * from the instance's list (receive tasklet disabled while the list is
+ * modified), free its SAR buffer and state, and clear the vcc flags.
+ */
+static void udsl_atm_close(struct atm_vcc *vcc)
+{
+	struct udsl_instance_data *instance = vcc->dev->dev_data;
+	struct udsl_vcc_data *vcc_data = vcc->dev_data;
+
+	dbg("udsl_atm_close called");
+
+	if (!instance || !vcc_data) {
+		dbg("udsl_atm_close: NULL data!");
+		return;
+	}
+
+	dbg("udsl_atm_close: deallocating vcc 0x%p with vpi %d vci %d",
+	    vcc_data, vcc_data->vpi, vcc_data->vci);
+
+	udsl_cancel_send(instance, vcc);
+
+	down(&instance->serialize);	/* vs self, udsl_atm_open */
+
+	tasklet_disable(&instance->receive_tasklet);
+	list_del(&vcc_data->list);
+	tasklet_enable(&instance->receive_tasklet);
+
+	kfree_skb(vcc_data->sarb);
+	vcc_data->sarb = NULL;
+
+	kfree(vcc_data);
+	vcc->dev_data = NULL;
+
+	vcc->vpi = ATM_VPI_UNSPEC;
+	vcc->vci = ATM_VCI_UNSPEC;
+	clear_bit(ATM_VF_READY, &vcc->flags);
+	clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+	clear_bit(ATM_VF_ADDR, &vcc->flags);
+
+	up(&instance->serialize);
+
+	dbg("udsl_atm_close successful");
+}
+
+/*
+ * atmdev_ops.ioctl: only ATM_QUERYLOOP is supported (reports no loopback
+ * mode); everything else is passed back as -ENOIOCTLCMD.
+ */
+static int udsl_atm_ioctl(struct atm_dev *dev, unsigned int cmd,
+			  void __user * arg)
+{
+	switch (cmd) {
+	case ATM_QUERYLOOP:
+		return put_user(ATM_LM_NONE, (int __user *)arg) ? -EFAULT : 0;
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+/**********
+** USB **
+**********/
+
+/*
+ * Shared core initialization, called by subdriver probe routines.
+ * Sets up refcounting, locks, lists and tasklets; allocates the receive
+ * and send urbs/buffers; registers the ATM device; and builds a human
+ * readable description string.  On allocation failure everything
+ * allocated so far is freed and -ENOMEM is returned (kfree/usb_free_urb
+ * tolerate the NULL entries for slots never reached).
+ */
+int udsl_instance_setup(struct usb_device *dev,
+			struct udsl_instance_data *instance)
+{
+	char *buf;
+	int i, length;
+
+	kref_init(&instance->refcount);	/* one for USB */
+	udsl_get_instance(instance);	/* one for ATM */
+
+	init_MUTEX(&instance->serialize);
+
+	instance->usb_dev = dev;
+
+	INIT_LIST_HEAD(&instance->vcc_list);
+
+	instance->status = UDSL_NO_FIRMWARE;
+	init_waitqueue_head(&instance->firmware_waiters);
+
+	spin_lock_init(&instance->receive_lock);
+	INIT_LIST_HEAD(&instance->spare_receivers);
+	INIT_LIST_HEAD(&instance->filled_receive_buffers);
+
+	tasklet_init(&instance->receive_tasklet, udsl_process_receive, (unsigned long)instance);
+	INIT_LIST_HEAD(&instance->spare_receive_buffers);
+
+	skb_queue_head_init(&instance->sndqueue);
+
+	spin_lock_init(&instance->send_lock);
+	INIT_LIST_HEAD(&instance->spare_senders);
+	INIT_LIST_HEAD(&instance->spare_send_buffers);
+
+	tasklet_init(&instance->send_tasklet, udsl_process_send,
+		     (unsigned long)instance);
+	INIT_LIST_HEAD(&instance->filled_send_buffers);
+
+	/* receive init */
+	for (i = 0; i < num_rcv_urbs; i++) {
+		struct udsl_receiver *rcv = &(instance->receivers[i]);
+
+		if (!(rcv->urb = usb_alloc_urb(0, GFP_KERNEL))) {
+			dbg("udsl_usb_probe: no memory for receive urb %d!", i);
+			goto fail;
+		}
+
+		rcv->instance = instance;
+
+		list_add(&rcv->list, &instance->spare_receivers);
+	}
+
+	for (i = 0; i < num_rcv_bufs; i++) {
+		struct udsl_receive_buffer *buf =
+		    &(instance->receive_buffers[i]);
+
+		/* Buffer holds rcv_buf_size cells including per-device padding. */
+		buf->base = kmalloc(rcv_buf_size * (ATM_CELL_SIZE + instance->rcv_padding),
+				    GFP_KERNEL);
+		if (!buf->base) {
+			dbg("udsl_usb_probe: no memory for receive buffer %d!", i);
+			goto fail;
+		}
+
+		list_add(&buf->list, &instance->spare_receive_buffers);
+	}
+
+	/* send init */
+	for (i = 0; i < num_snd_urbs; i++) {
+		struct udsl_sender *snd = &(instance->senders[i]);
+
+		if (!(snd->urb = usb_alloc_urb(0, GFP_KERNEL))) {
+			dbg("udsl_usb_probe: no memory for send urb %d!", i);
+			goto fail;
+		}
+
+		snd->instance = instance;
+
+		list_add(&snd->list, &instance->spare_senders);
+	}
+
+	for (i = 0; i < num_snd_bufs; i++) {
+		struct udsl_send_buffer *buf = &(instance->send_buffers[i]);
+
+		buf->base = kmalloc(snd_buf_size * (ATM_CELL_SIZE + instance->snd_padding),
+				    GFP_KERNEL);
+		if (!buf->base) {
+			dbg("udsl_usb_probe: no memory for send buffer %d!", i);
+			goto fail;
+		}
+
+		list_add(&buf->list, &instance->spare_send_buffers);
+	}
+
+	/* ATM init */
+	instance->atm_dev = atm_dev_register(instance->driver_name,
+					     &udsl_atm_devops, -1, NULL);
+	if (!instance->atm_dev) {
+		dbg("udsl_usb_probe: failed to register ATM device!");
+		goto fail;
+	}
+
+	instance->atm_dev->ci_range.vpi_bits = ATM_CI_MAX;
+	instance->atm_dev->ci_range.vci_bits = ATM_CI_MAX;
+	instance->atm_dev->signal = ATM_PHY_SIG_UNKNOWN;
+
+	/* temp init ATM device, set to 128kbit */
+	instance->atm_dev->link_rate = 128 * 1000 / 424;
+
+	/* device description: "<product> (<usb path>)", truncated as needed */
+	buf = instance->description;
+	length = sizeof(instance->description);
+
+	if ((i = usb_string(dev, dev->descriptor.iProduct, buf, length)) < 0)
+		goto finish;
+
+	buf += i;
+	length -= i;
+
+	i = scnprintf(buf, length, " (");
+	buf += i;
+	length -= i;
+
+	if (length <= 0 || (i = usb_make_path(dev, buf, length)) < 0)
+		goto finish;
+
+	buf += i;
+	length -= i;
+
+	snprintf(buf, length, ")");
+
+ finish:
+	/* ready for ATM callbacks */
+	wmb();
+	instance->atm_dev->dev_data = instance;
+
+	usb_get_dev(dev);
+
+	return 0;
+
+ fail:
+	for (i = 0; i < num_snd_bufs; i++)
+		kfree(instance->send_buffers[i].base);
+
+	for (i = 0; i < num_snd_urbs; i++)
+		usb_free_urb(instance->senders[i].urb);
+
+	for (i = 0; i < num_rcv_bufs; i++)
+		kfree(instance->receive_buffers[i].base);
+
+	for (i = 0; i < num_rcv_urbs; i++)
+		usb_free_urb(instance->receivers[i].urb);
+
+	return -ENOMEM;
+}
+
+/*
+ * Shared core teardown, called by subdriver disconnect routines.
+ * Kills all in-flight urbs (with the matching tasklet disabled so it
+ * cannot resubmit), resets the buffer lists, frees urbs and buffers, and
+ * shuts down the ATM device.  The instance itself is freed later by the
+ * kref release once the last reference is dropped.
+ */
+void udsl_instance_disconnect(struct udsl_instance_data *instance)
+{
+	int i;
+
+	dbg("udsl_instance_disconnect entered");
+
+	if (!instance) {
+		dbg("udsl_instance_disconnect: NULL instance!");
+		return;
+	}
+
+	/* receive finalize */
+	tasklet_disable(&instance->receive_tasklet);
+
+	for (i = 0; i < num_rcv_urbs; i++)
+		usb_kill_urb(instance->receivers[i].urb);
+
+	/* no need to take the spinlock */
+	INIT_LIST_HEAD(&instance->filled_receive_buffers);
+	INIT_LIST_HEAD(&instance->spare_receive_buffers);
+
+	tasklet_enable(&instance->receive_tasklet);
+
+	for (i = 0; i < num_rcv_urbs; i++)
+		usb_free_urb(instance->receivers[i].urb);
+
+	for (i = 0; i < num_rcv_bufs; i++)
+		kfree(instance->receive_buffers[i].base);
+
+	/* send finalize */
+	tasklet_disable(&instance->send_tasklet);
+
+	for (i = 0; i < num_snd_urbs; i++)
+		usb_kill_urb(instance->senders[i].urb);
+
+	/* no need to take the spinlock */
+	INIT_LIST_HEAD(&instance->spare_senders);
+	INIT_LIST_HEAD(&instance->spare_send_buffers);
+	instance->current_buffer = NULL;
+
+	tasklet_enable(&instance->send_tasklet);
+
+	for (i = 0; i < num_snd_urbs; i++)
+		usb_free_urb(instance->senders[i].urb);
+
+	for (i = 0; i < num_snd_bufs; i++)
+		kfree(instance->send_buffers[i].base);
+
+	/* ATM finalize */
+	shutdown_atm_dev(instance->atm_dev);
+}
+
+EXPORT_SYMBOL_GPL(udsl_get_instance);
+EXPORT_SYMBOL_GPL(udsl_put_instance);
+EXPORT_SYMBOL_GPL(udsl_instance_setup);
+EXPORT_SYMBOL_GPL(udsl_instance_disconnect);
+
+/***********
+** init **
+***********/
+
+/*
+ * Core module init: sanity-check that udsl_control fits in skb->cb and
+ * that all module parameters are within their documented maxima.  This
+ * core registers no USB driver itself; subdrivers do that.
+ */
+static int __init udsl_usb_init(void)
+{
+	dbg("udsl_usb_init: driver version " DRIVER_VERSION);
+
+	if (sizeof(struct udsl_control) > sizeof(((struct sk_buff *) 0)->cb)) {
+		printk(KERN_ERR __FILE__ ": unusable with this kernel!\n");
+		return -EIO;
+	}
+
+	if ((num_rcv_urbs > UDSL_MAX_RCV_URBS)
+	    || (num_snd_urbs > UDSL_MAX_SND_URBS)
+	    || (num_rcv_bufs > UDSL_MAX_RCV_BUFS)
+	    || (num_snd_bufs > UDSL_MAX_SND_BUFS)
+	    || (rcv_buf_size > UDSL_MAX_RCV_BUF_SIZE)
+	    || (snd_buf_size > UDSL_MAX_SND_BUF_SIZE))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Core module exit: nothing to undo — init registers no global resources. */
+static void __exit udsl_usb_exit(void)
+{
+}
+
+module_init(udsl_usb_init);
+module_exit(udsl_usb_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+/************
+** debug **
+************/
+
+#ifdef VERBOSE_DEBUG
+/*
+ * Debug helper (VERBOSE_DEBUG only): hex-dump 'len' bytes, 16 per dbg()
+ * line, each line prefixed with its decimal offset.  Returns bytes dumped.
+ *
+ * NOTE(review): sprintf(buffer, "%s ...", buffer, ...) passes the
+ * destination as a source argument, which is undefined behavior per the C
+ * standard (C99 7.19.6.6) — works on common libcs but should be rewritten
+ * with an advancing write pointer.
+ */
+static int udsl_print_packet(const unsigned char *data, int len)
+{
+	unsigned char buffer[256];
+	int i = 0, j = 0;
+
+	for (i = 0; i < len;) {
+		buffer[0] = '\0';
+		sprintf(buffer, "%.3d :", i);
+		for (j = 0; (j < 16) && (i < len); j++, i++) {
+			sprintf(buffer, "%s %2.2x", buffer, data[i]);
+		}
+		dbg("%s", buffer);
+	}
+	return i;
+}
+#endif
diff --git a/drivers/usb/atm/usb_atm.h b/drivers/usb/atm/usb_atm.h
new file mode 100644
index 000000000000..219763cc3242
--- /dev/null
+++ b/drivers/usb/atm/usb_atm.h
@@ -0,0 +1,159 @@
+/******************************************************************************
+ * usb_atm.h - Generic USB xDSL driver core
+ *
+ * Copyright (C) 2001, Alcatel
+ * Copyright (C) 2003, Duncan Sands, SolNegro, Josep Comas
+ * Copyright (C) 2004, David Woodhouse
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/kref.h>
+#include <linux/atm.h>
+#include <linux/atmdev.h>
+#include <asm/semaphore.h>
+
+#define UDSL_MAX_RCV_URBS 4
+#define UDSL_MAX_SND_URBS 4
+#define UDSL_MAX_RCV_BUFS 8
+#define UDSL_MAX_SND_BUFS 8
+#define UDSL_MAX_RCV_BUF_SIZE 1024 /* ATM cells */
+#define UDSL_MAX_SND_BUF_SIZE 1024 /* ATM cells */
+#define UDSL_DEFAULT_RCV_URBS 2
+#define UDSL_DEFAULT_SND_URBS 2
+#define UDSL_DEFAULT_RCV_BUFS 4
+#define UDSL_DEFAULT_SND_BUFS 4
+#define UDSL_DEFAULT_RCV_BUF_SIZE 64 /* ATM cells */
+#define UDSL_DEFAULT_SND_BUF_SIZE 64 /* ATM cells */
+
+#define ATM_CELL_HEADER (ATM_CELL_SIZE - ATM_CELL_PAYLOAD)
+#define UDSL_NUM_CELLS(x) (((x) + ATM_AAL5_TRAILER + ATM_CELL_PAYLOAD - 1) / ATM_CELL_PAYLOAD)
+
+/* receive */
+
+struct udsl_receive_buffer {
+ struct list_head list;
+ unsigned char *base;
+ unsigned int filled_cells;
+};
+
+struct udsl_receiver {
+ struct list_head list;
+ struct udsl_receive_buffer *buffer;
+ struct urb *urb;
+ struct udsl_instance_data *instance;
+};
+
+struct udsl_vcc_data {
+ /* vpi/vci lookup */
+ struct list_head list;
+ short vpi;
+ int vci;
+ struct atm_vcc *vcc;
+
+ /* raw cell reassembly */
+ struct sk_buff *sarb;
+};
+
+/* send */
+
+struct udsl_send_buffer {
+ struct list_head list;
+ unsigned char *base;
+ unsigned char *free_start;
+ unsigned int free_cells;
+};
+
+struct udsl_sender {
+ struct list_head list;
+ struct udsl_send_buffer *buffer;
+ struct urb *urb;
+ struct udsl_instance_data *instance;
+};
+
+struct udsl_control {
+ struct atm_skb_data atm_data;
+ unsigned int num_cells;
+ unsigned int num_entire;
+ unsigned int pdu_padding;
+ unsigned char aal5_trailer[ATM_AAL5_TRAILER];
+};
+
+#define UDSL_SKB(x) ((struct udsl_control *)(x)->cb)
+
+/* main driver data */
+
+enum udsl_status {
+ UDSL_NO_FIRMWARE,
+ UDSL_LOADING_FIRMWARE,
+ UDSL_LOADED_FIRMWARE
+};
+
+struct udsl_instance_data {
+ struct kref refcount;
+ struct semaphore serialize;
+
+ /* USB device part */
+ struct usb_device *usb_dev;
+ char description[64];
+ int data_endpoint;
+ int snd_padding;
+ int rcv_padding;
+ const char *driver_name;
+
+ /* ATM device part */
+ struct atm_dev *atm_dev;
+ struct list_head vcc_list;
+
+ /* firmware */
+ int (*firmware_wait) (struct udsl_instance_data *);
+ enum udsl_status status;
+ wait_queue_head_t firmware_waiters;
+
+ /* receive */
+ struct udsl_receiver receivers[UDSL_MAX_RCV_URBS];
+ struct udsl_receive_buffer receive_buffers[UDSL_MAX_RCV_BUFS];
+
+ spinlock_t receive_lock;
+ struct list_head spare_receivers;
+ struct list_head filled_receive_buffers;
+
+ struct tasklet_struct receive_tasklet;
+ struct list_head spare_receive_buffers;
+
+ /* send */
+ struct udsl_sender senders[UDSL_MAX_SND_URBS];
+ struct udsl_send_buffer send_buffers[UDSL_MAX_SND_BUFS];
+
+ struct sk_buff_head sndqueue;
+
+ spinlock_t send_lock;
+ struct list_head spare_senders;
+ struct list_head spare_send_buffers;
+
+ struct tasklet_struct send_tasklet;
+ struct sk_buff *current_skb; /* being emptied */
+ struct udsl_send_buffer *current_buffer; /* being filled */
+ struct list_head filled_send_buffers;
+};
+
+extern int udsl_instance_setup(struct usb_device *dev,
+ struct udsl_instance_data *instance);
+extern void udsl_instance_disconnect(struct udsl_instance_data *instance);
+extern void udsl_get_instance(struct udsl_instance_data *instance);
+extern void udsl_put_instance(struct udsl_instance_data *instance);
diff --git a/drivers/usb/class/Kconfig b/drivers/usb/class/Kconfig
index 3d49e9d8cb67..0561d0234f23 100644
--- a/drivers/usb/class/Kconfig
+++ b/drivers/usb/class/Kconfig
@@ -9,7 +9,8 @@ config USB_AUDIO
depends on USB && SOUND
help
Say Y here if you want to connect USB audio equipment such as
- speakers to your computer's USB port.
+ speakers to your computer's USB port. You only need this if you use
+ the OSS sound driver; ALSA has its own option for usb audio support.
To compile this driver as a module, choose M here: the
module will be called audio.
diff --git a/drivers/usb/class/audio.c b/drivers/usb/class/audio.c
index 17a6dd74713f..88628d85dff2 100644
--- a/drivers/usb/class/audio.c
+++ b/drivers/usb/class/audio.c
@@ -635,13 +635,13 @@ static void usbin_stop(struct usb_audiodev *as)
spin_unlock_irqrestore(&as->lock, flags);
if (notkilled && signal_pending(current)) {
if (i & FLG_URB0RUNNING)
- usb_unlink_urb(u->durb[0].urb);
+ usb_kill_urb(u->durb[0].urb);
if (i & FLG_URB1RUNNING)
- usb_unlink_urb(u->durb[1].urb);
+ usb_kill_urb(u->durb[1].urb);
if (i & FLG_SYNC0RUNNING)
- usb_unlink_urb(u->surb[0].urb);
+ usb_kill_urb(u->surb[0].urb);
if (i & FLG_SYNC1RUNNING)
- usb_unlink_urb(u->surb[1].urb);
+ usb_kill_urb(u->surb[1].urb);
notkilled = 0;
}
}
@@ -1114,13 +1114,13 @@ static void usbout_stop(struct usb_audiodev *as)
spin_unlock_irqrestore(&as->lock, flags);
if (notkilled && signal_pending(current)) {
if (i & FLG_URB0RUNNING)
- usb_unlink_urb(u->durb[0].urb);
+ usb_kill_urb(u->durb[0].urb);
if (i & FLG_URB1RUNNING)
- usb_unlink_urb(u->durb[1].urb);
+ usb_kill_urb(u->durb[1].urb);
if (i & FLG_SYNC0RUNNING)
- usb_unlink_urb(u->surb[0].urb);
+ usb_kill_urb(u->surb[0].urb);
if (i & FLG_SYNC1RUNNING)
- usb_unlink_urb(u->surb[1].urb);
+ usb_kill_urb(u->surb[1].urb);
notkilled = 0;
}
}
@@ -1949,15 +1949,12 @@ static inline int prog_dmabuf_out(struct usb_audiodev *as)
static int usb_audio_open_mixdev(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
- struct list_head *devs, *mdevs;
struct usb_mixerdev *ms;
struct usb_audio_state *s;
down(&open_sem);
- list_for_each(devs, &audiodevs) {
- s = list_entry(devs, struct usb_audio_state, audiodev);
- list_for_each(mdevs, &s->mixerlist) {
- ms = list_entry(mdevs, struct usb_mixerdev, list);
+ list_for_each_entry(s, &audiodevs, audiodev) {
+ list_for_each_entry(ms, &s->mixerlist, list) {
if (ms->dev_mixer == minor)
goto mixer_found;
}
@@ -2634,16 +2631,13 @@ static int usb_audio_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
DECLARE_WAITQUEUE(wait, current);
- struct list_head *devs, *adevs;
struct usb_audiodev *as;
struct usb_audio_state *s;
for (;;) {
down(&open_sem);
- list_for_each(devs, &audiodevs) {
- s = list_entry(devs, struct usb_audio_state, audiodev);
- list_for_each(adevs, &s->audiolist) {
- as = list_entry(adevs, struct usb_audiodev, list);
+ list_for_each_entry(s, &audiodevs, audiodev) {
+ list_for_each_entry(as, &s->audiolist, list) {
if (!((as->dev_audio ^ minor) & ~0xf))
goto device_found;
}
@@ -3809,7 +3803,6 @@ static int usb_audio_probe(struct usb_interface *intf,
static void usb_audio_disconnect(struct usb_interface *intf)
{
struct usb_audio_state *s = usb_get_intfdata (intf);
- struct list_head *list;
struct usb_audiodev *as;
struct usb_mixerdev *ms;
@@ -3831,8 +3824,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
usb_set_intfdata (intf, NULL);
/* deregister all audio and mixer devices, so no new processes can open this device */
- list_for_each(list, &s->audiolist) {
- as = list_entry(list, struct usb_audiodev, list);
+ list_for_each_entry(as, &s->audiolist, list) {
usbin_disc(as);
usbout_disc(as);
wake_up(&as->usbin.dma.wait);
@@ -3843,8 +3835,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
}
as->dev_audio = -1;
}
- list_for_each(list, &s->mixerlist) {
- ms = list_entry(list, struct usb_mixerdev, list);
+ list_for_each_entry(ms, &s->mixerlist, list) {
if (ms->dev_mixer >= 0) {
unregister_sound_mixer(ms->dev_mixer);
printk(KERN_INFO "usbaudio: unregister mixer 14,%d\n", ms->dev_mixer);
diff --git a/drivers/usb/class/bluetty.c b/drivers/usb/class/bluetty.c
index 4d8e01895ce9..6baf68badc06 100644
--- a/drivers/usb/class/bluetty.c
+++ b/drivers/usb/class/bluetty.c
@@ -426,8 +426,8 @@ static void bluetooth_close (struct tty_struct *tty, struct file * filp)
bluetooth->open_count = 0;
/* shutdown any in-flight urbs that we know about */
- usb_unlink_urb (bluetooth->read_urb);
- usb_unlink_urb (bluetooth->interrupt_in_urb);
+ usb_kill_urb (bluetooth->read_urb);
+ usb_kill_urb (bluetooth->interrupt_in_urb);
}
up(&bluetooth->lock);
}
@@ -705,7 +705,7 @@ void btusb_disable_bulk_read(struct tty_struct *tty){
}
if ((bluetooth->read_urb) && (bluetooth->read_urb->actual_length))
- usb_unlink_urb(bluetooth->read_urb);
+ usb_kill_urb(bluetooth->read_urb);
}
#endif
@@ -1179,14 +1179,14 @@ static void usb_bluetooth_disconnect(struct usb_interface *intf)
bluetooth->open_count = 0;
if (bluetooth->read_urb) {
- usb_unlink_urb (bluetooth->read_urb);
+ usb_kill_urb (bluetooth->read_urb);
usb_free_urb (bluetooth->read_urb);
}
if (bluetooth->bulk_in_buffer)
kfree (bluetooth->bulk_in_buffer);
if (bluetooth->interrupt_in_urb) {
- usb_unlink_urb (bluetooth->interrupt_in_urb);
+ usb_kill_urb (bluetooth->interrupt_in_urb);
usb_free_urb (bluetooth->interrupt_in_urb);
}
if (bluetooth->interrupt_in_buffer)
@@ -1196,7 +1196,7 @@ static void usb_bluetooth_disconnect(struct usb_interface *intf)
for (i = 0; i < NUM_CONTROL_URBS; ++i) {
if (bluetooth->control_urb_pool[i]) {
- usb_unlink_urb (bluetooth->control_urb_pool[i]);
+ usb_kill_urb (bluetooth->control_urb_pool[i]);
if (bluetooth->control_urb_pool[i]->transfer_buffer)
kfree (bluetooth->control_urb_pool[i]->transfer_buffer);
usb_free_urb (bluetooth->control_urb_pool[i]);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index a929a521242b..0d0d49923bee 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -301,9 +301,9 @@ done:
return 0;
full_bailout:
- usb_unlink_urb(acm->readurb);
+ usb_kill_urb(acm->readurb);
bail_out_and_unlink:
- usb_unlink_urb(acm->ctrlurb);
+ usb_kill_urb(acm->ctrlurb);
bail_out:
up(&open_sem);
return -EIO;
@@ -320,9 +320,9 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
if (!--acm->used) {
if (acm->dev) {
acm_set_control(acm, acm->ctrlout = 0);
- usb_unlink_urb(acm->ctrlurb);
- usb_unlink_urb(acm->writeurb);
- usb_unlink_urb(acm->readurb);
+ usb_kill_urb(acm->ctrlurb);
+ usb_kill_urb(acm->writeurb);
+ usb_kill_urb(acm->readurb);
} else {
tty_unregister_device(acm_tty_driver, acm->minor);
acm_table[acm->minor] = NULL;
@@ -542,6 +542,17 @@ static int acm_probe (struct usb_interface *intf,
return -EINVAL;
}
+ if (!buflen) {
+ if (intf->cur_altsetting->endpoint->extralen && intf->cur_altsetting->endpoint->extra) {
+ dev_dbg(&intf->dev,"Seeking extra descriptors on endpoint");
+ buflen = intf->cur_altsetting->endpoint->extralen;
+ buffer = intf->cur_altsetting->endpoint->extra;
+ } else {
+ err("Zero length descriptor references");
+ return -EINVAL;
+ }
+ }
+
while (buflen > 0) {
if (buffer [1] != USB_DT_CS_INTERFACE) {
err("skipping garbage");
@@ -558,6 +569,8 @@ static int acm_probe (struct usb_interface *intf,
break;
case CDC_COUNTRY_TYPE: /* maybe somehow export */
break; /* for now we ignore it */
+ case CDC_HEADER_TYPE: /* maybe check version */
+ break; /* for now we ignore it */
case CDC_AC_MANAGEMENT_TYPE:
ac_management_function = buffer[3];
break;
@@ -569,7 +582,7 @@ static int acm_probe (struct usb_interface *intf,
break;
default:
- err("Ignoring extra header");
+ err("Ignoring extra header, type %d, length %d", buffer[2], buffer[0]);
break;
}
next_desc:
@@ -637,7 +650,7 @@ next_desc:
dbg("interfaces are valid");
for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);
- if (acm_table[minor]) {
+ if (minor == ACM_TTY_MINORS) {
err("no more free acm devices");
return -ENODEV;
}
@@ -762,9 +775,9 @@ static void acm_disconnect(struct usb_interface *intf)
acm->dev = NULL;
usb_set_intfdata (intf, NULL);
- usb_unlink_urb(acm->ctrlurb);
- usb_unlink_urb(acm->readurb);
- usb_unlink_urb(acm->writeurb);
+ usb_kill_urb(acm->ctrlurb);
+ usb_kill_urb(acm->readurb);
+ usb_kill_urb(acm->writeurb);
flush_scheduled_work(); /* wait for acm_softint */
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index cc26d9517398..da945b367a85 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -117,6 +117,7 @@ struct union_desc {
} __attribute__ ((packed));
/* class specific descriptor types */
+#define CDC_HEADER_TYPE 0x00
#define CDC_CALL_MANAGEMENT_TYPE 0x01
#define CDC_AC_MANAGEMENT_TYPE 0x02
#define CDC_UNION_TYPE 0x06
diff --git a/drivers/usb/class/usb-midi.c b/drivers/usb/class/usb-midi.c
index a9dc047a04a7..1dc952e9bc81 100644
--- a/drivers/usb/class/usb-midi.c
+++ b/drivers/usb/class/usb-midi.c
@@ -805,7 +805,6 @@ static int usb_midi_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
DECLARE_WAITQUEUE(wait, current);
- struct list_head *devs, *mdevs;
struct usb_midi_state *s;
struct usb_mididev *m;
unsigned long flags;
@@ -817,10 +816,8 @@ static int usb_midi_open(struct inode *inode, struct file *file)
for(;;) {
down(&open_sem);
- list_for_each(devs, &mididevs) {
- s = list_entry(devs, struct usb_midi_state, mididev);
- list_for_each(mdevs, &s->midiDevList) {
- m = list_entry(mdevs, struct usb_mididev, list);
+ list_for_each_entry(s, &mididevs, mididev) {
+ list_for_each_entry(m, &s->midiDevList, list) {
if ( !((m->dev_midi ^ minor) & ~0xf) )
goto device_found;
}
@@ -939,7 +936,7 @@ static int usb_midi_release(struct inode *inode, struct file *file)
if ( m->open_mode & FMODE_WRITE ) {
m->open_mode &= ~FMODE_WRITE;
- usb_unlink_urb( m->mout.ep->urb );
+ usb_kill_urb( m->mout.ep->urb );
}
if ( m->open_mode & FMODE_READ ) {
@@ -951,7 +948,7 @@ static int usb_midi_release(struct inode *inode, struct file *file)
if ( m->min.ep->readers == 0 &&
m->min.ep->urbSubmitted ) {
m->min.ep->urbSubmitted = 0;
- usb_unlink_urb(m->min.ep->urb);
+ usb_kill_urb(m->min.ep->urb);
}
spin_unlock_irqrestore( &m->min.ep->lock, flagsep );
}
@@ -1042,7 +1039,7 @@ static struct midi_in_endpoint *alloc_midi_in_endpoint( struct usb_device *d, in
static int remove_midi_in_endpoint( struct midi_in_endpoint *min )
{
- usb_unlink_urb( min->urb );
+ usb_kill_urb( min->urb );
usb_free_urb( min->urb );
kfree( min->recvBuf );
kfree( min );
@@ -1102,7 +1099,7 @@ static struct midi_out_endpoint *alloc_midi_out_endpoint( struct usb_device *d,
static int remove_midi_out_endpoint( struct midi_out_endpoint *mout )
{
- usb_unlink_urb( mout->urb );
+ usb_kill_urb( mout->urb );
usb_free_urb( mout->urb );
kfree( mout->buf );
kfree( mout );
@@ -1994,7 +1991,6 @@ static int usb_midi_probe(struct usb_interface *intf,
static void usb_midi_disconnect(struct usb_interface *intf)
{
struct usb_midi_state *s = usb_get_intfdata (intf);
- struct list_head *list;
struct usb_mididev *m;
if ( !s )
@@ -2012,8 +2008,7 @@ static void usb_midi_disconnect(struct usb_interface *intf)
s->usbdev = NULL;
usb_set_intfdata (intf, NULL);
- list_for_each(list, &s->midiDevList) {
- m = list_entry(list, struct usb_mididev, list);
+ list_for_each_entry(m, &s->midiDevList, list) {
wake_up(&(m->min.ep->wait));
wake_up(&(m->mout.ep->wait));
if ( m->dev_midi >= 0 ) {
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index da91bbd6735c..da145c91c1fa 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -406,9 +406,9 @@ static void usblp_cleanup (struct usblp *usblp)
static void usblp_unlink_urbs(struct usblp *usblp)
{
- usb_unlink_urb(usblp->writeurb);
+ usb_kill_urb(usblp->writeurb);
if (usblp->bidir)
- usb_unlink_urb(usblp->readurb);
+ usb_kill_urb(usblp->readurb);
}
static int usblp_release(struct inode *inode, struct file *file)
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 8e535789fce7..50009ed51e8d 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -149,7 +149,7 @@ static const struct class_info clas_info[] =
/*****************************************************************/
-void usbdevfs_conn_disc_event(void)
+void usbfs_conn_disc_event(void)
{
conndiscevcnt++;
wake_up(&deviceconndiscwq);
@@ -451,7 +451,7 @@ static char *usb_dump_string(char *start, char *end, const struct usb_device *de
* nbytes - the maximum number of bytes to write
* skip_bytes - the number of bytes to skip before writing anything
* file_offset - the offset into the devices file on completion
- * The caller must own the usbdev->serialize semaphore.
+ * The caller must own the device lock.
*/
static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, loff_t *skip_bytes, loff_t *file_offset,
struct usb_device *usbdev, struct usb_bus *bus, int level, int index, int count)
@@ -569,7 +569,6 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, loff_t *ski
static ssize_t usb_device_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- struct list_head *buslist;
struct usb_bus *bus;
ssize_t ret, total_written = 0;
loff_t skip_bytes = *ppos;
@@ -581,18 +580,15 @@ static ssize_t usb_device_read(struct file *file, char __user *buf, size_t nbyte
if (!access_ok(VERIFY_WRITE, buf, nbytes))
return -EFAULT;
- /* enumerate busses */
down (&usb_bus_list_lock);
- list_for_each(buslist, &usb_bus_list) {
- /* print devices for this bus */
- bus = list_entry(buslist, struct usb_bus, bus_list);
-
+ /* print devices for all busses */
+ list_for_each_entry(bus, &usb_bus_list, bus_list) {
/* recurse through all children of the root hub */
if (!bus->root_hub)
continue;
- down(&bus->root_hub->serialize);
+ usb_lock_device(bus->root_hub);
ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos, bus->root_hub, bus, 0, 0, 0);
- up(&bus->root_hub->serialize);
+ usb_unlock_device(bus->root_hub);
if (ret < 0) {
up(&usb_bus_list_lock);
return ret;
@@ -682,7 +678,7 @@ static loff_t usb_device_lseek(struct file * file, loff_t offset, int orig)
return ret;
}
-struct file_operations usbdevfs_devices_fops = {
+struct file_operations usbfs_devices_fops = {
.llseek = usb_device_lseek,
.read = usb_device_read,
.poll = usb_device_poll,
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 776c1bf0df9b..0e0ea0806606 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -21,7 +21,7 @@
*
* $Id: devio.c,v 1.7 2000/02/01 17:28:48 fliegl Exp $
*
- * This file implements the usbdevfs/x/y files, where
+ * This file implements the usbfs/x/y files, where
* x is the bus number and y the device number.
*
* It allows user space programs/"drivers" to communicate directly
@@ -113,7 +113,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes, l
int i;
pos = *ppos;
- down(&dev->serialize);
+ usb_lock_device(dev);
if (!connected(dev)) {
ret = -ENODEV;
goto err;
@@ -175,7 +175,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes, l
}
err:
- up(&dev->serialize);
+ usb_unlock_device(dev);
return ret;
}
@@ -286,9 +286,10 @@ static void destroy_async (struct dev_state *ps, struct list_head *list)
while (!list_empty(list)) {
as = list_entry(list->next, struct async, asynclist);
list_del_init(&as->asynclist);
+
+ /* drop the spinlock so the completion handler can run */
spin_unlock_irqrestore(&ps->lock, flags);
- /* usb_unlink_urb calls the completion handler with status == -ENOENT */
- usb_unlink_urb(as->urb);
+ usb_kill_urb(as->urb);
spin_lock_irqsave(&ps->lock, flags);
}
spin_unlock_irqrestore(&ps->lock, flags);
@@ -353,7 +354,7 @@ static void driver_disconnect(struct usb_interface *intf)
destroy_async_on_interface(ps, ifnum);
}
-struct usb_driver usbdevfs_driver = {
+struct usb_driver usbfs_driver = {
.owner = THIS_MODULE,
.name = "usbfs",
.probe = driver_probe,
@@ -378,7 +379,7 @@ static int claimintf(struct dev_state *ps, unsigned int ifnum)
if (!intf)
err = -ENOENT;
else
- err = usb_driver_claim_interface(&usbdevfs_driver, intf, ps);
+ err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
up_write(&usb_bus_type.subsys.rwsem);
if (err == 0)
set_bit(ifnum, &ps->ifclaimed);
@@ -401,7 +402,7 @@ static int releaseintf(struct dev_state *ps, unsigned int ifnum)
if (!intf)
err = -ENOENT;
else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) {
- usb_driver_release_interface(&usbdevfs_driver, intf);
+ usb_driver_release_interface(&usbfs_driver, intf);
err = 0;
}
up_write(&usb_bus_type.subsys.rwsem);
@@ -516,7 +517,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
struct usb_device *dev = ps->dev;
unsigned int ifnum;
- down(&dev->serialize);
+ usb_lock_device(dev);
list_del_init(&ps->list);
if (connected(dev)) {
@@ -525,7 +526,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
releaseintf(ps, ifnum);
destroy_all_async(ps);
}
- up(&dev->serialize);
+ usb_unlock_device(dev);
usb_put_dev(dev);
ps->dev = NULL;
kfree(ps);
@@ -557,10 +558,10 @@ static int proc_control(struct dev_state *ps, void __user *arg)
snoop(&dev->dev, "control read: bRequest=%02x bRrequestType=%02x wValue=%04x wIndex=%04x\n",
ctrl.bRequest, ctrl.bRequestType, ctrl.wValue, ctrl.wIndex);
- up(&dev->serialize);
+ usb_unlock_device(dev);
i = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ctrl.bRequest, ctrl.bRequestType,
ctrl.wValue, ctrl.wIndex, tbuf, ctrl.wLength, tmo);
- down(&dev->serialize);
+ usb_lock_device(dev);
if ((i > 0) && ctrl.wLength) {
if (usbfs_snoop) {
dev_info(&dev->dev, "control read: data ");
@@ -588,10 +589,10 @@ static int proc_control(struct dev_state *ps, void __user *arg)
printk ("%02x ", (unsigned char)(tbuf)[j]);
printk("\n");
}
- up(&dev->serialize);
+ usb_unlock_device(dev);
i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest, ctrl.bRequestType,
ctrl.wValue, ctrl.wIndex, tbuf, ctrl.wLength, tmo);
- down(&dev->serialize);
+ usb_lock_device(dev);
}
free_page((unsigned long)tbuf);
if (i<0) {
@@ -635,9 +636,9 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
kfree(tbuf);
return -EINVAL;
}
- up(&dev->serialize);
+ usb_unlock_device(dev);
i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
- down(&dev->serialize);
+ usb_lock_device(dev);
if (!i && len2) {
if (copy_to_user(bulk.data, tbuf, len2)) {
kfree(tbuf);
@@ -651,9 +652,9 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
return -EFAULT;
}
}
- up(&dev->serialize);
+ usb_unlock_device(dev);
i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
- down(&dev->serialize);
+ usb_lock_device(dev);
}
kfree(tbuf);
if (i < 0) {
@@ -734,7 +735,7 @@ static int proc_connectinfo(struct dev_state *ps, void __user *arg)
static int proc_resetdevice(struct dev_state *ps)
{
- return __usb_reset_device(ps->dev);
+ return usb_reset_device(ps->dev);
}
@@ -976,7 +977,7 @@ static int proc_unlinkurb(struct dev_state *ps, void __user *arg)
as = async_getpending(ps, arg);
if (!as)
return -EINVAL;
- usb_unlink_urb(as->urb);
+ usb_kill_urb(as->urb);
return 0;
}
@@ -1024,9 +1025,9 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
break;
if (signal_pending(current))
break;
- up(&dev->serialize);
+ usb_unlock_device(dev);
schedule();
- down(&dev->serialize);
+ usb_lock_device(dev);
}
remove_wait_queue(&ps->wait, &wait);
set_current_state(TASK_RUNNING);
@@ -1149,7 +1150,11 @@ static int proc_ioctl (struct dev_state *ps, void __user *arg)
/* let kernel drivers try to (re)bind to the interface */
case USBDEVFS_CONNECT:
+ usb_unlock_device(ps->dev);
+ usb_lock_all_devices();
bus_rescan_devices(intf->dev.bus);
+ usb_unlock_all_devices();
+ usb_lock_device(ps->dev);
break;
/* talk directly to the interface's driver */
@@ -1192,9 +1197,9 @@ static int usbdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
if (!(file->f_mode & FMODE_WRITE))
return -EPERM;
- down(&dev->serialize);
+ usb_lock_device(dev);
if (!connected(dev)) {
- up(&dev->serialize);
+ usb_unlock_device(dev);
return -ENODEV;
}
@@ -1294,7 +1299,7 @@ static int usbdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
ret = proc_ioctl(ps, p);
break;
}
- up(&dev->serialize);
+ usb_unlock_device(dev);
if (ret >= 0)
inode->i_atime = CURRENT_TIME;
return ret;
@@ -1314,7 +1319,7 @@ static unsigned int usbdev_poll(struct file *file, struct poll_table_struct *wai
return mask;
}
-struct file_operations usbdevfs_device_file_operations = {
+struct file_operations usbfs_device_file_operations = {
.llseek = usbdev_lseek,
.read = usbdev_read,
.poll = usbdev_poll,
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 5fdaae079cf6..837bb267f194 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -188,9 +188,9 @@ clean_3:
}
hcd->irq = dev->irq;
- dev_info (hcd->self.controller, "irq %s, %s %p\n", bufp,
+ dev_info (hcd->self.controller, "irq %s, %s 0x%lx\n", bufp,
(driver->flags & HCD_MEMORY) ? "pci mem" : "io base",
- base);
+ resource);
usb_bus_init (&hcd->self);
hcd->self.op = &usb_hcd_operations;
@@ -260,6 +260,8 @@ void usb_hcd_pci_remove (struct pci_dev *dev)
}
usb_deregister_bus (&hcd->self);
+
+ pci_disable_device(dev);
}
EXPORT_SYMBOL (usb_hcd_pci_remove);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index d970e2c0c066..8ab1163e59b9 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -798,9 +798,9 @@ int usb_register_root_hub (struct usb_device *usb_dev, struct device *parent_dev
return (retval < 0) ? retval : -EMSGSIZE;
}
- down (&usb_dev->serialize);
+ usb_lock_device (usb_dev);
retval = usb_new_device (usb_dev);
- up (&usb_dev->serialize);
+ usb_unlock_device (usb_dev);
if (retval) {
usb_dev->bus->root_hub = NULL;
dev_err (parent_dev, "can't register root hub for %s, %d\n",
@@ -1264,7 +1264,7 @@ static int hcd_unlink_urb (struct urb *urb, int status)
* never get completion IRQs ... maybe even the ones we need to
* finish unlinking the initial failed usb_set_address().
*/
- if (!hcd->saw_irq) {
+ if (!hcd->saw_irq && hcd->rh_timer.data != (unsigned long) urb) {
dev_warn (hcd->self.controller, "Unlink after no-IRQ? "
"Different ACPI or APIC settings may help."
"\n");
@@ -1573,13 +1573,12 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd, struct pt_regs * r)
struct usb_hcd *hcd = __hcd;
int start = hcd->state;
- if (unlikely (hcd->state == USB_STATE_HALT)) /* irq sharing? */
+ if (start == USB_STATE_HALT)
return IRQ_NONE;
-
- hcd->saw_irq = 1;
if (hcd->driver->irq (hcd, r) == IRQ_NONE)
return IRQ_NONE;
+ hcd->saw_irq = 1;
if (hcd->state != start && hcd->state == USB_STATE_HALT)
usb_hc_died (hcd);
return IRQ_HANDLED;
@@ -1588,22 +1587,6 @@ EXPORT_SYMBOL (usb_hcd_irq);
/*-------------------------------------------------------------------------*/
-static void hcd_panic (void *_hcd)
-{
- struct usb_hcd *hcd = _hcd;
- struct usb_device *hub = hcd->self.root_hub;
- unsigned i;
-
- /* hc's root hub is removed later removed in hcd->stop() */
- down (&hub->serialize);
- usb_set_device_state(hub, USB_STATE_NOTATTACHED);
- for (i = 0; i < hub->maxchild; i++) {
- if (hub->children [i])
- usb_disconnect (&hub->children [i]);
- }
- up (&hub->serialize);
-}
-
/**
* usb_hc_died - report abnormal shutdown of a host controller (bus glue)
* @hcd: pointer to the HCD representing the controller
@@ -1616,9 +1599,9 @@ void usb_hc_died (struct usb_hcd *hcd)
{
dev_err (hcd->self.controller, "HC died; cleaning up\n");
- /* clean up old urbs and devices; needs a task context */
- INIT_WORK (&hcd->work, hcd_panic, hcd);
- (void) schedule_work (&hcd->work);
+ /* make khubd clean up old urbs and devices */
+ usb_set_device_state(hcd->self.root_hub, USB_STATE_NOTATTACHED);
+ mod_timer(&hcd->rh_timer, jiffies);
}
EXPORT_SYMBOL (usb_hc_died);
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 340977b72925..eb8ae109096e 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -67,7 +67,6 @@ struct usb_hcd { /* usb_bus.hcpriv points to this */
struct timer_list rh_timer; /* drives root hub */
struct list_head dev_list; /* devices on this bus */
- struct work_struct work;
/*
* hardware info/state
@@ -363,6 +362,9 @@ static inline int hcd_register_root (struct usb_device *usb_dev,
return usb_register_root_hub (usb_dev, hcd->self.controller);
}
+extern void usb_set_device_state(struct usb_device *udev,
+ enum usb_device_state new_state);
+
/*-------------------------------------------------------------------------*/
/* exported only within usbcore */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 7f72ee96c7fe..10baa8f52566 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -36,15 +36,18 @@
#include "hcd.h"
#include "hub.h"
-/* Protect struct usb_device state and children members */
+/* Protect struct usb_device->state and ->children members
+ * Note: Both are also protected by ->serialize, except that ->state can
+ * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
static spinlock_t device_state_lock = SPIN_LOCK_UNLOCKED;
-/* Wakes up khubd */
+/* khubd's worklist and its lock */
static spinlock_t hub_event_lock = SPIN_LOCK_UNLOCKED;
-
static LIST_HEAD(hub_event_list); /* List of hubs needing servicing */
+/* Wakes up khubd */
static DECLARE_WAIT_QUEUE_HEAD(khubd_wait);
+
static pid_t khubd_pid = 0; /* PID of khubd */
static DECLARE_COMPLETION(khubd_exited);
@@ -226,6 +229,19 @@ static int get_port_status(struct usb_device *hdev, int port,
data, sizeof(*data), HZ * USB_CTRL_GET_TIMEOUT);
}
+static void kick_khubd(struct usb_hub *hub)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hub_event_lock, flags);
+ if (list_empty(&hub->event_list)) {
+ list_add_tail(&hub->event_list, &hub_event_list);
+ wake_up(&khubd_wait);
+ }
+ spin_unlock_irqrestore(&hub_event_lock, flags);
+}
+
+
/* completion function, fires on port status changes and various faults */
static void hub_irq(struct urb *urb, struct pt_regs *regs)
{
@@ -261,12 +277,7 @@ static void hub_irq(struct urb *urb, struct pt_regs *regs)
hub->nerrors = 0;
/* Something happened, let khubd figure it out */
- spin_lock(&hub_event_lock);
- if (list_empty(&hub->event_list)) {
- list_add_tail(&hub->event_list, &hub_event_list);
- wake_up(&khubd_wait);
- }
- spin_unlock(&hub_event_lock);
+ kick_khubd(hub);
resubmit:
if (hub->quiescing)
@@ -384,6 +395,33 @@ static void hub_power_on(struct usb_hub *hub)
msleep(hub->descriptor->bPwrOn2PwrGood * 2);
}
+static void hub_quiesce(struct usb_hub *hub)
+{
+ /* stop khubd and related activity */
+ hub->quiescing = 1;
+ usb_kill_urb(hub->urb);
+ if (hub->has_indicators)
+ cancel_delayed_work(&hub->leds);
+ if (hub->has_indicators || hub->tt.hub)
+ flush_scheduled_work();
+}
+
+static void hub_activate(struct usb_hub *hub)
+{
+ int status;
+
+ hub->quiescing = 0;
+ status = usb_submit_urb(hub->urb, GFP_NOIO);
+ if (status < 0)
+ dev_err(&hub->intf->dev, "activate --> %d\n", status);
+ if (hub->has_indicators && blinkenlights)
+ schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD);
+
+ /* scan all ports ASAP */
+ hub->event_bits[0] = ~0;
+ kick_khubd(hub);
+}
+
static int hub_hub_status(struct usb_hub *hub,
u16 *status, u16 *change)
{
@@ -579,7 +617,7 @@ static int hub_configure(struct usb_hub *hub,
dev_dbg(hub_dev, "%sover-current condition exists\n",
(hubstatus & HUB_STATUS_OVERCURRENT) ? "" : "no ");
- /* Start the interrupt endpoint */
+ /* set up the interrupt endpoint */
pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
@@ -597,24 +635,13 @@ static int hub_configure(struct usb_hub *hub,
hub, endpoint->bInterval);
hub->urb->transfer_dma = hub->buffer_dma;
hub->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- ret = usb_submit_urb(hub->urb, GFP_KERNEL);
- if (ret) {
- message = "couldn't submit status urb";
- goto fail;
- }
-
- /* Wake up khubd */
- wake_up(&khubd_wait);
- /* maybe start cycling the hub leds */
- if (hub->has_indicators && blinkenlights) {
- set_port_led(hdev, 1, HUB_LED_GREEN);
+ /* maybe cycle the hub leds */
+ if (hub->has_indicators && blinkenlights)
hub->indicator [0] = INDICATOR_CYCLE;
- schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD);
- }
hub_power_on(hub);
-
+ hub_activate(hub);
return 0;
fail:
@@ -626,33 +653,6 @@ fail:
static unsigned highspeed_hubs;
-static void hub_quiesce(struct usb_hub *hub)
-{
- /* stop khubd and related activity */
- hub->quiescing = 1;
- usb_kill_urb(hub->urb);
- if (hub->has_indicators)
- cancel_delayed_work(&hub->leds);
- if (hub->has_indicators || hub->tt.hub)
- flush_scheduled_work();
-}
-
-#ifdef CONFIG_USB_SUSPEND
-
-static void hub_reactivate(struct usb_hub *hub)
-{
- int status;
-
- hub->quiescing = 0;
- status = usb_submit_urb(hub->urb, GFP_NOIO);
- if (status < 0)
- dev_err(&hub->intf->dev, "reactivate --> %d\n", status);
- if (hub->has_indicators && blinkenlights)
- schedule_delayed_work(&hub->leds, LED_CYCLE_PERIOD);
-}
-
-#endif
-
static void hub_disconnect(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata (intf);
@@ -794,68 +794,29 @@ hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
}
}
-/* caller has locked the hub and must own the device lock */
-static int hub_reset(struct usb_hub *hub)
+/* caller has locked the hub device */
+static void hub_pre_reset(struct usb_device *hdev)
{
- struct usb_device *hdev = hub->hdev;
+ struct usb_hub *hub = usb_get_intfdata(hdev->actconfig->interface[0]);
int i;
- /* Disconnect any attached devices */
- for (i = 0; i < hub->descriptor->bNbrPorts; i++) {
+ for (i = 0; i < hdev->maxchild; ++i) {
if (hdev->children[i])
usb_disconnect(&hdev->children[i]);
}
-
- /* Attempt to reset the hub */
- if (hub->urb)
- usb_kill_urb(hub->urb);
- else
- return -1;
-
- if (__usb_reset_device(hdev))
- return -1;
-
- hub->urb->dev = hdev;
- if (usb_submit_urb(hub->urb, GFP_KERNEL))
- return -1;
-
- hub_power_on(hub);
-
- return 0;
+ hub_quiesce(hub);
}
-/* caller has locked the hub */
-/* FIXME! This routine should be subsumed into hub_reset */
-static void hub_start_disconnect(struct usb_device *hdev)
+/* caller has locked the hub device */
+static void hub_post_reset(struct usb_device *hdev)
{
- struct usb_device *parent = hdev->parent;
- int i;
-
- /* Find the device pointer to disconnect */
- if (parent) {
- for (i = 0; i < parent->maxchild; i++) {
- if (parent->children[i] == hdev) {
- usb_disconnect(&parent->children[i]);
- return;
- }
- }
- }
+ struct usb_hub *hub = usb_get_intfdata(hdev->actconfig->interface[0]);
- dev_err(&hdev->dev, "cannot disconnect hub!\n");
+ hub_activate(hub);
+ hub_power_on(hub);
}
-static void recursively_mark_NOTATTACHED(struct usb_device *udev)
-{
- int i;
-
- for (i = 0; i < udev->maxchild; ++i) {
- if (udev->children[i])
- recursively_mark_NOTATTACHED(udev->children[i]);
- }
- udev->state = USB_STATE_NOTATTACHED;
-}
-
/* grab device/port lock, returning index of that port (zero based).
* protects the upstream link used by this device from concurrent
* tree operations like suspend, resume, reset, and disconnect, which
@@ -872,21 +833,16 @@ static int locktree(struct usb_device *udev)
/* root hub is always the first lock in the series */
hdev = udev->parent;
if (!hdev) {
- down(&udev->serialize);
+ usb_lock_device(udev);
return 0;
}
/* on the path from root to us, lock everything from
* top down, dropping parent locks when not needed
- *
- * NOTE: if disconnect were to ignore the locking, we'd need
- * to get extra refcounts to everything since hdev->children
- * and udev->parent could be invalidated while we work...
*/
t = locktree(hdev);
if (t < 0)
return t;
- spin_lock_irq(&device_state_lock);
for (t = 0; t < hdev->maxchild; t++) {
if (hdev->children[t] == udev) {
/* everything is fail-fast once disconnect
@@ -898,33 +854,45 @@ static int locktree(struct usb_device *udev)
/* when everyone grabs locks top->bottom,
* non-overlapping work may be concurrent
*/
- spin_unlock_irq(&device_state_lock);
down(&udev->serialize);
up(&hdev->serialize);
return t;
}
}
- spin_unlock_irq(&device_state_lock);
- up(&hdev->serialize);
+ usb_unlock_device(hdev);
return -ENODEV;
}
+static void recursively_mark_NOTATTACHED(struct usb_device *udev)
+{
+ int i;
+
+ for (i = 0; i < udev->maxchild; ++i) {
+ if (udev->children[i])
+ recursively_mark_NOTATTACHED(udev->children[i]);
+ }
+ udev->state = USB_STATE_NOTATTACHED;
+}
+
/**
- * usb_set_device_state - change a device's current state (usbcore-internal)
+ * usb_set_device_state - change a device's current state (usbcore, hcds)
* @udev: pointer to device whose state should be changed
* @new_state: new state value to be stored
*
- * udev->state is _not_ protected by the device lock. This
+ * udev->state is _not_ fully protected by the device lock. Although
+ * most transitions are made only while holding the lock, the state can
+ * change to USB_STATE_NOTATTACHED at almost any time. This
* is so that devices can be marked as disconnected as soon as possible,
- * without having to wait for the semaphore to be released. Instead,
- * changes to the state must be protected by the device_state_lock spinlock.
+ * without having to wait for any semaphores to be released. As a result,
+ * all changes to any device's state must be protected by the
+ * device_state_lock spinlock.
*
* Once a device has been added to the device tree, all changes to its state
* should be made using this routine. The state should _not_ be set directly.
*
* If udev->state is already USB_STATE_NOTATTACHED then no change is made.
* Otherwise udev->state is set to new_state, and if new_state is
- * USB_STATE_NOTATTACHED then all of udev's descendant's states are also set
+ * USB_STATE_NOTATTACHED then all of udev's descendants' states are also set
* to USB_STATE_NOTATTACHED.
*/
void usb_set_device_state(struct usb_device *udev,
@@ -941,6 +909,7 @@ void usb_set_device_state(struct usb_device *udev,
recursively_mark_NOTATTACHED(udev);
spin_unlock_irqrestore(&device_state_lock, flags);
}
+EXPORT_SYMBOL(usb_set_device_state);
static void choose_address(struct usb_device *udev)
@@ -974,11 +943,12 @@ static void release_address(struct usb_device *udev)
/**
* usb_disconnect - disconnect a device (usbcore-internal)
- * @pdev: pointer to device being disconnected, into a locked hub
+ * @pdev: pointer to device being disconnected
* Context: !in_interrupt ()
*
- * Something got disconnected. Get rid of it, and all of its children.
- * If *pdev is a normal device then the parent hub should be locked.
+ * Something got disconnected. Get rid of it and all of its children.
+ *
+ * If *pdev is a normal device then the parent hub must already be locked.
* If *pdev is a root hub then this routine will acquire the
* usb_bus_list_lock on behalf of the caller.
*
@@ -1004,9 +974,11 @@ void usb_disconnect(struct usb_device **pdev)
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
/* lock the bus list on behalf of HCDs unregistering their root hubs */
- if (!udev->parent)
+ if (!udev->parent) {
down(&usb_bus_list_lock);
- down(&udev->serialize);
+ usb_lock_device(udev);
+ } else
+ down(&udev->serialize);
dev_info (&udev->dev, "USB disconnect, address %d\n", udev->devnum);
@@ -1031,14 +1003,16 @@ void usb_disconnect(struct usb_device **pdev)
usbfs_remove_device(udev);
usb_remove_sysfs_dev_files(udev);
- /* Avoid races with recursively_mark_NOTATTACHED() and locktree() */
+ /* Avoid races with recursively_mark_NOTATTACHED() */
spin_lock_irq(&device_state_lock);
*pdev = NULL;
spin_unlock_irq(&device_state_lock);
- up(&udev->serialize);
- if (!udev->parent)
+ if (!udev->parent) {
+ usb_unlock_device(udev);
up(&usb_bus_list_lock);
+ } else
+ up(&udev->serialize);
device_unregister(&udev->dev);
}
@@ -1061,11 +1035,19 @@ static int choose_configuration(struct usb_device *udev)
->altsetting->desc;
if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC)
continue;
- /* COMM/2/all is CDC ACM, except 0xff is MSFT RNDIS */
+ /* COMM/2/all is CDC ACM, except 0xff is MSFT RNDIS.
+ * MSFT needs this to be the first config; never use
+ * it as the default unless Linux has host-side RNDIS.
+ * A second config would ideally be CDC-Ethernet, but
+ * may instead be the "vendor specific" CDC subset
+ * long used by ARM Linux for sa1100 or pxa255.
+ */
if (desc->bInterfaceClass == USB_CLASS_COMM
&& desc->bInterfaceSubClass == 2
- && desc->bInterfaceProtocol == 0xff)
+ && desc->bInterfaceProtocol == 0xff) {
+ c = udev->config[1].desc.bConfigurationValue;
continue;
+ }
c = udev->config[i].desc.bConfigurationValue;
break;
}
@@ -1384,7 +1366,6 @@ static int hub_port_disable(struct usb_device *hdev, int port)
int ret;
if (hdev->children[port]) {
- /* FIXME need disconnect() for NOTATTACHED device */
usb_set_device_state(hdev->children[port],
USB_STATE_NOTATTACHED);
}
@@ -1396,6 +1377,33 @@ static int hub_port_disable(struct usb_device *hdev, int port)
return ret;
}
+/*
+ * Disable a port and mark a logical connect-change event, so that some
+ * time later khubd will disconnect() any existing usb_device on the port
+ * and will re-enumerate if there actually is a device attached.
+ */
+static void hub_port_logical_disconnect(struct usb_device *hdev, int port)
+{
+ struct usb_hub *hub;
+
+ dev_dbg(hubdev(hdev), "logical disconnect on port %d\n", port + 1);
+ hub_port_disable(hdev, port);
+
+ /* FIXME let caller ask to power down the port:
+ * - some devices won't enumerate without a VBUS power cycle
+ * - SRP saves power that way
+ * - usb_suspend_device(dev,PM_SUSPEND_DISK)
+ * That's easy if this hub can switch power per-port, and
+ * khubd reactivates the port later (timer, SRP, etc).
+ * Powerdown must be optional, because of reset/DFU.
+ */
+
+ hub = usb_get_intfdata(hdev->actconfig->interface[0]);
+ set_bit(port, hub->change_bits);
+ kick_khubd(hub);
+}
+
+
#ifdef CONFIG_USB_SUSPEND
/*
@@ -1413,8 +1421,8 @@ static int hub_port_suspend(struct usb_device *hdev, int port)
int status;
struct usb_device *udev;
- udev = hdev->children[port - 1];
- // dev_dbg(hubdev(hdev), "suspend port %d\n", port);
+ udev = hdev->children[port];
+ // dev_dbg(hubdev(hdev), "suspend port %d\n", port + 1);
/* enable remote wakeup when appropriate; this lets the device
* wake up the upstream hub (including maybe the root hub).
@@ -1439,11 +1447,11 @@ static int hub_port_suspend(struct usb_device *hdev, int port)
}
/* see 7.1.7.6 */
- status = set_port_feature(hdev, port, USB_PORT_FEAT_SUSPEND);
+ status = set_port_feature(hdev, port + 1, USB_PORT_FEAT_SUSPEND);
if (status) {
dev_dbg(hubdev(hdev),
"can't suspend port %d, status %d\n",
- port, status);
+ port + 1, status);
/* paranoia: "should not happen" */
(void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
@@ -1477,16 +1485,14 @@ static int __usb_suspend_device (struct usb_device *udev, int port, u32 state)
{
int status;
+ /* caller owns the udev device lock */
if (port < 0)
return port;
- /* NOTE: udev->serialize released on all real returns! */
-
if (state <= udev->dev.power.power_state
|| state < PM_SUSPEND_MEM
|| udev->state == USB_STATE_SUSPENDED
|| udev->state == USB_STATE_NOTATTACHED) {
- up(&udev->serialize);
return 0;
}
@@ -1562,11 +1568,10 @@ static int __usb_suspend_device (struct usb_device *udev, int port, u32 state)
else
status = -EOPNOTSUPP;
} else
- status = hub_port_suspend(udev->parent, port + 1);
+ status = hub_port_suspend(udev->parent, port);
if (status == 0)
udev->dev.power.power_state = state;
- up(&udev->serialize);
return status;
}
@@ -1590,7 +1595,15 @@ static int __usb_suspend_device (struct usb_device *udev, int port, u32 state)
*/
int usb_suspend_device(struct usb_device *udev, u32 state)
{
- return __usb_suspend_device(udev, locktree(udev), state);
+ int port, status;
+
+ port = locktree(udev);
+ if (port < 0)
+ return port;
+
+ status = __usb_suspend_device(udev, port, state);
+ usb_unlock_device(udev);
+ return status;
}
/*
@@ -1603,7 +1616,7 @@ static int finish_port_resume(struct usb_device *udev)
int status;
u16 devstatus;
- /* caller owns udev->serialize */
+ /* caller owns the udev device lock */
dev_dbg(&udev->dev, "usb resume\n");
udev->dev.power.power_state = PM_SUSPEND_ON;
@@ -1687,15 +1700,15 @@ hub_port_resume(struct usb_device *hdev, int port)
int status;
struct usb_device *udev;
- udev = hdev->children[port - 1];
- // dev_dbg(hubdev(hdev), "resume port %d\n", port);
+ udev = hdev->children[port];
+ // dev_dbg(hubdev(hdev), "resume port %d\n", port + 1);
/* see 7.1.7.7; affects power usage, but not budgeting */
- status = clear_port_feature(hdev, port, USB_PORT_FEAT_SUSPEND);
+ status = clear_port_feature(hdev, port + 1, USB_PORT_FEAT_SUSPEND);
if (status) {
dev_dbg(&hdev->actconfig->interface[0]->dev,
"can't resume port %d, status %d\n",
- port, status);
+ port + 1, status);
} else {
u16 devstatus;
u16 portchange;
@@ -1713,7 +1726,7 @@ hub_port_resume(struct usb_device *hdev, int port)
* sequence.
*/
devstatus = portchange = 0;
- status = hub_port_status(hdev, port - 1,
+ status = hub_port_status(hdev, port,
&devstatus, &portchange);
if (status < 0
|| (devstatus & LIVE_FLAGS) != LIVE_FLAGS
@@ -1721,7 +1734,7 @@ hub_port_resume(struct usb_device *hdev, int port)
) {
dev_dbg(&hdev->actconfig->interface[0]->dev,
"port %d status %04x.%04x after resume, %d\n",
- port, portchange, devstatus, status);
+ port + 1, portchange, devstatus, status);
} else {
/* TRSMRCY = 10 msec */
msleep(10);
@@ -1729,7 +1742,7 @@ hub_port_resume(struct usb_device *hdev, int port)
}
}
if (status < 0)
- status = hub_port_disable(hdev, port);
+ hub_port_logical_disconnect(hdev, port);
return status;
}
@@ -1773,7 +1786,7 @@ int usb_resume_device(struct usb_device *udev)
->actconfig->interface[0]);
}
} else if (udev->state == USB_STATE_SUSPENDED) {
- status = hub_port_resume(udev->parent, port + 1);
+ status = hub_port_resume(udev->parent, port);
} else {
status = 0;
udev->dev.power.power_state = PM_SUSPEND_ON;
@@ -1783,10 +1796,12 @@ int usb_resume_device(struct usb_device *udev)
status);
}
- up(&udev->serialize);
+ usb_unlock_device(udev);
/* rebind drivers that had no suspend() */
+ usb_lock_all_devices();
bus_rescan_devices(&usb_bus_type);
+ usb_unlock_all_devices();
return status;
}
@@ -1828,6 +1843,7 @@ static int hub_suspend(struct usb_interface *intf, u32 state)
continue;
down(&udev->serialize);
status = __usb_suspend_device(udev, port, state);
+ up(&udev->serialize);
if (status < 0)
dev_dbg(&intf->dev, "suspend port %d --> %d\n",
port, status);
@@ -1866,20 +1882,20 @@ static int hub_resume(struct usb_interface *intf)
continue;
down (&udev->serialize);
if (portstat & USB_PORT_STAT_SUSPEND)
- status = hub_port_resume(hdev, port + 1);
+ status = hub_port_resume(hdev, port);
else {
status = finish_port_resume(udev);
- if (status < 0)
- status = hub_port_disable(hdev, port);
- if (status < 0)
+ if (status < 0) {
dev_dbg(&intf->dev, "resume port %d --> %d\n",
- port, status);
+ port + 1, status);
+ hub_port_logical_disconnect(hdev, port);
+ }
}
up(&udev->serialize);
}
intf->dev.power.power_state = PM_SUSPEND_ON;
- hub_reactivate(hub);
+ hub_activate(hub);
return 0;
}
@@ -2011,7 +2027,7 @@ hub_port_init (struct usb_device *hdev, struct usb_device *udev, int port)
hdev->bus->b_hnp_enable = 0;
}
- retval = clear_port_feature(hdev, port, USB_PORT_FEAT_SUSPEND);
+ retval = clear_port_feature(hdev, port + 1, USB_PORT_FEAT_SUSPEND);
if (retval < 0 && retval != -EPIPE)
dev_dbg(&udev->dev, "can't clear suspend; %d\n", retval);
@@ -2443,10 +2459,10 @@ static void hub_events(void)
dev_dbg (hub_dev, "resetting for error %d\n",
hub->error);
- if (hub_reset(hub)) {
+ ret = usb_reset_device(hdev);
+ if (ret) {
dev_dbg (hub_dev,
- "can't reset; disconnecting\n");
- hub_start_disconnect(hdev);
+ "error resetting hub: %d\n", ret);
goto loop;
}
@@ -2502,15 +2518,17 @@ static void hub_events(void)
if (portchange & USB_PORT_STAT_C_SUSPEND) {
clear_port_feature(hdev, i + 1,
USB_PORT_FEAT_C_SUSPEND);
- if (hdev->children[i])
+ if (hdev->children[i]) {
ret = remote_wakeup(hdev->children[i]);
- else
+ if (ret < 0)
+ connect_change = 1;
+ } else {
ret = -ENODEV;
+ hub_port_disable(hdev, i);
+ }
dev_dbg (hub_dev,
"resume on port %d, status %d\n",
i + 1, ret);
- if (ret < 0)
- ret = hub_port_disable(hdev, i);
}
if (portchange & USB_PORT_STAT_C_OVERCURRENT) {
@@ -2554,7 +2572,7 @@ static void hub_events(void)
}
loop:
- up(&hdev->serialize);
+ usb_unlock_device(hdev);
usb_put_dev(hdev);
} /* end while (1) */
@@ -2707,13 +2725,15 @@ static int config_descriptors_changed(struct usb_device *udev)
*
* The caller must own the device lock. For example, it's safe to use
* this from a driver probe() routine after downloading new firmware.
+ * For calls that might not occur during probe(), drivers should lock
+ * the device using usb_lock_device_for_reset().
*/
-int __usb_reset_device(struct usb_device *udev)
+int usb_reset_device(struct usb_device *udev)
{
struct usb_device *parent = udev->parent;
struct usb_device_descriptor descriptor = udev->descriptor;
int i, ret, port = -1;
- struct usb_hub *hub;
+ int udev_is_a_hub = 0;
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED) {
@@ -2722,13 +2742,9 @@ int __usb_reset_device(struct usb_device *udev)
return -EINVAL;
}
- /* FIXME: This should be legal for regular hubs. Root hubs may
- * have special requirements. */
- if (udev->maxchild) {
- /* this requires hub- or hcd-specific logic;
- * see hub_reset() and OHCI hc_restart()
- */
- dev_dbg(&udev->dev, "%s for hub!\n", __FUNCTION__);
+ if (!parent) {
+ /* this requires hcd-specific logic; see OHCI hc_restart() */
+ dev_dbg(&udev->dev, "%s for root hub!\n", __FUNCTION__);
return -EISDIR;
}
@@ -2744,6 +2760,19 @@ int __usb_reset_device(struct usb_device *udev)
return -ENOENT;
}
+ /* If we're resetting an active hub, take some special actions */
+ if (udev->actconfig &&
+ udev->actconfig->interface[0]->dev.driver ==
+ &hub_driver.driver) {
+ udev_is_a_hub = 1;
+ hub_pre_reset(udev);
+ }
+
+ /* ep0 maxpacket size may change; let the HCD know about it.
+ * Other endpoints will be handled by re-enumeration. */
+ usb_disable_endpoint(udev, 0);
+ usb_disable_endpoint(udev, 0 + USB_DIR_IN);
+
ret = hub_port_init(parent, udev, port);
if (ret < 0)
goto re_enumerate;
@@ -2757,7 +2786,7 @@ int __usb_reset_device(struct usb_device *udev)
}
if (!udev->actconfig)
- return 0;
+ goto done;
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_CONFIGURATION, 0,
@@ -2791,32 +2820,12 @@ int __usb_reset_device(struct usb_device *udev)
}
}
+done:
+ if (udev_is_a_hub)
+ hub_post_reset(udev);
return 0;
re_enumerate:
- hub_port_disable(parent, port);
-
- hub = usb_get_intfdata(parent->actconfig->interface[0]);
- set_bit(port, hub->change_bits);
-
- spin_lock_irq(&hub_event_lock);
- if (list_empty(&hub->event_list)) {
- list_add_tail(&hub->event_list, &hub_event_list);
- wake_up(&khubd_wait);
- }
- spin_unlock_irq(&hub_event_lock);
-
+ hub_port_logical_disconnect(parent, port);
return -ENODEV;
}
-EXPORT_SYMBOL(__usb_reset_device);
-
-int usb_reset_device(struct usb_device *udev)
-{
- int r;
-
- down(&udev->serialize);
- r = __usb_reset_device(udev);
- up(&udev->serialize);
-
- return r;
-}
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index c9f57e334a4f..24452d66d576 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -4,7 +4,7 @@
* inode.c -- Inode/Dentry functions for the USB device file system.
*
* Copyright (C) 2000 Thomas Sailer (sailer@ife.ee.ethz.ch)
- * Copyright (C) 2001,2002 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001,2002,2004 Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -40,17 +40,15 @@
#include <linux/smp_lock.h>
#include <linux/parser.h>
#include <asm/byteorder.h>
+#include "usb.h"
static struct super_operations usbfs_ops;
static struct file_operations default_file_operations;
static struct inode_operations usbfs_dir_inode_operations;
-static struct vfsmount *usbdevfs_mount;
static struct vfsmount *usbfs_mount;
-static int usbdevfs_mount_count; /* = 0 */
static int usbfs_mount_count; /* = 0 */
static int ignore_mount = 0;
-static struct dentry *devices_usbdevfs_dentry;
static struct dentry *devices_usbfs_dentry;
static int num_buses; /* = 0 */
@@ -240,9 +238,6 @@ static int remount(struct super_block *sb, int *flags, char *data)
if (usbfs_mount && usbfs_mount->mnt_sb)
update_sb(usbfs_mount->mnt_sb);
- if (usbdevfs_mount && usbdevfs_mount->mnt_sb)
- update_sb(usbdevfs_mount->mnt_sb);
-
return 0;
}
@@ -561,28 +556,12 @@ static void fs_remove_file (struct dentry *dentry)
/* --------------------------------------------------------------------- */
-
-
-/*
- * The usbdevfs name is now deprecated (as of 2.5.1).
- * It will be removed when the 2.7.x development cycle is started.
- * You have been warned :)
- */
-static struct file_system_type usbdevice_fs_type;
-
static struct super_block *usb_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return get_sb_single(fs_type, flags, data, usbfs_fill_super);
}
-static struct file_system_type usbdevice_fs_type = {
- .owner = THIS_MODULE,
- .name = "usbdevfs",
- .get_sb = usb_get_sb,
- .kill_sb = kill_litter_super,
-};
-
static struct file_system_type usb_fs_type = {
.owner = THIS_MODULE,
.name = "usbfs",
@@ -603,16 +582,10 @@ static int create_special_files (void)
ignore_mount = 1;
/* create the devices special file */
- retval = simple_pin_fs("usbdevfs", &usbdevfs_mount, &usbdevfs_mount_count);
- if (retval) {
- err ("Unable to get usbdevfs mount");
- goto exit;
- }
-
retval = simple_pin_fs("usbfs", &usbfs_mount, &usbfs_mount_count);
if (retval) {
err ("Unable to get usbfs mount");
- goto error_clean_usbdevfs_mount;
+ goto exit;
}
ignore_mount = 0;
@@ -620,7 +593,7 @@ static int create_special_files (void)
parent = usbfs_mount->mnt_sb->s_root;
devices_usbfs_dentry = fs_create_file ("devices",
listmode | S_IFREG, parent,
- NULL, &usbdevfs_devices_fops,
+ NULL, &usbfs_devices_fops,
listuid, listgid);
if (devices_usbfs_dentry == NULL) {
err ("Unable to create devices usbfs file");
@@ -628,42 +601,19 @@ static int create_special_files (void)
goto error_clean_mounts;
}
- parent = usbdevfs_mount->mnt_sb->s_root;
- devices_usbdevfs_dentry = fs_create_file ("devices",
- listmode | S_IFREG, parent,
- NULL, &usbdevfs_devices_fops,
- listuid, listgid);
- if (devices_usbdevfs_dentry == NULL) {
- err ("Unable to create devices usbfs file");
- retval = -ENODEV;
- goto error_remove_file;
- }
-
goto exit;
-error_remove_file:
- fs_remove_file (devices_usbfs_dentry);
- devices_usbfs_dentry = NULL;
-
error_clean_mounts:
simple_release_fs(&usbfs_mount, &usbfs_mount_count);
-
-error_clean_usbdevfs_mount:
- simple_release_fs(&usbdevfs_mount, &usbdevfs_mount_count);
-
exit:
return retval;
}
static void remove_special_files (void)
{
- if (devices_usbdevfs_dentry)
- fs_remove_file (devices_usbdevfs_dentry);
if (devices_usbfs_dentry)
fs_remove_file (devices_usbfs_dentry);
- devices_usbdevfs_dentry = NULL;
devices_usbfs_dentry = NULL;
- simple_release_fs(&usbdevfs_mount, &usbdevfs_mount_count);
simple_release_fs(&usbfs_mount, &usbfs_mount_count);
}
@@ -671,11 +621,6 @@ void usbfs_update_special (void)
{
struct inode *inode;
- if (devices_usbdevfs_dentry) {
- inode = devices_usbdevfs_dentry->d_inode;
- if (inode)
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- }
if (devices_usbfs_dentry) {
inode = devices_usbfs_dentry->d_inode;
if (inode)
@@ -707,29 +652,16 @@ void usbfs_add_bus(struct usb_bus *bus)
return;
}
- parent = usbdevfs_mount->mnt_sb->s_root;
- bus->usbdevfs_dentry = fs_create_file (name, busmode | S_IFDIR, parent,
- bus, NULL, busuid, busgid);
- if (bus->usbdevfs_dentry == NULL) {
- err ("error creating usbdevfs bus entry");
- return;
- }
-
usbfs_update_special();
- usbdevfs_conn_disc_event();
+ usbfs_conn_disc_event();
}
-
void usbfs_remove_bus(struct usb_bus *bus)
{
if (bus->usbfs_dentry) {
fs_remove_file (bus->usbfs_dentry);
bus->usbfs_dentry = NULL;
}
- if (bus->usbdevfs_dentry) {
- fs_remove_file (bus->usbdevfs_dentry);
- bus->usbdevfs_dentry = NULL;
- }
--num_buses;
if (num_buses <= 0) {
@@ -738,7 +670,7 @@ void usbfs_remove_bus(struct usb_bus *bus)
}
usbfs_update_special();
- usbdevfs_conn_disc_event();
+ usbfs_conn_disc_event();
}
void usbfs_add_device(struct usb_device *dev)
@@ -750,20 +682,12 @@ void usbfs_add_device(struct usb_device *dev)
sprintf (name, "%03d", dev->devnum);
dev->usbfs_dentry = fs_create_file (name, devmode | S_IFREG,
dev->bus->usbfs_dentry, dev,
- &usbdevfs_device_file_operations,
+ &usbfs_device_file_operations,
devuid, devgid);
if (dev->usbfs_dentry == NULL) {
err ("error creating usbfs device entry");
return;
}
- dev->usbdevfs_dentry = fs_create_file (name, devmode | S_IFREG,
- dev->bus->usbdevfs_dentry, dev,
- &usbdevfs_device_file_operations,
- devuid, devgid);
- if (dev->usbdevfs_dentry == NULL) {
- err ("error creating usbdevfs device entry");
- return;
- }
/* Set the size of the device's file to be
* equal to the size of the device descriptors. */
@@ -775,11 +699,9 @@ void usbfs_add_device(struct usb_device *dev)
}
if (dev->usbfs_dentry->d_inode)
dev->usbfs_dentry->d_inode->i_size = i_size;
- if (dev->usbdevfs_dentry->d_inode)
- dev->usbdevfs_dentry->d_inode->i_size = i_size;
usbfs_update_special();
- usbdevfs_conn_disc_event();
+ usbfs_conn_disc_event();
}
void usbfs_remove_device(struct usb_device *dev)
@@ -791,10 +713,6 @@ void usbfs_remove_device(struct usb_device *dev)
fs_remove_file (dev->usbfs_dentry);
dev->usbfs_dentry = NULL;
}
- if (dev->usbdevfs_dentry) {
- fs_remove_file (dev->usbdevfs_dentry);
- dev->usbdevfs_dentry = NULL;
- }
while (!list_empty(&dev->filelist)) {
ds = list_entry(dev->filelist.next, struct dev_state, list);
list_del_init(&ds->list);
@@ -807,51 +725,38 @@ void usbfs_remove_device(struct usb_device *dev)
}
}
usbfs_update_special();
- usbdevfs_conn_disc_event();
+ usbfs_conn_disc_event();
}
/* --------------------------------------------------------------------- */
-#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *usbdir = NULL;
-#endif
int __init usbfs_init(void)
{
int retval;
- retval = usb_register(&usbdevfs_driver);
+ retval = usb_register(&usbfs_driver);
if (retval)
return retval;
retval = register_filesystem(&usb_fs_type);
if (retval) {
- usb_deregister(&usbdevfs_driver);
- return retval;
- }
- retval = register_filesystem(&usbdevice_fs_type);
- if (retval) {
- unregister_filesystem(&usb_fs_type);
- usb_deregister(&usbdevfs_driver);
+ usb_deregister(&usbfs_driver);
return retval;
}
-#ifdef CONFIG_PROC_FS
- /* create mount point for usbdevfs */
+ /* create mount point for usbfs */
usbdir = proc_mkdir("usb", proc_bus);
-#endif
return 0;
}
void usbfs_cleanup(void)
{
- usb_deregister(&usbdevfs_driver);
+ usb_deregister(&usbfs_driver);
unregister_filesystem(&usb_fs_type);
- unregister_filesystem(&usbdevice_fs_type);
-#ifdef CONFIG_PROC_FS
if (usbdir)
remove_proc_entry("usb", proc_bus);
-#endif
}
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 98695c68d687..f2cd4770eb4f 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -17,6 +17,8 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
#include <asm/byteorder.h>
#include "hcd.h" /* for usbcore internals */
@@ -623,6 +625,20 @@ int usb_get_string(struct usb_device *dev, unsigned short langid,
return result;
}
+static void usb_try_string_workarounds(unsigned char *buf, int *length)
+{
+ int newlength, oldlength = *length;
+
+ for (newlength = 2; newlength + 1 < oldlength; newlength += 2)
+ if (!isprint(buf[newlength]) || buf[newlength + 1])
+ break;
+
+ if (newlength > 2) {
+ buf[0] = newlength;
+ *length = newlength;
+ }
+}
+
static int usb_string_sub(struct usb_device *dev, unsigned int langid,
unsigned int index, unsigned char *buf)
{
@@ -634,19 +650,26 @@ static int usb_string_sub(struct usb_device *dev, unsigned int langid,
/* If that failed try to read the descriptor length, then
* ask for just that many bytes */
- if (rc < 0) {
+ if (rc < 2) {
rc = usb_get_string(dev, langid, index, buf, 2);
if (rc == 2)
rc = usb_get_string(dev, langid, index, buf, buf[0]);
}
- if (rc >= 0) {
+ if (rc >= 2) {
+ if (!buf[0] && !buf[1])
+ usb_try_string_workarounds(buf, &rc);
+
/* There might be extra junk at the end of the descriptor */
if (buf[0] < rc)
rc = buf[0];
- if (rc < 2)
- rc = -EINVAL;
+
+ rc = rc - (rc & 1); /* force a multiple of two */
}
+
+ if (rc < 2)
+ rc = (rc < 0 ? rc : -EINVAL);
+
return rc;
}
@@ -724,6 +747,9 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
buf[idx] = 0;
err = idx;
+ if (tbuf[1] != USB_DT_STRING)
+ dev_dbg(&dev->dev, "wrong descriptor type %02x for string %d (\"%s\")\n", tbuf[1], index, buf);
+
errout:
kfree(tbuf);
return err;
@@ -1132,6 +1158,8 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
* use usb_set_interface() on the interfaces it claims. Resetting the whole
* configuration would affect other drivers' interfaces.
*
+ * The caller must own the device lock.
+ *
* Returns zero on success, else a negative error code.
*/
int usb_reset_configuration(struct usb_device *dev)
@@ -1142,9 +1170,9 @@ int usb_reset_configuration(struct usb_device *dev)
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
- /* caller must own dev->serialize (config won't change)
- * and the usb bus readlock (so driver bindings are stable);
- * so calls during probe() are fine
+ /* caller must have locked the device and must own
+ * the usb bus readlock (so driver bindings are stable);
+ * calls during probe() are fine
*/
for (i = 1; i < 16; ++i) {
@@ -1199,7 +1227,7 @@ static void release_interface(struct device *dev)
* usb_set_configuration - Makes a particular device setting be current
* @dev: the device whose configuration is being updated
* @configuration: the configuration being chosen.
- * Context: !in_interrupt(), caller holds dev->serialize
+ * Context: !in_interrupt(), caller owns the device lock
*
* This is used to enable non-default device modes. Not all devices
* use this kind of configurability; many devices only have one
@@ -1220,8 +1248,8 @@ static void release_interface(struct device *dev)
* usb_set_interface().
*
* This call is synchronous. The calling context must be able to sleep,
- * and must not hold the driver model lock for USB; usb device driver
- * probe() methods may not use this routine.
+ * must own the device lock, and must not hold the driver model's USB
+ * bus rwsem; usb device driver probe() methods cannot use this routine.
*
* Returns zero on success, or else the status code returned by the
* underlying call that failed. On succesful completion, each interface
@@ -1236,8 +1264,6 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
struct usb_interface **new_interfaces = NULL;
int n, nintf;
- /* dev->serialize guards all config changes */
-
for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
if (dev->config[i].desc.bConfigurationValue == configuration) {
cp = &dev->config[i];
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 78c5ca2f1051..bae974d587ae 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -27,11 +27,13 @@
static ssize_t show_##field (struct device *dev, char *buf) \
{ \
struct usb_device *udev; \
+ struct usb_host_config *actconfig; \
\
udev = to_usb_device (dev); \
- if (udev->actconfig) \
+ actconfig = udev->actconfig; \
+ if (actconfig) \
return sprintf (buf, format_string, \
- udev->actconfig->desc.field * multiplier); \
+ actconfig->desc.field * multiplier); \
else \
return 0; \
} \
@@ -44,6 +46,28 @@ usb_actconfig_attr (bNumInterfaces, 1, "%2d\n")
usb_actconfig_attr (bmAttributes, 1, "%2x\n")
usb_actconfig_attr (bMaxPower, 2, "%3dmA\n")
+#define usb_actconfig_str(name, field) \
+static ssize_t show_##name(struct device *dev, char *buf) \
+{ \
+ struct usb_device *udev; \
+ struct usb_host_config *actconfig; \
+ int len; \
+ \
+ udev = to_usb_device (dev); \
+ actconfig = udev->actconfig; \
+ if (!actconfig) \
+ return 0; \
+ len = usb_string(udev, actconfig->desc.field, buf, PAGE_SIZE); \
+ if (len < 0) \
+ return 0; \
+ buf[len] = '\n'; \
+ buf[len+1] = 0; \
+ return len+1; \
+} \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+
+usb_actconfig_str (configuration, iConfiguration)
+
/* configuration value is always present, and r/w */
usb_actconfig_show(bConfigurationValue, 1, "%u\n");
@@ -55,9 +79,9 @@ set_bConfigurationValue (struct device *dev, const char *buf, size_t count)
if (sscanf (buf, "%u", &config) != 1 || config > 255)
return -EINVAL;
- down(&udev->serialize);
+ usb_lock_device(udev);
value = usb_set_configuration (udev, config);
- up(&udev->serialize);
+ usb_unlock_device(udev);
return (value < 0) ? value : count;
}
@@ -198,6 +222,7 @@ void usb_create_sysfs_dev_files (struct usb_device *udev)
device_create_file (dev, &dev_attr_product);
if (udev->descriptor.iSerialNumber)
device_create_file (dev, &dev_attr_serial);
+ device_create_file (dev, &dev_attr_configuration);
}
void usb_remove_sysfs_dev_files (struct usb_device *udev)
@@ -212,6 +237,7 @@ void usb_remove_sysfs_dev_files (struct usb_device *udev)
device_remove_file(dev, &dev_attr_product);
if (udev->descriptor.iSerialNumber)
device_remove_file(dev, &dev_attr_serial);
+ device_remove_file (dev, &dev_attr_configuration);
}
/* Interface fields */
@@ -231,7 +257,26 @@ usb_intf_attr (bNumEndpoints, "%02x\n")
usb_intf_attr (bInterfaceClass, "%02x\n")
usb_intf_attr (bInterfaceSubClass, "%02x\n")
usb_intf_attr (bInterfaceProtocol, "%02x\n")
-usb_intf_attr (iInterface, "%02x\n")
+
+#define usb_intf_str(name, field) \
+static ssize_t show_##name(struct device *dev, char *buf) \
+{ \
+ struct usb_interface *intf; \
+ struct usb_device *udev; \
+ int len; \
+ \
+ intf = to_usb_interface (dev); \
+ udev = interface_to_usbdev (intf); \
+ len = usb_string(udev, intf->cur_altsetting->desc.field, buf, PAGE_SIZE);\
+ if (len < 0) \
+ return 0; \
+ buf[len] = '\n'; \
+ buf[len+1] = 0; \
+ return len+1; \
+} \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+
+usb_intf_str (interface, iInterface);
static struct attribute *intf_attrs[] = {
&dev_attr_bInterfaceNumber.attr,
@@ -240,7 +285,6 @@ static struct attribute *intf_attrs[] = {
&dev_attr_bInterfaceClass.attr,
&dev_attr_bInterfaceSubClass.attr,
&dev_attr_bInterfaceProtocol.attr,
- &dev_attr_iInterface.attr,
NULL,
};
static struct attribute_group intf_attr_grp = {
@@ -250,9 +294,17 @@ static struct attribute_group intf_attr_grp = {
void usb_create_sysfs_intf_files (struct usb_interface *intf)
{
sysfs_create_group(&intf->dev.kobj, &intf_attr_grp);
+
+ if (intf->cur_altsetting->desc.iInterface)
+ device_create_file(&intf->dev, &dev_attr_interface);
+
}
void usb_remove_sysfs_intf_files (struct usb_interface *intf)
{
sysfs_remove_group(&intf->dev.kobj, &intf_attr_grp);
+
+ if (intf->cur_altsetting->desc.iInterface)
+ device_remove_file(&intf->dev, &dev_attr_interface);
+
}
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 3c14361bbeb3..f57de4dca2bf 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -451,6 +451,11 @@ int usb_unlink_urb(struct urb *urb)
if (!urb)
return -EINVAL;
if (!(urb->transfer_flags & URB_ASYNC_UNLINK)) {
+#ifdef CONFIG_DEBUG_KERNEL
+ printk(KERN_NOTICE "usb_unlink_urb() is deprecated for "
+ "synchronous unlinks. Use usb_kill_urb() instead.\n");
+ WARN_ON(1);
+#endif
usb_kill_urb(urb);
return 0;
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 138541225604..ca0b29d0ac1f 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -39,6 +39,7 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
+#include <linux/rwsem.h>
#include <linux/usb.h>
#include <asm/io.h>
@@ -62,6 +63,8 @@ const char *usbcore_name = "usbcore";
int nousb; /* Disable USB when built into kernel image */
/* Not honored on modular build */
+static DECLARE_RWSEM(usb_all_devices_rwsem);
+
static int generic_probe (struct device *dev)
{
@@ -100,7 +103,10 @@ int usb_probe_interface(struct device *dev)
id = usb_match_id (intf, driver->id_table);
if (id) {
dev_dbg (dev, "%s - got id\n", __FUNCTION__);
+ intf->condition = USB_INTERFACE_BINDING;
error = driver->probe (intf, id);
+ intf->condition = error ? USB_INTERFACE_UNBOUND :
+ USB_INTERFACE_BOUND;
}
return error;
@@ -112,6 +118,8 @@ int usb_unbind_interface(struct device *dev)
struct usb_interface *intf = to_usb_interface(dev);
struct usb_driver *driver = to_usb_driver(intf->dev.driver);
+ intf->condition = USB_INTERFACE_UNBINDING;
+
/* release all urbs for this interface */
usb_disable_interface(interface_to_usbdev(intf), intf);
@@ -123,6 +131,7 @@ int usb_unbind_interface(struct device *dev)
intf->altsetting[0].desc.bInterfaceNumber,
0);
usb_set_intfdata(intf, NULL);
+ intf->condition = USB_INTERFACE_UNBOUND;
return 0;
}
@@ -153,7 +162,9 @@ int usb_register(struct usb_driver *new_driver)
new_driver->driver.remove = usb_unbind_interface;
new_driver->driver.owner = new_driver->owner;
+ usb_lock_all_devices();
retval = driver_register(&new_driver->driver);
+ usb_unlock_all_devices();
if (!retval) {
pr_info("%s: registered new driver %s\n",
@@ -182,7 +193,9 @@ void usb_deregister(struct usb_driver *driver)
{
pr_info("%s: deregistering driver %s\n", usbcore_name, driver->name);
+ usb_lock_all_devices();
driver_unregister (&driver->driver);
+ usb_unlock_all_devices();
usbfs_update_special();
}
@@ -204,7 +217,7 @@ void usb_deregister(struct usb_driver *driver)
* alternate settings available for this interfaces.
*
* Don't call this function unless you are bound to one of the interfaces
- * on this device or you own the dev->serialize semaphore!
+ * on this device or you have locked the device!
*/
struct usb_interface *usb_ifnum_to_if(struct usb_device *dev, unsigned ifnum)
{
@@ -237,7 +250,7 @@ struct usb_interface *usb_ifnum_to_if(struct usb_device *dev, unsigned ifnum)
* drivers avoid such mistakes.
*
* Don't call this function unless you are bound to the intf interface
- * or you own the device's ->serialize semaphore!
+ * or you have locked the device!
*/
struct usb_host_interface *usb_altnum_to_altsetting(struct usb_interface *intf,
unsigned int altnum)
@@ -305,11 +318,12 @@ usb_epnum_to_ep_desc(struct usb_device *dev, unsigned epnum)
* way to bind to an interface is to return the private data from
* the driver's probe() method.
*
- * Callers must own the driver model's usb bus writelock. So driver
- * probe() entries don't need extra locking, but other call contexts
- * may need to explicitly claim that lock.
+ * Callers must own the device lock and the driver model's usb_bus_type.subsys
+ * writelock. So driver probe() entries don't need extra locking,
+ * but other call contexts may need to explicitly claim those locks.
*/
-int usb_driver_claim_interface(struct usb_driver *driver, struct usb_interface *iface, void* priv)
+int usb_driver_claim_interface(struct usb_driver *driver,
+ struct usb_interface *iface, void* priv)
{
struct device *dev = &iface->dev;
@@ -318,6 +332,7 @@ int usb_driver_claim_interface(struct usb_driver *driver, struct usb_interface *
dev->driver = &driver->driver;
usb_set_intfdata(iface, priv);
+ iface->condition = USB_INTERFACE_BOUND;
/* if interface was already added, bind now; else let
* the future device_add() bind it, bypassing probe()
@@ -338,8 +353,8 @@ int usb_driver_claim_interface(struct usb_driver *driver, struct usb_interface *
* also causes the driver disconnect() method to be called.
*
* This call is synchronous, and may not be used in an interrupt context.
- * Callers must own the usb_device serialize semaphore and the driver model's
- * usb bus writelock. So driver disconnect() entries don't need extra locking,
+ * Callers must own the device lock and the driver model's usb_bus_type.subsys
+ * writelock. So driver disconnect() entries don't need extra locking,
* but other call contexts may need to explicitly claim those locks.
*/
void usb_driver_release_interface(struct usb_driver *driver,
@@ -357,6 +372,7 @@ void usb_driver_release_interface(struct usb_driver *driver,
dev->driver = NULL;
usb_set_intfdata(iface, NULL);
+ iface->condition = USB_INTERFACE_UNBOUND;
}
/**
@@ -748,7 +764,10 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port)
init_MUTEX(&dev->serialize);
if (dev->bus->op->allocate)
- dev->bus->op->allocate(dev);
+ if (dev->bus->op->allocate(dev)) {
+ kfree(dev);
+ return NULL;
+ }
return dev;
}
@@ -819,6 +838,160 @@ void usb_put_intf(struct usb_interface *intf)
put_device(&intf->dev);
}
+
+/* USB device locking
+ *
+ * Although locking USB devices should be straightforward, it is
+ * complicated by the way the driver-model core works. When a new USB
+ * driver is registered or unregistered, the core will automatically
+ * probe or disconnect all matching interfaces on all USB devices while
+ * holding the USB subsystem writelock. There's no good way for us to
+ * tell which devices will be used or to lock them beforehand; our only
+ * option is to effectively lock all the USB devices.
+ *
+ * We do that by using a private rw-semaphore, usb_all_devices_rwsem.
+ * When locking an individual device you must first acquire the rwsem's
+ * readlock. When a driver is registered or unregistered the writelock
+ * must be held. These actions are encapsulated in the subroutines
+ * below, so all a driver needs to do is call usb_lock_device() and
+ * usb_unlock_device().
+ *
+ * Complications arise when several devices are to be locked at the same
+ * time. Only hub-aware drivers that are part of usbcore ever have to
+ * do this; nobody else needs to worry about it. The problem is that
+ * usb_lock_device() must not be called to lock a second device since it
+ * would acquire the rwsem's readlock reentrantly, leading to deadlock if
+ * another thread was waiting for the writelock. The solution is simple:
+ *
+ * When locking more than one device, call usb_lock_device()
+ * to lock the first one. Lock the others by calling
+ * down(&udev->serialize) directly.
+ *
+ * When unlocking multiple devices, use up(&udev->serialize)
+ * to unlock all but the last one. Unlock the last one by
+ * calling usb_unlock_device().
+ *
+ * When locking both a device and its parent, always lock the
+ * the parent first.
+ */
+
+/**
+ * usb_lock_device - acquire the lock for a usb device structure
+ * @udev: device that's being locked
+ *
+ * Use this routine when you don't hold any other device locks;
+ * to acquire nested inner locks call down(&udev->serialize) directly.
+ * This is necessary for proper interaction with usb_lock_all_devices().
+ */
+void usb_lock_device(struct usb_device *udev)
+{
+ down_read(&usb_all_devices_rwsem);
+ down(&udev->serialize);
+}
+
+/**
+ * usb_trylock_device - attempt to acquire the lock for a usb device structure
+ * @udev: device that's being locked
+ *
+ * Don't use this routine if you already hold a device lock;
+ * use down_trylock(&udev->serialize) instead.
+ * This is necessary for proper interaction with usb_lock_all_devices().
+ *
+ * Returns 1 if successful, 0 if contention.
+ */
+int usb_trylock_device(struct usb_device *udev)
+{
+ if (!down_read_trylock(&usb_all_devices_rwsem))
+ return 0;
+ if (down_trylock(&udev->serialize)) {
+ up_read(&usb_all_devices_rwsem);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * usb_lock_device_for_reset - cautiously acquire the lock for a
+ * usb device structure
+ * @udev: device that's being locked
+ * @iface: interface bound to the driver making the request (optional)
+ *
+ * Attempts to acquire the device lock, but fails if the device is
+ * NOTATTACHED or SUSPENDED, or if iface is specified and the interface
+ * is neither BINDING nor BOUND. Rather than sleeping to wait for the
+ * lock, the routine polls repeatedly. This is to prevent deadlock with
+ * disconnect; in some drivers (such as usb-storage) the disconnect()
+ * callback will block waiting for a device reset to complete.
+ *
+ * Returns a negative error code for failure, otherwise 1 or 0 to indicate
+ * that the device will or will not have to be unlocked. (0 can be
+ * returned when an interface is given and is BINDING, because in that
+ * case the driver already owns the device lock.)
+ */
+int usb_lock_device_for_reset(struct usb_device *udev,
+ struct usb_interface *iface)
+{
+ if (udev->state == USB_STATE_NOTATTACHED)
+ return -ENODEV;
+ if (udev->state == USB_STATE_SUSPENDED)
+ return -EHOSTUNREACH;
+ if (iface) {
+ switch (iface->condition) {
+ case USB_INTERFACE_BINDING:
+ return 0;
+ case USB_INTERFACE_BOUND:
+ break;
+ default:
+ return -EINTR;
+ }
+ }
+
+ while (!usb_trylock_device(udev)) {
+ msleep(15);
+ if (udev->state == USB_STATE_NOTATTACHED)
+ return -ENODEV;
+ if (udev->state == USB_STATE_SUSPENDED)
+ return -EHOSTUNREACH;
+ if (iface && iface->condition != USB_INTERFACE_BOUND)
+ return -EINTR;
+ }
+ return 1;
+}
+
+/**
+ * usb_unlock_device - release the lock for a usb device structure
+ * @udev: device that's being unlocked
+ *
+ * Use this routine when releasing the only device lock you hold;
+ * to release inner nested locks call up(&udev->serialize) directly.
+ * This is necessary for proper interaction with usb_lock_all_devices().
+ */
+void usb_unlock_device(struct usb_device *udev)
+{
+ up(&udev->serialize);
+ up_read(&usb_all_devices_rwsem);
+}
+
+/**
+ * usb_lock_all_devices - acquire the lock for all usb device structures
+ *
+ * This is necessary when registering a new driver or probing a bus,
+ * since the driver-model core may try to use any usb_device.
+ */
+void usb_lock_all_devices(void)
+{
+ down_write(&usb_all_devices_rwsem);
+}
+
+/**
+ * usb_unlock_all_devices - release the lock for all usb device structures
+ */
+void usb_unlock_all_devices(void)
+{
+ up_write(&usb_all_devices_rwsem);
+}
+
+
static struct usb_device *match_device(struct usb_device *dev,
u16 vendor_id, u16 product_id)
{
@@ -840,8 +1013,10 @@ static struct usb_device *match_device(struct usb_device *dev,
/* look through all of the children of this device */
for (child = 0; child < dev->maxchild; ++child) {
if (dev->children[child]) {
+ down(&dev->children[child]->serialize);
ret_dev = match_device(dev->children[child],
vendor_id, product_id);
+ up(&dev->children[child]->serialize);
if (ret_dev)
goto exit;
}
@@ -876,7 +1051,9 @@ struct usb_device *usb_find_device(u16 vendor_id, u16 product_id)
bus = container_of(buslist, struct usb_bus, bus_list);
if (!bus->root_hub)
continue;
+ usb_lock_device(bus->root_hub);
dev = match_device(bus->root_hub, vendor_id, product_id);
+ usb_unlock_device(bus->root_hub);
if (dev)
goto exit;
}
@@ -1362,6 +1539,11 @@ EXPORT_SYMBOL(usb_put_dev);
EXPORT_SYMBOL(usb_get_dev);
EXPORT_SYMBOL(usb_hub_tt_clear_buffer);
+EXPORT_SYMBOL(usb_lock_device);
+EXPORT_SYMBOL(usb_trylock_device);
+EXPORT_SYMBOL(usb_lock_device_for_reset);
+EXPORT_SYMBOL(usb_unlock_device);
+
EXPORT_SYMBOL(usb_driver_claim_interface);
EXPORT_SYMBOL(usb_driver_release_interface);
EXPORT_SYMBOL(usb_match_id);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index f1dff4f4d5d6..30d2cf7dd762 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -22,8 +22,14 @@ extern int usb_get_device_descriptor(struct usb_device *dev,
unsigned int size);
extern int usb_set_configuration(struct usb_device *dev, int configuration);
-extern void usb_set_device_state(struct usb_device *udev,
- enum usb_device_state new_state);
+extern void usb_lock_all_devices(void);
+extern void usb_unlock_all_devices(void);
/* for labeling diagnostics */
extern const char *usbcore_name;
+
+/* usbfs stuff */
+extern struct usb_driver usbfs_driver;
+extern struct file_operations usbfs_devices_fops;
+extern struct file_operations usbfs_device_file_operations;
+extern void usbfs_conn_disc_event(void);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index decbca335389..c6e0693ed1d6 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -39,6 +39,17 @@ config USB_GADGET
If in doubt, say "N" and don't enable these drivers; most people
don't have this kind of hardware (except maybe inside Linux PDAs).
+config USB_GADGET_DEBUG_FILES
+ boolean "Debugging information files"
+ depends on USB_GADGET && PROC_FS
+ help
+ Some of the drivers in the "gadget" framework can expose
+ debugging information in files such as /proc/driver/udc
+ (for a peripheral controller). The information in these
+ files may help when you're troubleshooting or bringing up a
+ driver on a new board. Enable these files by choosing "Y"
+ here. If in doubt, or to conserve kernel memory, say "N".
+
#
# USB Peripheral Controller Support
#
@@ -206,10 +217,6 @@ config USB_OTG
Select this only if your OMAP board has a Mini-AB connector.
-config USB_OMAP_PROC
- boolean "/proc/driver/udc file"
- depends on USB_GADGET_OMAP
-
endchoice
config USB_GADGET_DUALSPEED
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 91e878642b9f..ed532f7176f9 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -770,7 +770,8 @@ usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
spin_lock_irqsave (&dum->lock, flags);
stop_activity (dum, driver);
- dum->port_status &= ~USB_PORT_STAT_CONNECTION;
+ dum->port_status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE |
+ USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED);
dum->port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
spin_unlock_irqrestore (&dum->lock, flags);
@@ -815,8 +816,8 @@ static int dummy_urb_enqueue (
struct dummy *dum;
unsigned long flags;
- /* patch to usb_sg_init() is in 2.5.60 */
- BUG_ON (!urb->transfer_buffer && urb->transfer_buffer_length);
+ if (!urb->transfer_buffer && urb->transfer_buffer_length)
+ return -EINVAL;
dum = container_of (hcd, struct dummy, hcd);
spin_lock_irqsave (&dum->lock, flags);
@@ -1102,10 +1103,10 @@ restart:
ep = find_endpoint(dum, address);
if (!ep) {
/* set_configuration() disagreement */
- dev_err (hardware,
+ dev_dbg (hardware,
"no ep configured for urb %p\n",
urb);
- maybe_set_status (urb, -ETIMEDOUT);
+ maybe_set_status (urb, -EPROTO);
goto return_urb;
}
@@ -1409,9 +1410,12 @@ static int dummy_hub_control (
case ClearPortFeature:
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- /* 20msec resume signaling */
- dum->resuming = 1;
- dum->re_timeout = jiffies + ((HZ * 20)/1000);
+ if (dum->port_status & (1 << USB_PORT_FEAT_SUSPEND)) {
+ /* 20msec resume signaling */
+ dum->resuming = 1;
+ dum->re_timeout = jiffies +
+ msecs_to_jiffies(20);
+ }
break;
case USB_PORT_FEAT_POWER:
dum->port_status = 0;
@@ -1440,7 +1444,7 @@ static int dummy_hub_control (
dum->port_status &= ~(1 << USB_PORT_FEAT_SUSPEND);
dum->resuming = 0;
dum->re_timeout = 0;
- if (dum->driver->resume) {
+ if (dum->driver && dum->driver->resume) {
spin_unlock (&dum->lock);
dum->driver->resume (&dum->gadget);
spin_lock (&dum->lock);
@@ -1481,11 +1485,15 @@ static int dummy_hub_control (
case SetPortFeature:
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- dum->port_status |= (1 << USB_PORT_FEAT_SUSPEND);
- if (dum->driver->suspend) {
- spin_unlock (&dum->lock);
- dum->driver->suspend (&dum->gadget);
- spin_lock (&dum->lock);
+ if ((dum->port_status & (1 << USB_PORT_FEAT_SUSPEND))
+ == 0) {
+ dum->port_status |=
+ (1 << USB_PORT_FEAT_SUSPEND);
+ if (dum->driver && dum->driver->suspend) {
+ spin_unlock (&dum->lock);
+ dum->driver->suspend (&dum->gadget);
+ spin_lock (&dum->lock);
+ }
}
break;
case USB_PORT_FEAT_RESET:
@@ -1502,7 +1510,7 @@ static int dummy_hub_control (
/* FIXME test that code path! */
}
/* 50msec reset signaling */
- dum->re_timeout = jiffies + ((HZ * 50)/1000);
+ dum->re_timeout = jiffies + msecs_to_jiffies(50);
/* FALLTHROUGH */
default:
dum->port_status |= (1 << wValue);
@@ -1790,4 +1798,3 @@ static void __exit cleanup (void)
the_controller = 0;
}
module_exit (cleanup);
-
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 763d0552146a..e3fec4a5a7f8 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -84,7 +84,7 @@
*/
#define DRIVER_DESC "Ethernet Gadget"
-#define DRIVER_VERSION "St Patrick's Day 2004"
+#define DRIVER_VERSION "Equinox 2004"
static const char shortname [] = "ether";
static const char driver_desc [] = DRIVER_DESC;
@@ -231,6 +231,10 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
#define DEV_CONFIG_CDC
#endif
+#ifdef CONFIG_USB_GADGET_N9604
+#define DEV_CONFIG_CDC
+#endif
+
/* For CDC-incapable hardware, choose the simple cdc subset.
* Anything that talks bulk (without notable bugs) can do this.
@@ -387,7 +391,7 @@ eth_config = {
.bConfigurationValue = DEV_CONFIG_VALUE,
.iConfiguration = STRING_CDC,
.bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
- .bMaxPower = 1,
+ .bMaxPower = 50,
};
#ifdef CONFIG_USB_ETH_RNDIS
@@ -401,7 +405,7 @@ rndis_config = {
.bConfigurationValue = DEV_RNDIS_CONFIG_VALUE,
.iConfiguration = STRING_RNDIS,
.bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
- .bMaxPower = 1,
+ .bMaxPower = 50,
};
#endif
@@ -1198,13 +1202,20 @@ eth_set_config (struct eth_dev *dev, unsigned number, int gfp_flags)
result = -EINVAL;
/* FALL THROUGH */
case 0:
- return result;
+ break;
}
- if (result)
- eth_reset_config (dev);
- else {
+ if (result) {
+ if (number)
+ eth_reset_config (dev);
+ usb_gadget_vbus_draw(dev->gadget,
+ dev->gadget->is_otg ? 8 : 100);
+ } else {
char *speed;
+ unsigned power;
+
+ power = 2 * eth_config.bMaxPower;
+ usb_gadget_vbus_draw(dev->gadget, power);
switch (gadget->speed) {
case USB_SPEED_FULL: speed = "full"; break;
@@ -1215,8 +1226,8 @@ eth_set_config (struct eth_dev *dev, unsigned number, int gfp_flags)
}
dev->config = number;
- INFO (dev, "%s speed config #%d: %s, using %s\n",
- speed, number, driver_desc,
+ INFO (dev, "%s speed config #%d: %d mA, %s, using %s\n",
+ speed, number, power, driver_desc,
dev->rndis
? "RNDIS"
: (dev->cdc
@@ -1375,8 +1386,9 @@ static void eth_setup_complete (struct usb_ep *ep, struct usb_request *req)
static void rndis_response_complete (struct usb_ep *ep, struct usb_request *req)
{
if (req->status || req->actual != req->length)
- DEBUG (dev, "rndis response complete --> %d, %d/%d\n",
- req->status, req->actual, req->length);
+ DEBUG ((struct eth_dev *) ep->driver_data,
+ "rndis response complete --> %d, %d/%d\n",
+ req->status, req->actual, req->length);
/* done sending after CDC_GET_ENCAPSULATED_RESPONSE */
}
@@ -2098,11 +2110,13 @@ static void rndis_send_media_state (struct eth_dev *dev, int connect)
}
}
-static void rndis_control_ack_complete (struct usb_ep *ep, struct usb_request *req)
+static void
+rndis_control_ack_complete (struct usb_ep *ep, struct usb_request *req)
{
if (req->status || req->actual != req->length)
- DEBUG (dev, "rndis control ack complete --> %d, %d/%d\n",
- req->status, req->actual, req->length);
+ DEBUG ((struct eth_dev *) ep->driver_data,
+ "rndis control ack complete --> %d, %d/%d\n",
+ req->status, req->actual, req->length);
usb_ep_free_buffer(ep, req->buf, req->dma, 8);
usb_ep_free_request(ep, req);
@@ -2334,6 +2348,8 @@ eth_bind (struct usb_gadget *gadget)
device_desc.bcdDevice = __constant_cpu_to_le16 (0x0208);
} else if (gadget_is_lh7a40x(gadget)) {
device_desc.bcdDevice = __constant_cpu_to_le16 (0x0209);
+ } else if (gadget_is_n9604(gadget)) {
+ device_desc.bcdDevice = __constant_cpu_to_le16 (0x020a);
} else {
/* can't assume CDC works. don't want to default to
* anything less functional on CDC-capable hardware,
@@ -2466,8 +2482,10 @@ autoconf_fail:
if (gadget->is_otg) {
otg_descriptor.bmAttributes |= USB_OTG_HNP,
eth_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ eth_config.bMaxPower = 4;
#ifdef CONFIG_USB_ETH_RNDIS
rndis_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ rndis_config.bMaxPower = 4;
#endif
}
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 6e8008fa43cb..d8c8ab7750ec 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -217,6 +217,7 @@
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/dcache.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <linux/file.h>
@@ -234,6 +235,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/suspend.h>
#include <linux/uts.h>
#include <linux/version.h>
#include <linux/wait.h>
@@ -248,7 +250,7 @@
#define DRIVER_DESC "File-backed Storage Gadget"
#define DRIVER_NAME "g_file_storage"
-#define DRIVER_VERSION "28 July 2004"
+#define DRIVER_VERSION "31 August 2004"
static const char longname[] = DRIVER_DESC;
static const char shortname[] = DRIVER_NAME;
@@ -866,6 +868,14 @@ config_desc = {
.bMaxPower = 1, // self-powered
};
+static struct usb_otg_descriptor
+otg_desc = {
+ .bLength = sizeof(otg_desc),
+ .bDescriptorType = USB_DT_OTG,
+
+ .bmAttributes = USB_OTG_SRP,
+};
+
/* There is only one interface. */
static struct usb_interface_descriptor
@@ -914,12 +924,14 @@ fs_intr_in_desc = {
};
static const struct usb_descriptor_header *fs_function[] = {
+ (struct usb_descriptor_header *) &otg_desc,
(struct usb_descriptor_header *) &intf_desc,
(struct usb_descriptor_header *) &fs_bulk_in_desc,
(struct usb_descriptor_header *) &fs_bulk_out_desc,
(struct usb_descriptor_header *) &fs_intr_in_desc,
NULL,
};
+#define FS_FUNCTION_PRE_EP_ENTRIES 2
#ifdef CONFIG_USB_GADGET_DUALSPEED
@@ -976,12 +988,14 @@ hs_intr_in_desc = {
};
static const struct usb_descriptor_header *hs_function[] = {
+ (struct usb_descriptor_header *) &otg_desc,
(struct usb_descriptor_header *) &intf_desc,
(struct usb_descriptor_header *) &hs_bulk_in_desc,
(struct usb_descriptor_header *) &hs_bulk_out_desc,
(struct usb_descriptor_header *) &hs_intr_in_desc,
NULL,
};
+#define HS_FUNCTION_PRE_EP_ENTRIES 2
/* Maxpacket and other transfer characteristics vary by speed. */
#define ep_desc(g,fs,hs) (((g)->speed==USB_SPEED_HIGH) ? (hs) : (fs))
@@ -1018,9 +1032,10 @@ static struct usb_gadget_strings stringtab = {
* and with code managing interfaces and their altsettings. They must
* also handle different speeds and other-speed requests.
*/
-static int populate_config_buf(enum usb_device_speed speed,
+static int populate_config_buf(struct usb_gadget *gadget,
u8 *buf, u8 type, unsigned index)
{
+ enum usb_device_speed speed = gadget->speed;
int len;
const struct usb_descriptor_header **function;
@@ -1036,6 +1051,10 @@ static int populate_config_buf(enum usb_device_speed speed,
#endif
function = fs_function;
+ /* for now, don't advertise srp-only devices */
+ if (!gadget->is_otg)
+ function++;
+
len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
((struct usb_config_descriptor *) buf)->bDescriptorType = type;
return len;
@@ -1366,7 +1385,7 @@ static int standard_setup_req(struct fsg_dev *fsg,
#ifdef CONFIG_USB_GADGET_DUALSPEED
get_config:
#endif
- value = populate_config_buf(fsg->gadget->speed,
+ value = populate_config_buf(fsg->gadget,
req->buf,
ctrl->wValue >> 8,
ctrl->wValue & 0xff);
@@ -1523,6 +1542,8 @@ static int sleep_thread(struct fsg_dev *fsg)
rc = wait_event_interruptible(fsg->thread_wqh,
fsg->thread_wakeup_needed);
fsg->thread_wakeup_needed = 0;
+ if (current->flags & PF_FREEZE)
+ refrigerator(PF_FREEZE);
return (rc ? -EINTR : 0);
}
@@ -2280,8 +2301,7 @@ static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
}
/* Wait for a short time and then try again */
- set_current_state(TASK_INTERRUPTIBLE);
- if (schedule_timeout(HZ / 10) != 0)
+ if (msleep_interruptible(100) != 0)
return -EINTR;
rc = usb_ep_set_halt(fsg->bulk_in);
}
@@ -3713,8 +3733,10 @@ static int __init check_parameters(struct fsg_dev *fsg)
mod_data.release = __constant_cpu_to_le16(0x0307);
else if (gadget_is_omap(fsg->gadget))
mod_data.release = __constant_cpu_to_le16(0x0308);
- else if (gadget_is_lh7a40x(gadget))
+ else if (gadget_is_lh7a40x(fsg->gadget))
mod_data.release = __constant_cpu_to_le16 (0x0309);
+ else if (gadget_is_n9604(fsg->gadget))
+ mod_data.release = __constant_cpu_to_le16 (0x030a);
else {
WARN(fsg, "controller '%s' not recognized\n",
fsg->gadget->name);
@@ -3882,10 +3904,10 @@ static int __init fsg_bind(struct usb_gadget *gadget)
intf_desc.bNumEndpoints = i;
intf_desc.bInterfaceSubClass = mod_data.protocol_type;
intf_desc.bInterfaceProtocol = mod_data.transport_type;
- fs_function[i+1] = NULL;
+ fs_function[i + FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
#ifdef CONFIG_USB_GADGET_DUALSPEED
- hs_function[i+1] = NULL;
+ hs_function[i + HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
/* Assume ep0 uses the same maxpacket value for both speeds */
dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
@@ -3896,6 +3918,11 @@ static int __init fsg_bind(struct usb_gadget *gadget)
hs_intr_in_desc.bEndpointAddress = fs_intr_in_desc.bEndpointAddress;
#endif
+ if (gadget->is_otg) {
+ otg_desc.bmAttributes |= USB_OTG_HNP,
+ config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ }
+
rc = -ENOMEM;
/* Allocate the request and buffer for endpoint 0 */
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index e24e2f9ad28e..f6273701fec6 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -62,6 +62,12 @@
#define gadget_is_omap(g) 0
#endif
+#ifdef CONFIG_USB_GADGET_N9604
+#define gadget_is_n9604(g) !strcmp("n9604_udc", (g)->name)
+#else
+#define gadget_is_n9604(g) 0
+#endif
+
// CONFIG_USB_GADGET_AT91RM9200
// CONFIG_USB_GADGET_SX2
// CONFIG_USB_GADGET_AU1X00
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 61ce5b2eea41..f9cdb23da15b 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1092,13 +1092,7 @@ static inline char *dmastr(void)
return "(dma IN)";
}
-/* if we're trying to save space, don't bother with this proc file */
-
-#if defined(CONFIG_PROC_FS) && !defined(CONFIG_EMBEDDED)
-# define UDC_PROC_FILE
-#endif
-
-#ifdef UDC_PROC_FILE
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static const char proc_node_name [] = "driver/udc";
@@ -1312,7 +1306,7 @@ done:
return count - size;
}
-#endif /* UDC_PROC_FILE */
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/*-------------------------------------------------------------------------*/
@@ -1815,7 +1809,7 @@ static void goku_remove(struct pci_dev *pdev)
usb_gadget_unregister_driver(dev->driver);
}
-#ifdef UDC_PROC_FILE
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
remove_proc_entry(proc_node_name, NULL);
#endif
if (dev->regs)
@@ -1933,7 +1927,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
-#ifdef UDC_PROC_FILE
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
#endif
diff --git a/drivers/usb/gadget/lh7a40x_udc.c b/drivers/usb/gadget/lh7a40x_udc.c
index 772627e97a00..0def9f70e889 100644
--- a/drivers/usb/gadget/lh7a40x_udc.c
+++ b/drivers/usb/gadget/lh7a40x_udc.c
@@ -54,7 +54,6 @@ static const char ep0name[] = "ep0-control";
/*
Local definintions.
*/
-#define UDC_PROC_FILE
#ifndef NO_STATES
static char *state_names[] = {
@@ -192,7 +191,7 @@ static __inline__ void usb_clear(u32 val, u32 port)
*/
#define is_usb_connected() get_portc_pdr(2)
-#ifdef UDC_PROC_FILE
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static const char proc_node_name[] = "driver/udc";
@@ -248,12 +247,12 @@ udc_proc_read(char *page, char **start, off_t off, int count,
#define create_proc_files() create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
#define remove_proc_files() remove_proc_entry(proc_node_name, NULL)
-#else /* !UDC_PROC_FILE */
+#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
#define create_proc_files() do {} while (0)
#define remove_proc_files() do {} while (0)
-#endif /* UDC_PROC_FILE */
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/*
* udc_disable - disable USB device controller
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index d28de0b8ceba..ed6711d54c32 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -76,7 +76,6 @@
#define EP_DONTUSE 13 /* nonzero */
#define USE_RDK_LEDS /* GPIO pins control three LEDs */
-#define USE_SYSFS_DEBUG_FILES
static const char driver_name [] = "net2280";
@@ -117,7 +116,7 @@ module_param (fifo_mode, ushort, 0644);
#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
-#if defined(USE_SYSFS_DEBUG_FILES) || defined (DEBUG)
+#if defined(CONFIG_USB_GADGET_DEBUG_FILES) || defined (DEBUG)
static char *type_string (u8 bmAttributes)
{
switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
@@ -1450,7 +1449,12 @@ static const struct usb_gadget_ops net2280_ops = {
/*-------------------------------------------------------------------------*/
-#ifdef USE_SYSFS_DEBUG_FILES
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+/* FIXME move these into procfs, and use seq_file.
+ * Sysfs _still_ doesn't behave for arbitrarily sized files,
+ * and also doesn't help products using this with 2.4 kernels.
+ */
/* "function" sysfs attribute */
static ssize_t
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index e40089d79365..c321c542f0df 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -1200,7 +1200,8 @@ static void pullup_enable(struct omap_udc *udc)
{
UDC_SYSCON1_REG |= UDC_PULLUP_EN;
#ifndef CONFIG_USB_OTG
- OTG_CTRL_REG |= OTG_BSESSVLD;
+ if (!cpu_is_omap15xx())
+ OTG_CTRL_REG |= OTG_BSESSVLD;
#endif
UDC_IRQ_EN_REG = UDC_DS_CHG_IE;
}
@@ -1208,7 +1209,8 @@ static void pullup_enable(struct omap_udc *udc)
static void pullup_disable(struct omap_udc *udc)
{
#ifndef CONFIG_USB_OTG
- OTG_CTRL_REG &= ~OTG_BSESSVLD;
+ if (!cpu_is_omap15xx())
+ OTG_CTRL_REG &= ~OTG_BSESSVLD;
#endif
UDC_IRQ_EN_REG = UDC_DS_CHG_IE;
UDC_SYSCON1_REG &= ~UDC_PULLUP_EN;
@@ -1688,7 +1690,7 @@ static void devstate_irq(struct omap_udc *udc, u16 irq_src)
}
change &= ~UDC_SUS;
}
- if (change & OTG_FLAGS) {
+ if (!cpu_is_omap15xx() && (change & OTG_FLAGS)) {
update_otg(udc);
change &= ~OTG_FLAGS;
}
@@ -1974,7 +1976,7 @@ EXPORT_SYMBOL(usb_gadget_unregister_driver);
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_USB_OMAP_PROC
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
#include <linux/seq_file.h>
@@ -2036,34 +2038,14 @@ static char *trx_mode(unsigned m)
}
}
-static int proc_udc_show(struct seq_file *s, void *_)
+static int proc_otg_show(struct seq_file *s)
{
u32 tmp;
- struct omap_ep *ep;
- unsigned long flags;
-
- spin_lock_irqsave(&udc->lock, flags);
- seq_printf(s, "%s, version: " DRIVER_VERSION
-#ifdef USE_ISO
- " (iso)"
-#endif
- "%s\n",
- driver_desc,
- use_dma ? " (dma)" : "");
-
- tmp = UDC_REV_REG & 0xff;
- seq_printf(s,
- "UDC rev %d.%d, OTG rev %d.%d, fifo mode %d, gadget %s\n"
- "hmc %d, transceiver %08x %s\n",
+ tmp = OTG_REV_REG;
+ seq_printf(s, "OTG rev %d.%d, transceiver_ctrl %08x\n",
tmp >> 4, tmp & 0xf,
- OTG_REV_REG >> 4, OTG_REV_REG & 0xf,
- fifo_mode,
- udc->driver ? udc->driver->driver.name : "(none)",
- HMC, USB_TRANSCEIVER_CTRL_REG,
- udc->transceiver ? udc->transceiver->label : "");
-
- /* OTG controller registers */
+ USB_TRANSCEIVER_CTRL_REG);
tmp = OTG_SYSCON_1_REG;
seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s,"
FOURBITS "\n", tmp,
@@ -2117,6 +2099,37 @@ static int proc_udc_show(struct seq_file *s, void *_)
seq_printf(s, "otg_outctrl %04x" "\n", tmp);
tmp = OTG_TEST_REG;
seq_printf(s, "otg_test %04x" "\n", tmp);
+}
+
+static int proc_udc_show(struct seq_file *s, void *_)
+{
+ u32 tmp;
+ struct omap_ep *ep;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ seq_printf(s, "%s, version: " DRIVER_VERSION
+#ifdef USE_ISO
+ " (iso)"
+#endif
+ "%s\n",
+ driver_desc,
+ use_dma ? " (dma)" : "");
+
+ tmp = UDC_REV_REG & 0xff;
+ seq_printf(s,
+ "UDC rev %d.%d, fifo mode %d, gadget %s\n"
+ "hmc %d, transceiver %s\n",
+ tmp >> 4, tmp & 0xf,
+ fifo_mode,
+ udc->driver ? udc->driver->driver.name : "(none)",
+ HMC,
+ udc->transceiver ? udc->transceiver->label : "");
+
+ /* OTG controller registers */
+ if (!cpu_is_omap15xx())
+ proc_otg_show(s);
tmp = UDC_SYSCON1_REG;
seq_printf(s, "\nsyscon1 %04x" EIGHTBITS "\n", tmp,
@@ -2496,41 +2509,51 @@ static int __init omap_udc_probe(struct device *dev)
return -EBUSY;
}
- INFO("OMAP UDC rev %d.%d, OTG rev %d.%d, %s receptacle\n",
+ INFO("OMAP UDC rev %d.%d, %s receptacle\n",
UDC_REV_REG >> 4, UDC_REV_REG & 0xf,
- OTG_REV_REG >> 4, OTG_REV_REG & 0xf,
config->otg ? "Mini-AB" : "B/Mini-B");
/* use the mode given to us by board init code */
- hmc = HMC;
- switch (hmc) {
- case 3:
- case 11:
- case 19:
- case 25:
- xceiv = otg_get_transceiver();
- if (!xceiv) {
- DBG("external transceiver not registered!\n");
- goto cleanup0;
- }
- type = xceiv->label;
- break;
- case 0: /* POWERUP DEFAULT == 0 */
- case 4:
- case 12:
- case 20:
- type = "INTEGRATED";
- break;
- case 21: /* internal loopback */
- type = "(loopback)";
- break;
- case 14: /* transceiverless */
- type = "(none)";
- break;
+ if (cpu_is_omap15xx()) {
+ hmc = HMC_1510;
+ type = "(unknown)";
- default:
- ERR("unrecognized UDC HMC mode %d\n", hmc);
- return -ENODEV;
+ /* FIXME may need a GPIO-0 handler to call
+ * usb_gadget_vbus_{dis,}connect() on us...
+ */
+ } else {
+ hmc = HMC_1610;
+ switch (hmc) {
+ case 3:
+ case 11:
+ case 19:
+ case 25:
+ xceiv = otg_get_transceiver();
+ if (!xceiv) {
+ DBG("external transceiver not registered!\n");
+ if (config->otg)
+ goto cleanup0;
+ type = "(unknown external)";
+ } else
+ type = xceiv->label;
+ break;
+ case 0: /* POWERUP DEFAULT == 0 */
+ case 4:
+ case 12:
+ case 20:
+ type = "INTEGRATED";
+ break;
+ case 21: /* internal loopback */
+ type = "(loopback)";
+ break;
+ case 14: /* transceiverless */
+ type = "(none)";
+ break;
+
+ default:
+ ERR("unrecognized UDC HMC mode %d\n", hmc);
+ return -ENODEV;
+ }
}
INFO("hmc mode %d, transceiver %s\n", hmc, type);
@@ -2671,13 +2694,6 @@ static struct device_driver udc_driver = {
static int __init udc_init(void)
{
- /* should work on many OMAP systems with at most minor changes,
- * but the 1510 doesn't have an OTG controller.
- */
- if (cpu_is_omap1510()) {
- DBG("no OMAP1510 support yet\n");
- return -ENODEV;
- }
INFO("%s, version: " DRIVER_VERSION "%s\n", driver_desc,
use_dma ? " (dma)" : "");
return driver_register(&udc_driver);
diff --git a/drivers/usb/gadget/omap_udc.h b/drivers/usb/gadget/omap_udc.h
index bd5420cd0b05..ca8572314f95 100644
--- a/drivers/usb/gadget/omap_udc.h
+++ b/drivers/usb/gadget/omap_udc.h
@@ -193,7 +193,14 @@ struct omap_udc {
/*-------------------------------------------------------------------------*/
-// #define HMC_1510 ((MOD_CONF_CTRL_0_REG >> 1) & 0x3f)
+#define MOD_CONF_CTRL_0_REG __REG32(MOD_CONF_CTRL_0)
+#define VBUS_W2FC_1510 (1 << 17) /* 0 gpio0, 1 dvdd2 pin */
+
+#define FUNC_MUX_CTRL_0_REG __REG32(FUNC_MUX_CTRL_0)
+#define VBUS_CTRL_1510 (1 << 19) /* 1 connected (software) */
+#define VBUS_MODE_1510 (1 << 18) /* 0 hardware, 1 software */
+
+#define HMC_1510 ((MOD_CONF_CTRL_0_REG >> 1) & 0x3f)
#define HMC_1610 (OTG_SYSCON_2_REG & 0x3f)
-#define HMC HMC_1610
+#define HMC (cpu_is_omap15xx() ? HMC_1510 : HMC_1610)
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index c1139b51db46..710f7a435f9e 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -92,10 +92,6 @@ static const char ep0name [] = "ep0";
// #define USE_OUT_DMA
// #define DISABLE_TEST_MODE
-#ifdef CONFIG_PROC_FS
-#define UDC_PROC_FILE
-#endif
-
#ifdef CONFIG_ARCH_IXP4XX
#undef USE_DMA
@@ -109,12 +105,6 @@ static const char ep0name [] = "ep0";
#include "pxa2xx_udc.h"
-#ifdef CONFIG_EMBEDDED
-/* few strings, and little code to use them */
-#undef DEBUG
-#undef UDC_PROC_FILE
-#endif
-
#ifdef USE_DMA
static int use_dma = 1;
module_param(use_dma, bool, 0);
@@ -1212,7 +1202,7 @@ static const struct usb_gadget_ops pxa2xx_udc_ops = {
/*-------------------------------------------------------------------------*/
-#ifdef UDC_PROC_FILE
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static const char proc_node_name [] = "driver/udc";
@@ -1368,11 +1358,12 @@ done:
#define remove_proc_files() \
remove_proc_entry(proc_node_name, NULL)
-#else /* !UDC_PROC_FILE */
+#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
+
#define create_proc_files() do {} while (0)
#define remove_proc_files() do {} while (0)
-#endif /* UDC_PROC_FILE */
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/* "function" sysfs attribute */
static ssize_t
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 1cd445a01d13..561ca545e4e5 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -70,8 +70,6 @@ MODULE_PARM_DESC (rndis_debug, "enable debugging");
#define RNDIS_MAX_CONFIGS 1
-static struct proc_dir_entry *rndis_connect_dir;
-static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
static rndis_params rndis_per_dev_params [RNDIS_MAX_CONFIGS];
@@ -1275,6 +1273,8 @@ int rndis_rm_hdr (u8 *buf, u32 *length)
return 0;
}
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
int rndis_proc_read (char *page, char **start, off_t off, int count, int *eof,
void *data)
{
@@ -1365,43 +1365,40 @@ int rndis_proc_write (struct file *file, const char __user *buffer,
return count;
}
+#define NAME_TEMPLATE "driver/rndis-%03d"
+
+static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
+
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+
+
int __init rndis_init (void)
{
u8 i;
- char name [4];
- /* FIXME this should probably be /proc/driver/rndis,
- * and only if debugging is enabled
- */
-
- if (!(rndis_connect_dir = proc_mkdir ("rndis", NULL))) {
- printk (KERN_ERR "%s: couldn't create /proc/rndis entry",
- __FUNCTION__);
- return -EIO;
- }
-
for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
- sprintf (name, "%03d", i);
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+ char name [20];
+
+ sprintf (name, NAME_TEMPLATE, i);
if (!(rndis_connect_state [i]
- = create_proc_entry (name, 0660,
- rndis_connect_dir)))
+ = create_proc_entry (name, 0660, NULL)))
{
DEBUG ("%s :remove entries", __FUNCTION__);
- for (i--; i > 0; i--) {
- sprintf (name, "%03d", i);
- remove_proc_entry (name, rndis_connect_dir);
+ while (i) {
+ sprintf (name, NAME_TEMPLATE, --i);
+ remove_proc_entry (name, NULL);
}
DEBUG ("\n");
-
- remove_proc_entry ("000", rndis_connect_dir);
- remove_proc_entry ("rndis", NULL);
return -EIO;
}
+
rndis_connect_state [i]->nlink = 1;
rndis_connect_state [i]->write_proc = rndis_proc_write;
rndis_connect_state [i]->read_proc = rndis_proc_read;
rndis_connect_state [i]->data = (void *)
(rndis_per_dev_params + i);
+#endif
rndis_per_dev_params [i].confignr = i;
rndis_per_dev_params [i].used = 0;
rndis_per_dev_params [i].state = RNDIS_UNINITIALIZED;
@@ -1415,14 +1412,14 @@ int __init rndis_init (void)
void rndis_exit (void)
{
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
u8 i;
- char name [4];
+ char name [20];
for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
- sprintf (name, "%03d", i);
- remove_proc_entry (name, rndis_connect_dir);
+ sprintf (name, NAME_TEMPLATE, i);
+ remove_proc_entry (name, NULL);
}
- remove_proc_entry ("rndis", NULL);
- return;
+#endif
}
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index a415a33ed117..69962c30a3a9 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -1188,6 +1188,8 @@ autoconf_fail:
device_desc.bcdDevice = __constant_cpu_to_le16 (0x0208);
} else if (gadget_is_lh7a40x(gadget)) {
device_desc.bcdDevice = __constant_cpu_to_le16 (0x0209);
+ } else if (gadget_is_n9604(gadget)) {
+ device_desc.bcdDevice = __constant_cpu_to_le16 (0x020a);
} else {
/* gadget zero is so simple (for now, no altsettings) that
* it SHOULD NOT have problems with bulk-capable hardware.
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index e25adf501967..03d427a672a4 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -155,7 +155,7 @@ MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
* before driver shutdown. But it also seems to be caused by bugs in cardbus
* bridge shutdown: shutting down the bridge before the devices using it.
*/
-static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec)
+static int handshake (void __iomem *ptr, u32 mask, u32 done, int usec)
{
u32 result;
@@ -341,8 +341,7 @@ static int ehci_hc_reset (struct usb_hcd *hcd)
spin_lock_init (&ehci->lock);
ehci->caps = hcd->regs;
- ehci->regs = (hcd->regs +
- HC_LENGTH (readl (&ehci->caps->hc_capbase)));
+ ehci->regs = hcd->regs + HC_LENGTH (readl (&ehci->caps->hc_capbase));
dbg_hcs_params (ehci, "reset");
dbg_hcc_params (ehci, "reset");
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 4ff7f868b157..86872c8877cb 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -81,7 +81,7 @@ static int ehci_hub_suspend (struct usb_hcd *hcd)
}
-/* caller owns root->serialize, and should reset/reinit on error */
+/* caller has locked the root hub, and should reset/reinit on error */
static int ehci_hub_resume (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
diff --git a/drivers/usb/host/hc_sl811.c b/drivers/usb/host/hc_sl811.c
index b57f1fe8258d..baf0e8086352 100644
--- a/drivers/usb/host/hc_sl811.c
+++ b/drivers/usb/host/hc_sl811.c
@@ -1343,15 +1343,11 @@ static int __init hci_hcd_init (void)
*****************************************************************/
static void __exit hci_hcd_cleanup (void)
{
- struct list_head *hci_l;
- hci_t *hci;
+ hci_t *hci, *tmp;
DBGFUNC ("Enter hci_hcd_cleanup\n");
- for (hci_l = hci_hcd_list.next; hci_l != &hci_hcd_list;) {
- hci = list_entry (hci_l, hci_t, hci_hcd_list);
- hci_l = hci_l->next;
+ list_for_each_entry_safe(hci, tmp, &hci_hcd_list, hci_hcd_list)
hc_release_hci (hci);
- }
}
module_init (hci_hcd_init);
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 947bf4a5ea03..ff1f80fd59c8 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -640,14 +640,14 @@ show_registers (struct class_device *class_dev, char *buf)
rdata = ohci_readl (&regs->fminterval);
temp = scnprintf (next, size,
"fmintvl 0x%08x %sFSMPS=0x%04x FI=0x%04x\n",
- rdata, (rdata >> 31) ? " FIT" : "",
+ rdata, (rdata >> 31) ? "FIT " : "",
(rdata >> 16) & 0xefff, rdata & 0xffff);
size -= temp;
next += temp;
rdata = ohci_readl (&regs->fmremaining);
temp = scnprintf (next, size, "fmremaining 0x%08x %sFR=0x%04x\n",
- rdata, (rdata >> 31) ? " FRT" : "",
+ rdata, (rdata >> 31) ? "FRT " : "",
rdata & 0x3fff);
size -= temp;
next += temp;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 0609e12efeb2..09f37cce2c75 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -2,7 +2,7 @@
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
*
* [ Initialisation is based on Linus' ]
* [ uhci code and gregs ohci fragments ]
@@ -122,12 +122,27 @@
#define OHCI_INTR_INIT \
(OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | OHCI_INTR_WDH)
+#ifdef __hppa__
+/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
+#define IR_DISABLE
+#endif
+
+#ifdef CONFIG_ARCH_OMAP
+/* OMAP doesn't support IR (no SMM; not needed) */
+#define IR_DISABLE
+#endif
+
/*-------------------------------------------------------------------------*/
static const char hcd_name [] = "ohci_hcd";
#include "ohci.h"
+static void ohci_dump (struct ohci_hcd *ohci, int verbose);
+static int ohci_init (struct ohci_hcd *ohci);
+static int ohci_restart (struct ohci_hcd *ohci);
+static void ohci_stop (struct usb_hcd *hcd);
+
#include "ohci-hub.c"
#include "ohci-dbg.c"
#include "ohci-mem.c"
@@ -387,30 +402,30 @@ static int ohci_get_frame (struct usb_hcd *hcd)
return OHCI_FRAME_NO(ohci->hcca);
}
+static void ohci_usb_reset (struct ohci_hcd *ohci)
+{
+ ohci->hc_control = ohci_readl (&ohci->regs->control);
+ ohci->hc_control &= OHCI_CTRL_RWC;
+ writel (ohci->hc_control, &ohci->regs->control);
+}
+
/*-------------------------------------------------------------------------*
* HC functions
*-------------------------------------------------------------------------*/
-/* reset the HC and BUS */
+/* init memory, and kick BIOS/SMM off */
-static int hc_reset (struct ohci_hcd *ohci)
+static int ohci_init (struct ohci_hcd *ohci)
{
u32 temp;
+ int ret;
- /* boot firmware should have set this up (5.1.1.3.1) */
- if (!ohci->fminterval) {
- temp = ohci_readl (&ohci->regs->fminterval);
- if (temp & 0x3fff0000)
- ohci->fminterval = temp;
- else
- ohci->fminterval = DEFAULT_FMINTERVAL;
- /* also: power/overcurrent flags in roothub.a */
- }
+ disable (ohci);
+ ohci->regs = ohci->hcd.regs;
+ ohci->next_statechange = jiffies;
- /* SMM owns the HC? not for long!
- * On PA-RISC, PDC can leave IR set incorrectly; ignore it there.
- */
-#ifndef __hppa__
+#ifndef IR_DISABLE
+ /* SMM owns the HC? not for long! */
if (ohci_readl (&ohci->regs->control) & OHCI_CTRL_IR) {
ohci_dbg (ohci, "USB HC TakeOver from BIOS/SMM\n");
@@ -426,27 +441,95 @@ static int hc_reset (struct ohci_hcd *ohci)
msleep (10);
if (--temp == 0) {
ohci_err (ohci, "USB HC TakeOver failed!\n");
- return -1;
+ return -EBUSY;
}
}
+ ohci_usb_reset (ohci);
}
#endif
/* Disable HC interrupts */
writel (OHCI_INTR_MIE, &ohci->regs->intrdisable);
+ // flush the writes
+ (void) ohci_readl (&ohci->regs->control);
+
+ if (ohci->hcca)
+ return 0;
+
+ ohci->hcca = dma_alloc_coherent (ohci->hcd.self.controller,
+ sizeof *ohci->hcca, &ohci->hcca_dma, 0);
+ if (!ohci->hcca)
+ return -ENOMEM;
+
+ if ((ret = ohci_mem_init (ohci)) < 0)
+ ohci_stop (&ohci->hcd);
- ohci_dbg (ohci, "reset, control = 0x%x\n",
- ohci_readl (&ohci->regs->control));
+ return ret;
- /* Reset USB (needed by some controllers); RemoteWakeupConnected
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Start an OHCI controller, set the BUS operational
+ * resets USB and controller
+ * enable interrupts
+ * connect the virtual root hub
+ */
+static int ohci_run (struct ohci_hcd *ohci)
+{
+ u32 mask, temp;
+ struct usb_device *udev;
+ struct usb_bus *bus;
+ int first = ohci->fminterval == 0;
+
+ disable (ohci);
+
+ /* boot firmware should have set this up (5.1.1.3.1) */
+ if (first) {
+
+ temp = ohci_readl (&ohci->regs->fminterval);
+ ohci->fminterval = temp & 0x3fff;
+ if (ohci->fminterval != FI)
+ ohci_dbg (ohci, "fminterval delta %d\n",
+ ohci->fminterval - FI);
+ ohci->fminterval |= FSMP (ohci->fminterval) << 16;
+ /* also: power/overcurrent flags in roothub.a */
+ }
+
+ /* Reset USB nearly "by the book". RemoteWakeupConnected
* saved if boot firmware (BIOS/SMM/...) told us it's connected
* (for OHCI integrated on mainboard, it normally is)
*/
ohci->hc_control = ohci_readl (&ohci->regs->control);
- ohci->hc_control &= OHCI_CTRL_RWC; /* hcfs 0 = RESET */
- if (ohci->hc_control)
+ ohci_dbg (ohci, "resetting from state '%s', control = 0x%x\n",
+ hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS),
+ ohci->hc_control);
+
+ if (ohci->hc_control & OHCI_CTRL_RWC
+ && !(ohci->flags & OHCI_QUIRK_AMD756))
ohci->hcd.can_wakeup = 1;
+
+ switch (ohci->hc_control & OHCI_CTRL_HCFS) {
+ case OHCI_USB_OPER:
+ temp = 0;
+ break;
+ case OHCI_USB_SUSPEND:
+ case OHCI_USB_RESUME:
+ ohci->hc_control &= OHCI_CTRL_RWC;
+ ohci->hc_control |= OHCI_USB_RESUME;
+ temp = 10 /* msec wait */;
+ break;
+ // case OHCI_USB_RESET:
+ default:
+ ohci->hc_control &= OHCI_CTRL_RWC;
+ ohci->hc_control |= OHCI_USB_RESET;
+ temp = 50 /* msec wait */;
+ break;
+ }
writel (ohci->hc_control, &ohci->regs->control);
+ // flush the writes
+ (void) ohci_readl (&ohci->regs->control);
+ msleep(temp);
if (power_switching) {
unsigned ports = roothub_a (ohci) & RH_A_NDP;
@@ -455,15 +538,20 @@ static int hc_reset (struct ohci_hcd *ohci)
writel (RH_PS_LSDA,
&ohci->regs->roothub.portstatus [temp]);
}
- // flush those pci writes
+ // flush those writes
(void) ohci_readl (&ohci->regs->control);
- msleep (50);
+ memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
+
+ /* 2msec timelimit here means no irqs/preempt */
+ spin_lock_irq (&ohci->lock);
+retry:
/* HC Reset requires max 10 us delay */
writel (OHCI_HCR, &ohci->regs->cmdstatus);
temp = 30; /* ... allow extra time */
while ((ohci_readl (&ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
if (--temp == 0) {
+ spin_unlock_irq (&ohci->lock);
ohci_err (ohci, "USB HC reset timed out!\n");
return -1;
}
@@ -476,27 +564,15 @@ static int hc_reset (struct ohci_hcd *ohci)
* ... but some hardware won't init fmInterval "by the book"
* (SiS, OPTi ...), so reset again instead. SiS doesn't need
* this if we write fmInterval after we're OPERATIONAL.
+ * Unclear about ALi, ServerWorks, and others ... this could
+ * easily be a longstanding bug in chip init on Linux.
*/
- writel (ohci->hc_control, &ohci->regs->control);
- // flush those pci writes
- (void) ohci_readl (&ohci->regs->control);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
-/* Start an OHCI controller, set the BUS operational
- * enable interrupts
- * connect the virtual root hub
- */
-static int hc_start (struct ohci_hcd *ohci)
-{
- u32 mask, tmp;
- struct usb_device *udev;
- struct usb_bus *bus;
-
- disable (ohci);
+ if (ohci->flags & OHCI_QUIRK_INITRESET) {
+ writel (ohci->hc_control, &ohci->regs->control);
+ // flush those writes
+ (void) ohci_readl (&ohci->regs->control);
+ }
+ writel (ohci->fminterval, &ohci->regs->fminterval);
/* Tell the controller where the control and bulk lists are
* The lists are empty now. */
@@ -513,7 +589,15 @@ static int hc_start (struct ohci_hcd *ohci)
*/
if ((ohci_readl (&ohci->regs->fminterval) & 0x3fff0000) == 0
|| !ohci_readl (&ohci->regs->periodicstart)) {
- ohci_err (ohci, "init err\n");
+ if (!(ohci->flags & OHCI_QUIRK_INITRESET)) {
+ ohci->flags |= OHCI_QUIRK_INITRESET;
+ ohci_dbg (ohci, "enabling initreset quirk\n");
+ goto retry;
+ }
+ spin_unlock_irq (&ohci->lock);
+ ohci_err (ohci, "init err (%08x %04x)\n",
+ ohci_readl (&ohci->regs->fminterval),
+ ohci_readl (&ohci->regs->periodicstart));
return -EOVERFLOW;
}
@@ -532,42 +616,48 @@ static int hc_start (struct ohci_hcd *ohci)
writel (mask, &ohci->regs->intrenable);
/* handle root hub init quirks ... */
- tmp = roothub_a (ohci);
- tmp &= ~(RH_A_PSM | RH_A_OCPM);
+ temp = roothub_a (ohci);
+ temp &= ~(RH_A_PSM | RH_A_OCPM);
if (ohci->flags & OHCI_QUIRK_SUPERIO) {
/* NSC 87560 and maybe others */
- tmp |= RH_A_NOCP;
- tmp &= ~(RH_A_POTPGT | RH_A_NPS);
+ temp |= RH_A_NOCP;
+ temp &= ~(RH_A_POTPGT | RH_A_NPS);
} else if (power_switching) {
/* act like most external hubs: use per-port power
* switching and overcurrent reporting.
*/
- tmp &= ~(RH_A_NPS | RH_A_NOCP);
- tmp |= RH_A_PSM | RH_A_OCPM;
+ temp &= ~(RH_A_NPS | RH_A_NOCP);
+ temp |= RH_A_PSM | RH_A_OCPM;
} else {
/* hub power always on; required for AMD-756 and some
* Mac platforms. ganged overcurrent reporting, if any.
*/
- tmp |= RH_A_NPS;
+ temp |= RH_A_NPS;
}
- writel (tmp, &ohci->regs->roothub.a);
+ writel (temp, &ohci->regs->roothub.a);
writel (RH_HS_LPSC, &ohci->regs->roothub.status);
writel (power_switching ? RH_B_PPCM : 0, &ohci->regs->roothub.b);
- // flush those pci writes
+ // flush those writes
(void) ohci_readl (&ohci->regs->control);
+ spin_unlock_irq (&ohci->lock);
+
// POTPGT delay is bits 24-31, in 2 ms units.
mdelay ((roothub_a (ohci) >> 23) & 0x1fe);
bus = hcd_to_bus (&ohci->hcd);
+ ohci->hcd.state = USB_STATE_RUNNING;
+
+ ohci_dump (ohci, 1);
- if (bus->root_hub) {
- ohci->hcd.state = USB_STATE_RUNNING;
+ udev = hcd_to_bus (&ohci->hcd)->root_hub;
+ if (udev) {
+ udev->dev.power.power_state = 0;
+ usb_set_device_state (udev, USB_STATE_CONFIGURED);
return 0;
}
/* connect the virtual root hub */
udev = usb_alloc_dev (NULL, bus, 0);
- ohci->hcd.state = USB_STATE_RUNNING;
if (!udev) {
disable (ohci);
ohci->hc_control &= ~OHCI_CTRL_HCFS;
@@ -583,7 +673,10 @@ static int hc_start (struct ohci_hcd *ohci)
writel (ohci->hc_control, &ohci->regs->control);
return -ENODEV;
}
+ if (ohci->power_budget)
+ hub_set_power_budget(udev, ohci->power_budget);
+ create_debug_files (ohci);
return 0;
}
@@ -620,7 +713,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd, struct pt_regs *ptregs)
// e.g. due to PCI Master/Target Abort
ohci_dump (ohci, 1);
- hc_reset (ohci);
+ ohci_usb_reset (ohci);
}
if (ints & OHCI_INTR_RD) {
@@ -655,7 +748,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd, struct pt_regs *ptregs)
if (HCD_IS_RUNNING(ohci->hcd.state)) {
writel (ints, &regs->intrstatus);
writel (OHCI_INTR_MIE, &regs->intrenable);
- // flush those pci writes
+ // flush those writes
(void) ohci_readl (&ohci->regs->control);
}
@@ -674,10 +767,9 @@ static void ohci_stop (struct usb_hcd *hcd)
ohci_dump (ohci, 1);
flush_scheduled_work();
- if (HCD_IS_RUNNING(ohci->hcd.state))
- hc_reset (ohci);
- else
- writel (OHCI_INTR_MIE, &ohci->regs->intrdisable);
+
+ ohci_usb_reset (ohci);
+ writel (OHCI_INTR_MIE, &ohci->regs->intrdisable);
remove_debug_files (ohci);
ohci_mem_cleanup (ohci);
@@ -696,19 +788,7 @@ static void ohci_stop (struct usb_hcd *hcd)
#if defined(CONFIG_USB_SUSPEND) || defined(CONFIG_PM)
-static void mark_children_gone (struct usb_device *dev)
-{
- unsigned i;
-
- for (i = 0; i < dev->maxchild; i++) {
- if (dev->children [i] == 0)
- continue;
- dev->children [i]->state = USB_STATE_NOTATTACHED;
- mark_children_gone (dev->children [i]);
- }
-}
-
-static int hc_restart (struct ohci_hcd *ohci)
+static int ohci_restart (struct ohci_hcd *ohci)
{
int temp;
int i;
@@ -721,7 +801,7 @@ static int hc_restart (struct ohci_hcd *ohci)
*/
spin_lock_irq(&ohci->lock);
disable (ohci);
- mark_children_gone (ohci->hcd.self.root_hub);
+ usb_set_device_state (ohci->hcd.self.root_hub, USB_STATE_NOTATTACHED);
if (!list_empty (&ohci->pending))
ohci_dbg(ohci, "abort schedule...\n");
list_for_each_entry (priv, &ohci->pending, pending) {
@@ -765,7 +845,7 @@ static int hc_restart (struct ohci_hcd *ohci)
ohci->ed_controltail = NULL;
ohci->ed_bulktail = NULL;
- if ((temp = hc_reset (ohci)) < 0 || (temp = hc_start (ohci)) < 0) {
+ if ((temp = ohci_run (ohci)) < 0) {
ohci_err (ohci, "can't restart, %d\n", temp);
return temp;
} else {
@@ -777,10 +857,7 @@ static int hc_restart (struct ohci_hcd *ohci)
while (i--)
writel (RH_PS_PSS,
&ohci->regs->roothub.portstatus [temp]);
- ohci->hcd.self.root_hub->dev.power.power_state = 0;
- ohci->hcd.state = USB_STATE_RUNNING;
ohci_dbg (ohci, "restart complete\n");
- ohci_dump (ohci, 1);
}
return 0;
}
@@ -810,10 +887,25 @@ MODULE_LICENSE ("GPL");
#include "ohci-lh7a404.c"
#endif
+#ifdef CONFIG_PXA27x
+#include "ohci-pxa27x.c"
+#endif
+
#if !(defined(CONFIG_PCI) \
|| defined(CONFIG_SA1111) \
|| defined(CONFIG_ARCH_OMAP) \
|| defined (CONFIG_ARCH_LH7A404) \
+ || defined (CONFIG_PXA27x) \
)
#error "missing bus glue for ohci-hcd"
#endif
+
+#if !defined(HAVE_HNP) && defined(CONFIG_USB_OTG)
+
+#warning non-OTG configuration, too many HCDs
+
+static void start_hnp(struct ohci_hcd *ohci)
+{
+ /* "can't happen" */
+}
+#endif
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 424971c4bc27..84b133304edf 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -2,7 +2,7 @@
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
*
* This file is licenced under GPL
*/
@@ -11,34 +11,8 @@
/*
* OHCI Root Hub ... the nonsharable stuff
- *
- * Registers don't need cpu_to_le32, that happens transparently
*/
-/* AMD-756 (D2 rev) reports corrupt register contents in some cases.
- * The erratum (#4) description is incorrect. AMD's workaround waits
- * till some bits (mostly reserved) are clear; ok for all revs.
- */
-#define read_roothub(hc, register, mask) ({ \
- u32 temp = ohci_readl (&hc->regs->roothub.register); \
- if (temp == -1) \
- disable (hc); \
- else if (hc->flags & OHCI_QUIRK_AMD756) \
- while (temp & mask) \
- temp = ohci_readl (&hc->regs->roothub.register); \
- temp; })
-
-static u32 roothub_a (struct ohci_hcd *hc)
- { return read_roothub (hc, a, 0xfc0fe000); }
-static inline u32 roothub_b (struct ohci_hcd *hc)
- { return ohci_readl (&hc->regs->roothub.b); }
-static inline u32 roothub_status (struct ohci_hcd *hc)
- { return ohci_readl (&hc->regs->roothub.status); }
-static u32 roothub_portstatus (struct ohci_hcd *hc, int i)
- { return read_roothub (hc, portstatus [i], 0xffe0fce0); }
-
-/*-------------------------------------------------------------------------*/
-
#define dbg_port(hc,label,num,value) \
ohci_dbg (hc, \
"%s roothub.portstatus [%d] " \
@@ -146,10 +120,11 @@ static int ohci_hub_suspend (struct usb_hcd *hcd)
ohci->next_statechange = jiffies + msecs_to_jiffies (5);
succeed:
- /* it's not USB_STATE_SUSPENDED unless access to this
+ /* it's not HCD_STATE_SUSPENDED unless access to this
* hub from the non-usb side (PCI, SOC, etc) stopped
*/
root->dev.power.power_state = 3;
+ usb_set_device_state (root, USB_STATE_SUSPENDED);
done:
spin_unlock_irq (&ohci->lock);
return status;
@@ -163,9 +138,7 @@ static inline struct ed *find_head (struct ed *ed)
return ed;
}
-static int hc_restart (struct ohci_hcd *ohci);
-
-/* caller owns root->serialize */
+/* caller has locked the root hub */
static int ohci_hub_resume (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
@@ -180,7 +153,12 @@ static int ohci_hub_resume (struct usb_hcd *hcd)
spin_lock_irq (&ohci->lock);
ohci->hc_control = ohci_readl (&ohci->regs->control);
- switch (ohci->hc_control & OHCI_CTRL_HCFS) {
+ if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
+ /* this can happen after suspend-to-disk */
+ ohci_dbg (ohci, "BIOS/SMM active, control %03x\n",
+ ohci->hc_control);
+ status = -EBUSY;
+ } else switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_SUSPEND:
ohci->hc_control &= ~(OHCI_CTRL_HCFS|OHCI_SCHED_ENABLES);
ohci->hc_control |= OHCI_USB_RESUME;
@@ -202,8 +180,10 @@ static int ohci_hub_resume (struct usb_hcd *hcd)
status = -EBUSY;
}
spin_unlock_irq (&ohci->lock);
- if (status == -EBUSY)
- return hc_restart (ohci);
+ if (status == -EBUSY) {
+ (void) ohci_init (ohci);
+ return ohci_restart (ohci);
+ }
if (status != -EINPROGRESS)
return status;
@@ -260,6 +240,7 @@ static int ohci_hub_resume (struct usb_hcd *hcd)
/* TRSMRCY */
msleep (10);
root->dev.power.power_state = 0;
+ usb_set_device_state (root, USB_STATE_CONFIGURED);
/* keep it alive for ~5x suspend + resume costs */
ohci->next_statechange = jiffies + msecs_to_jiffies (250);
@@ -289,7 +270,7 @@ static int ohci_hub_resume (struct usb_hcd *hcd)
ohci->hc_control |= enables;
writel (ohci->hc_control, &ohci->regs->control);
if (temp)
- writel (status, &ohci->regs->cmdstatus);
+ writel (temp, &ohci->regs->cmdstatus);
(void) ohci_readl (&ohci->regs->control);
}
@@ -301,9 +282,9 @@ static void ohci_rh_resume (void *_hcd)
{
struct usb_hcd *hcd = _hcd;
- down (&hcd->self.root_hub->serialize);
+ usb_lock_device (hcd->self.root_hub);
(void) ohci_hub_resume (hcd);
- up (&hcd->self.root_hub->serialize);
+ usb_unlock_device (hcd->self.root_hub);
}
#else
@@ -381,12 +362,12 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
&& ((OHCI_CTRL_HCFS | OHCI_SCHED_ENABLES)
& ohci->hc_control)
== OHCI_USB_OPER
- && down_trylock (&hcd->self.root_hub->serialize) == 0
+ && usb_trylock_device (hcd->self.root_hub)
) {
ohci_vdbg (ohci, "autosuspend\n");
(void) ohci_hub_suspend (&ohci->hcd);
ohci->hcd.state = USB_STATE_RUNNING;
- up (&hcd->self.root_hub->serialize);
+ usb_unlock_device (hcd->self.root_hub);
}
#endif
@@ -481,8 +462,8 @@ static void start_hnp(struct ohci_hcd *ohci);
/* this timer value might be vendor-specific ... */
#define PORT_RESET_HW_MSEC 10
-/* wrap-aware logic stolen from <linux/jiffies.h> */
-#define tick_before(t1,t2) ((((s16)(t1))-((s16)(t2))) < 0)
+/* wrap-aware logic morphed from <linux/jiffies.h> */
+#define tick_before(t1,t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
/* called from some task, normally khubd */
static inline void root_port_reset (struct ohci_hcd *ohci, unsigned port)
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index 4e11a8eae481..1594d1e635da 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -229,38 +229,14 @@ ohci_lh7a404_start (struct usb_hcd *hcd)
int ret;
ohci_dbg (ohci, "ohci_lh7a404_start, ohci:%p", ohci);
-
- ohci->hcca = dma_alloc_coherent (hcd->self.controller,
- sizeof *ohci->hcca, &ohci->hcca_dma, 0);
- if (!ohci->hcca)
- return -ENOMEM;
-
- ohci_dbg (ohci, "ohci_lh7a404_start, ohci->hcca:%p",
- ohci->hcca);
-
- memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
-
- if ((ret = ohci_mem_init (ohci)) < 0) {
- ohci_stop (hcd);
+ if ((ret = ohci_init(ohci)) < 0)
return ret;
- }
- ohci->regs = hcd->regs;
-
- if (hc_reset (ohci) < 0) {
- ohci_stop (hcd);
- return -ENODEV;
- }
- if (hc_start (ohci) < 0) {
+ if ((ret = ohci_run (ohci)) < 0) {
err ("can't start %s", ohci->hcd.self.bus_name);
ohci_stop (hcd);
- return -EBUSY;
+ return ret;
}
- create_debug_files (ohci);
-
-#ifdef DEBUG
- ohci_dump (ohci, 1);
-#endif /*DEBUG*/
return 0;
}
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index d133ff22a4a7..a8e641f595bb 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -428,40 +428,18 @@ ohci_omap_start (struct usb_hcd *hcd)
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int ret;
- config = hcd->self.controller->platform_data;
- ohci->hcca = dma_alloc_coherent (hcd->self.controller,
- sizeof *ohci->hcca, &ohci->hcca_dma, 0);
- if (!ohci->hcca)
- return -ENOMEM;
-
- memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
- if ((ret = ohci_mem_init (ohci)) < 0) {
- ohci_stop (hcd);
+ if ((ret = ohci_init(ohci)) < 0)
return ret;
- }
- ohci->regs = hcd->regs;
+ config = hcd->self.controller->platform_data;
if (config->otg || config->rwc)
writel(OHCI_CTRL_RWC, &ohci->regs->control);
- if (hc_reset (ohci) < 0) {
- ohci_stop (hcd);
- return -ENODEV;
- }
-
- if (hc_start (ohci) < 0) {
+ if ((ret = ohci_run (ohci)) < 0) {
err ("can't start %s", ohci->hcd.self.bus_name);
ohci_stop (hcd);
- return -EBUSY;
+ return ret;
}
- if (ohci->power_budget)
- hub_set_power_budget(ohci->hcd.self.root_hub,
- ohci->power_budget);
- create_debug_files (ohci);
-
-#ifdef DEBUG
- ohci_dump (ohci, 1);
-#endif
return 0;
}
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 73d9aee657d5..2211a69e0b9d 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -35,9 +35,7 @@ ohci_pci_reset (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- ohci->regs = hcd->regs;
- ohci->next_statechange = jiffies;
- return hc_reset (ohci);
+ return ohci_init (ohci);
}
static int __devinit
@@ -46,11 +44,6 @@ ohci_pci_start (struct usb_hcd *hcd)
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int ret;
- ohci->hcca = dma_alloc_coherent (hcd->self.controller,
- sizeof *ohci->hcca, &ohci->hcca_dma, 0);
- if (!ohci->hcca)
- return -ENOMEM;
-
if(hcd->self.controller && hcd->self.controller->bus == &pci_bus_type) {
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
@@ -61,6 +54,7 @@ ohci_pci_start (struct usb_hcd *hcd)
&& pdev->device == 0x740c) {
ohci->flags = OHCI_QUIRK_AMD756;
ohci_info (ohci, "AMD756 erratum 4 workaround\n");
+ // also somewhat erratum 10 (suspend/resume issues)
}
/* FIXME for some of the early AMD 760 southbridges, OHCI
@@ -92,25 +86,16 @@ ohci_pci_start (struct usb_hcd *hcd)
ohci_info (ohci, "Using NSC SuperIO setup\n");
}
}
-
- }
-
- memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
- if ((ret = ohci_mem_init (ohci)) < 0) {
- ohci_stop (hcd);
- return ret;
}
- if (hc_start (ohci) < 0) {
+ /* NOTE: there may have already been a first reset, to
+ * keep bios/smm irqs from making trouble
+ */
+ if ((ret = ohci_run (ohci)) < 0) {
ohci_err (ohci, "can't start\n");
ohci_stop (hcd);
- return -EBUSY;
+ return ret;
}
- create_debug_files (ohci);
-
-#ifdef DEBUG
- ohci_dump (ohci, 1);
-#endif
return 0;
}
@@ -127,9 +112,9 @@ static int ohci_pci_suspend (struct usb_hcd *hcd, u32 state)
#ifdef CONFIG_USB_SUSPEND
(void) usb_suspend_device (hcd->self.root_hub, state);
#else
- down (&hcd->self.root_hub->serialize);
+ usb_lock_device (hcd->self.root_hub);
(void) ohci_hub_suspend (hcd);
- up (&hcd->self.root_hub->serialize);
+ usb_unlock_device (hcd->self.root_hub);
#endif
/* let things settle down a bit */
@@ -175,9 +160,9 @@ static int ohci_pci_resume (struct usb_hcd *hcd)
/* get extra cleanup even if remote wakeup isn't in use */
retval = usb_resume_device (hcd->self.root_hub);
#else
- down (&hcd->self.root_hub->serialize);
+ usb_lock_device (hcd->self.root_hub);
retval = ohci_hub_resume (hcd);
- up (&hcd->self.root_hub->serialize);
+ usb_unlock_device (hcd->self.root_hub);
#endif
if (retval == 0) {
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
new file mode 100644
index 000000000000..4bfe74123373
--- /dev/null
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -0,0 +1,460 @@
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ * (C) Copyright 2002 Hewlett-Packard Company
+ *
+ * Bus Glue for pxa27x
+ *
+ * Written by Christopher Hoover <ch@hpl.hp.com>
+ * Based on fragments of previous driver by Russell King et al.
+ *
+ * Modified for LH7A404 from ohci-sa1111.c
+ * by Durgesh Pattamatta <pattamattad@sharpsec.com>
+ *
+ * Modified for pxa27x from ohci-lh7a404.c
+ * by Nick Bane <nick@cecomputing.co.uk> 26-8-2004
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/device.h>
+#include <asm/mach-types.h>
+#include <asm/hardware.h>
+
+
+#define PMM_NPS_MODE 1
+#define PMM_GLOBAL_MODE 2
+#define PMM_PERPORT_MODE 3
+
+#define PXA_UHC_MAX_PORTNUM 3
+
+#define UHCRHPS(x) __REG2( 0x4C000050, (x)<<2 )
+
+static int pxa27x_ohci_pmm_state;
+
+/*
+ PMM_NPS_MODE -- PMM Non-power switching mode
+ Ports are powered continuously.
+
+ PMM_GLOBAL_MODE -- PMM global switching mode
+ All ports are powered at the same time.
+
+ PMM_PERPORT_MODE -- PMM per port switching mode
+ Ports are powered individually.
+ */
+static int pxa27x_ohci_select_pmm( int mode )
+{
+ pxa27x_ohci_pmm_state = mode;
+
+ switch ( mode ) {
+ case PMM_NPS_MODE:
+ UHCRHDA |= RH_A_NPS;
+ break;
+ case PMM_GLOBAL_MODE:
+ UHCRHDA &= ~(RH_A_NPS & RH_A_PSM);
+ break;
+ case PMM_PERPORT_MODE:
+ UHCRHDA &= ~(RH_A_NPS);
+ UHCRHDA |= RH_A_PSM;
+
+ /* Set port power control mask bits, only 3 ports. */
+ UHCRHDB |= (0x7<<17);
+ break;
+ default:
+ printk( KERN_ERR
+ "Invalid mode %d, set to non-power switch mode.\n",
+ mode );
+
+ pxa27x_ohci_pmm_state = PMM_NPS_MODE;
+ UHCRHDA |= RH_A_NPS;
+ }
+
+ return 0;
+}
+
+/*
+ If you select PMM_PERPORT_MODE, you should set the port power
+ */
+static int pxa27x_ohci_set_port_power( int port )
+{
+ if ( (pxa27x_ohci_pmm_state==PMM_PERPORT_MODE)
+ && (port>0) && (port<PXA_UHC_MAX_PORTNUM) ) {
+ UHCRHPS(port) |= 0x100;
+ return 0;
+ }
+ return -1;
+}
+
+/*
+ If you select PMM_PERPORT_MODE, you should set the port power
+ */
+static int pxa27x_ohci_clear_port_power( int port )
+{
+ if ( (pxa27x_ohci_pmm_state==PMM_PERPORT_MODE)
+ && (port>0) && (port<PXA_UHC_MAX_PORTNUM) ) {
+ UHCRHPS(port) |= 0x200;
+ return 0;
+ }
+
+ return -1;
+}
+
+extern int usb_disabled(void);
+
+/*-------------------------------------------------------------------------*/
+
+static void pxa27x_start_hc(struct platform_device *dev)
+{
+ pxa_set_cken(CKEN10_USBHOST, 1);
+
+ UHCHR |= UHCHR_FHR;
+ udelay(11);
+ UHCHR &= ~UHCHR_FHR;
+
+ UHCHR |= UHCHR_FSBIR;
+ while (UHCHR & UHCHR_FSBIR)
+ cpu_relax();
+
+ /* This could be properly abstracted away through the
+ device data the day more machines are supported and
+ their differences can be figured out correctly. */
+ if (machine_is_mainstone()) {
+ /* setup Port1 GPIO pin. */
+ pxa_gpio_mode( 88 | GPIO_ALT_FN_1_IN); /* USBHPWR1 */
+ pxa_gpio_mode( 89 | GPIO_ALT_FN_2_OUT); /* USBHPEN1 */
+
+ /* Set the Power Control Polarity Low and Power Sense
+ Polarity Low to active low. Supply power to USB ports. */
+ UHCHR = (UHCHR | UHCHR_PCPL | UHCHR_PSPL) &
+ ~(UHCHR_SSEP1 | UHCHR_SSEP2 | UHCHR_SSEP3 | UHCHR_SSE);
+ }
+
+ UHCHR &= ~UHCHR_SSE;
+
+ UHCHIE = (UHCHIE_UPRIE | UHCHIE_RWIE);
+}
+
+static void pxa27x_stop_hc(struct platform_device *dev)
+{
+ UHCHR |= UHCHR_FHR;
+ udelay(11);
+ UHCHR &= ~UHCHR_FHR;
+
+ UHCCOMS |= 1;
+ udelay(10);
+
+ pxa_set_cken(CKEN10_USBHOST, 0);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+void usb_hcd_pxa27x_remove (struct usb_hcd *, struct platform_device *);
+
+/* configure so an HC device and id are always provided */
+/* always called with process context; sleeping is OK */
+
+
+/**
+ * usb_hcd_pxa27x_probe - initialize pxa27x-based HCDs
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ *
+ */
+int usb_hcd_pxa27x_probe (const struct hc_driver *driver,
+ struct usb_hcd **hcd_out,
+ struct platform_device *dev)
+{
+ int retval;
+ struct usb_hcd *hcd = 0;
+
+ unsigned int *addr = NULL;
+
+ if (!request_mem_region(dev->resource[0].start,
+ dev->resource[0].end
+ - dev->resource[0].start + 1, hcd_name)) {
+ pr_debug("request_mem_region failed");
+ return -EBUSY;
+ }
+
+ pxa27x_start_hc(dev);
+
+ /* Select Power Management Mode */
+ pxa27x_ohci_select_pmm( PMM_PERPORT_MODE );
+
+ /* If choosing PMM_PERPORT_MODE, we should set the port power before we use it. */
+ if (pxa27x_ohci_set_port_power(1) < 0)
+ printk(KERN_ERR "Setting port 1 power failed.\n");
+
+ if (pxa27x_ohci_clear_port_power(2) < 0)
+ printk(KERN_ERR "Setting port 2 power failed.\n");
+
+ if (pxa27x_ohci_clear_port_power(3) < 0)
+ printk(KERN_ERR "Setting port 3 power failed.\n");
+
+ addr = ioremap(dev->resource[0].start,
+ dev->resource[0].end - dev->resource[0].start + 1);
+ if (!addr) {
+ pr_debug("ioremap failed");
+ retval = -ENOMEM;
+ goto err1;
+ }
+
+ hcd = driver->hcd_alloc ();
+ if (hcd == NULL){
+ pr_debug ("hcd_alloc failed");
+ retval = -ENOMEM;
+ goto err1;
+ }
+
+ if(dev->resource[1].flags != IORESOURCE_IRQ){
+ pr_debug ("resource[1] is not IORESOURCE_IRQ");
+ retval = -ENOMEM;
+ goto err1;
+ }
+
+ hcd->driver = (struct hc_driver *) driver;
+ hcd->description = driver->description;
+ hcd->irq = dev->resource[1].start;
+ hcd->regs = addr;
+ hcd->self.controller = &dev->dev;
+
+ retval = hcd_buffer_create (hcd);
+ if (retval != 0) {
+ pr_debug ("pool alloc fail");
+ goto err1;
+ }
+
+ retval = request_irq (hcd->irq, usb_hcd_irq, SA_INTERRUPT,
+ hcd->description, hcd);
+ if (retval != 0) {
+ pr_debug("request_irq(%d) failed with retval %d\n",hcd->irq,retval);
+ retval = -EBUSY;
+ goto err2;
+ }
+
+ pr_debug ("%s (pxa27x) at 0x%p, irq %d",
+ hcd->description, hcd->regs, hcd->irq);
+
+ usb_bus_init (&hcd->self);
+ hcd->self.op = &usb_hcd_operations;
+ hcd->self.hcpriv = (void *) hcd;
+ hcd->self.bus_name = "pxa27x";
+ hcd->product_desc = "PXA27x OHCI";
+
+ INIT_LIST_HEAD (&hcd->dev_list);
+
+ usb_register_bus (&hcd->self);
+
+ if ((retval = driver->start (hcd)) < 0) {
+ usb_hcd_pxa27x_remove(hcd, dev);
+ return retval;
+ }
+
+ *hcd_out = hcd;
+ return 0;
+
+ err2:
+ hcd_buffer_destroy (hcd);
+ if (hcd)
+ driver->hcd_free(hcd);
+ err1:
+ pxa27x_stop_hc(dev);
+ release_mem_region(dev->resource[0].start,
+ dev->resource[0].end
+ - dev->resource[0].start + 1);
+ return retval;
+}
+
+
+/* may be called without controller electrically present */
+/* may be called with controller, bus, and devices active */
+
+/**
+ * usb_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs
+ * @dev: USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of usb_hcd_pxa27x_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ *
+ */
+void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *dev)
+{
+ void *base;
+
+ pr_debug ("remove: %s, state %x", hcd->self.bus_name, hcd->state);
+
+ if (in_interrupt ())
+ BUG ();
+
+ hcd->state = USB_STATE_QUIESCING;
+
+ pr_debug ("%s: roothub graceful disconnect", hcd->self.bus_name);
+ usb_disconnect (&hcd->self.root_hub);
+
+ hcd->driver->stop (hcd);
+ hcd->state = USB_STATE_HALT;
+
+ free_irq (hcd->irq, hcd);
+ hcd_buffer_destroy (hcd);
+
+ usb_deregister_bus (&hcd->self);
+
+ base = hcd->regs;
+ hcd->driver->hcd_free (hcd);
+
+ pxa27x_stop_hc(dev);
+ release_mem_region(dev->resource[0].start,
+ dev->resource[0].end - dev->resource[0].start + 1);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int __devinit
+ohci_pxa27x_start (struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci (hcd);
+ int ret;
+
+ ohci_dbg (ohci, "ohci_pxa27x_start, ohci:%p", ohci);
+
+ if ((ret = ohci_init(ohci)) < 0)
+ return ret;
+
+ if ((ret = ohci_run (ohci)) < 0) {
+ err ("can't start %s", ohci->hcd.self.bus_name);
+ ohci_stop (hcd);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct hc_driver ohci_pxa27x_hc_driver = {
+ .description = hcd_name,
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_USB11,
+
+ /*
+ * basic lifecycle operations
+ */
+ .start = ohci_pxa27x_start,
+ .stop = ohci_stop,
+
+ /*
+ * memory lifecycle (except per-request)
+ */
+ .hcd_alloc = ohci_hcd_alloc,
+ .hcd_free = ohci_hcd_free,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_USB_SUSPEND
+ .hub_suspend = ohci_hub_suspend,
+ .hub_resume = ohci_hub_resume,
+#endif
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int ohci_hcd_pxa27x_drv_probe(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct usb_hcd *hcd = NULL;
+ int ret;
+
+ pr_debug ("In ohci_hcd_pxa27x_drv_probe");
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ ret = usb_hcd_pxa27x_probe(&ohci_pxa27x_hc_driver, &hcd, pdev);
+
+ if (ret == 0)
+ dev_set_drvdata(dev, hcd);
+
+ return ret;
+}
+
+static int ohci_hcd_pxa27x_drv_remove(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ usb_hcd_pxa27x_remove(hcd, pdev);
+ dev_set_drvdata(dev, NULL);
+ return 0;
+}
+
+static int ohci_hcd_pxa27x_drv_suspend(struct device *dev, u32 state, u32 level)
+{
+// struct platform_device *pdev = to_platform_device(dev);
+// struct usb_hcd *hcd = dev_get_drvdata(dev);
+ printk("%s: not implemented yet\n", __FUNCTION__);
+
+ return 0;
+}
+
+static int ohci_hcd_pxa27x_drv_resume(struct device *dev, u32 state)
+{
+// struct platform_device *pdev = to_platform_device(dev);
+// struct usb_hcd *hcd = dev_get_drvdata(dev);
+ printk("%s: not implemented yet\n", __FUNCTION__);
+
+ return 0;
+}
+
+
+static struct device_driver ohci_hcd_pxa27x_driver = {
+ .name = "pxa27x-ohci",
+ .bus = &platform_bus_type,
+ .probe = ohci_hcd_pxa27x_drv_probe,
+ .remove = ohci_hcd_pxa27x_drv_remove,
+ .suspend = ohci_hcd_pxa27x_drv_suspend,
+ .resume = ohci_hcd_pxa27x_drv_resume,
+};
+
+static int __init ohci_hcd_pxa27x_init (void)
+{
+ pr_debug (DRIVER_INFO " (pxa27x)");
+ pr_debug ("block sizes: ed %d td %d\n",
+ sizeof (struct ed), sizeof (struct td));
+
+ return driver_register(&ohci_hcd_pxa27x_driver);
+}
+
+static void __exit ohci_hcd_pxa27x_cleanup (void)
+{
+ driver_unregister(&ohci_hcd_pxa27x_driver);
+}
+
+module_init (ohci_hcd_pxa27x_init);
+module_exit (ohci_hcd_pxa27x_cleanup);
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index c3cd76aba163..35d71aceff63 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -272,33 +272,14 @@ ohci_sa1111_start (struct usb_hcd *hcd)
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int ret;
- ohci->hcca = dma_alloc_coherent (hcd->self.controller,
- sizeof *ohci->hcca, &ohci->hcca_dma, 0);
- if (!ohci->hcca)
- return -ENOMEM;
-
- memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
- if ((ret = ohci_mem_init (ohci)) < 0) {
- ohci_stop (hcd);
+ if ((ret = ohci_init(ohci)) < 0)
return ret;
- }
- ohci->regs = hcd->regs;
-
- if (hc_reset (ohci) < 0) {
- ohci_stop (hcd);
- return -ENODEV;
- }
- if (hc_start (ohci) < 0) {
+ if ((ret = ohci_run (ohci)) < 0) {
err ("can't start %s", ohci->hcd.self.bus_name);
ohci_stop (hcd);
- return -EBUSY;
+ return ret;
}
- create_debug_files (ohci);
-
-#ifdef DEBUG
- ohci_dump (ohci, 1);
-#endif
return 0;
}
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 19436cdd7a11..b2756dee9507 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -42,7 +42,6 @@ struct ed {
/* create --> IDLE --> OPER --> ... --> IDLE --> destroy
* usually: OPER --> UNLINK --> (IDLE | OPER) --> ...
- * some special cases : OPER --> IDLE ...
*/
u8 state; /* ED_{IDLE,UNLINK,OPER} */
#define ED_IDLE 0x00 /* NOT linked to HC */
@@ -387,6 +386,7 @@ struct ohci_hcd {
unsigned long flags; /* for HC bugs */
#define OHCI_QUIRK_AMD756 0x01 /* erratum #4 */
#define OHCI_QUIRK_SUPERIO 0x02 /* natsemi */
+#define OHCI_QUIRK_INITRESET 0x04 /* SiS, OPTi, ... */
// there are also chip quirks/bugs in init logic
/*
@@ -405,14 +405,14 @@ static inline void disable (struct ohci_hcd *ohci)
}
#define FI 0x2edf /* 12000 bits per frame (-1) */
-#define DEFAULT_FMINTERVAL ((((6 * (FI - 210)) / 7) << 16) | FI)
+#define FSMP(fi) (0x7fff & ((6 * ((fi) - 210)) / 7))
#define LSTHRESH 0x628 /* lowspeed bit threshold */
static inline void periodic_reinit (struct ohci_hcd *ohci)
{
- writel (ohci->fminterval, &ohci->regs->fminterval);
- writel (((9 * FI) / 10) & 0x3fff, &ohci->regs->periodicstart);
- writel (LSTHRESH, &ohci->regs->lsthresh);
+ u32 fi = ohci->fminterval & 0x0ffff;
+
+ writel (((9 * fi) / 10) & 0x3fff, &ohci->regs->periodicstart);
}
/*-------------------------------------------------------------------------*/
@@ -436,6 +436,8 @@ static inline void periodic_reinit (struct ohci_hcd *ohci)
# define ohci_vdbg(ohci, fmt, args...) do { } while (0)
#endif
+/*-------------------------------------------------------------------------*/
+
#ifdef CONFIG_ARCH_LH7A404
/* Marc Singer: at the time this code was written, the LH7A404
* had a problem reading the USB host registers. This
@@ -455,3 +457,25 @@ static inline unsigned int ohci_readl (void __iomem * regs)
return readl (regs);
}
#endif
+
+/* AMD-756 (D2 rev) reports corrupt register contents in some cases.
+ * The erratum (#4) description is incorrect. AMD's workaround waits
+ * till some bits (mostly reserved) are clear; ok for all revs.
+ */
+#define read_roothub(hc, register, mask) ({ \
+ u32 temp = ohci_readl (&hc->regs->roothub.register); \
+ if (temp == -1) \
+ disable (hc); \
+ else if (hc->flags & OHCI_QUIRK_AMD756) \
+ while (temp & mask) \
+ temp = ohci_readl (&hc->regs->roothub.register); \
+ temp; })
+
+static u32 roothub_a (struct ohci_hcd *hc)
+ { return read_roothub (hc, a, 0xfc0fe000); }
+static inline u32 roothub_b (struct ohci_hcd *hc)
+ { return ohci_readl (&hc->regs->roothub.b); }
+static inline u32 roothub_status (struct ohci_hcd *hc)
+ { return ohci_readl (&hc->regs->roothub.status); }
+static u32 roothub_portstatus (struct ohci_hcd *hc, int i)
+ { return read_roothub (hc, portstatus [i], 0xffe0fce0); }
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 20a42f914684..99b706b64052 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -230,42 +230,22 @@ static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
}
/*
- * Inserts a td into qh list at the top.
+ * Inserts a td list into qh.
*/
static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
{
- struct list_head *tmp, *head;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
- struct uhci_td *td, *ptd;
-
- if (list_empty(&urbp->td_list))
- return;
-
- head = &urbp->td_list;
- tmp = head->next;
+ struct uhci_td *td;
+ u32 *plink;
/* Ordering isn't important here yet since the QH hasn't been */
- /* inserted into the schedule yet */
- td = list_entry(tmp, struct uhci_td, list);
-
- /* Add the first TD to the QH element pointer */
- qh->element = cpu_to_le32(td->dma_handle) | breadth;
-
- ptd = td;
-
- /* Then link the rest of the TD's */
- tmp = tmp->next;
- while (tmp != head) {
- td = list_entry(tmp, struct uhci_td, list);
-
- tmp = tmp->next;
-
- ptd->link = cpu_to_le32(td->dma_handle) | breadth;
-
- ptd = td;
+ /* inserted into the schedule yet */
+ plink = &qh->element;
+ list_for_each_entry(td, &urbp->td_list, list) {
+ *plink = cpu_to_le32(td->dma_handle) | breadth;
+ plink = &td->link;
}
-
- ptd->link = UHCI_PTR_TERM;
+ *plink = UHCI_PTR_TERM;
}
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
@@ -330,7 +310,7 @@ static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
- struct list_head *tmp;
+ struct urb_priv *turbp;
struct uhci_qh *lqh;
/* Grab the last QH */
@@ -358,12 +338,8 @@ static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct
*/
lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
if (lqh->urbp) {
- list_for_each (tmp, &lqh->urbp->queue_list) {
- struct urb_priv *turbp =
- list_entry(tmp, struct urb_priv, queue_list);
-
+ list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list)
turbp->qh->link = lqh->link;
- }
}
list_add_tail(&urbp->qh->list, &skelqh->list);
@@ -405,18 +381,11 @@ static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
pqh = list_entry(qh->list.prev, struct uhci_qh, list);
pqh->link = newlink;
if (pqh->urbp) {
- struct list_head *head, *tmp;
-
- head = &pqh->urbp->queue_list;
- tmp = head->next;
- while (head != tmp) {
- struct urb_priv *turbp =
- list_entry(tmp, struct urb_priv, queue_list);
-
- tmp = tmp->next;
+ struct urb_priv *turbp;
+ list_for_each_entry(turbp, &pqh->urbp->queue_list,
+ queue_list)
turbp->qh->link = newlink;
- }
}
wmb();
@@ -447,21 +416,14 @@ static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
{
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
- struct list_head *head, *tmp;
-
- head = &urbp->td_list;
- tmp = head->next;
- while (head != tmp) {
- struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
-
- tmp = tmp->next;
+ struct uhci_td *td;
+ list_for_each_entry(td, &urbp->td_list, list) {
if (toggle)
td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
else
td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
-
toggle ^= 1;
}
@@ -473,30 +435,19 @@ static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
- struct list_head *tmp;
struct uhci_td *lltd;
eurbp = eurb->hcpriv;
urbp = urb->hcpriv;
/* Find the first URB in the queue */
+ furbp = eurbp;
if (eurbp->queued) {
- struct list_head *head = &eurbp->queue_list;
-
- tmp = head->next;
- while (tmp != head) {
- struct urb_priv *turbp =
- list_entry(tmp, struct urb_priv, queue_list);
-
- if (!turbp->queued)
+ list_for_each_entry(furbp, &eurbp->queue_list, queue_list)
+ if (!furbp->queued)
break;
+ }
- tmp = tmp->next;
- }
- } else
- tmp = &eurbp->queue_list;
-
- furbp = list_entry(tmp, struct urb_priv, queue_list);
lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);
lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);
@@ -522,9 +473,7 @@ static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, stru
static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
- struct urb_priv *urbp, *nurbp;
- struct list_head *head, *tmp;
- struct urb_priv *purbp;
+ struct urb_priv *urbp, *nurbp, *purbp, *turbp;
struct uhci_td *pltd;
unsigned int toggle;
@@ -556,14 +505,7 @@ static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
toggle = uhci_toggle(td_token(pltd)) ^ 1;
}
- head = &urbp->queue_list;
- tmp = head->next;
- while (head != tmp) {
- struct urb_priv *turbp;
-
- turbp = list_entry(tmp, struct urb_priv, queue_list);
- tmp = tmp->next;
-
+ list_for_each_entry(turbp, &urbp->queue_list, queue_list) {
if (!turbp->queued)
break;
toggle = uhci_fixup_toggle(turbp->urb, toggle);
@@ -637,7 +579,7 @@ static void uhci_remove_td_from_urb(struct uhci_td *td)
static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
- struct list_head *head, *tmp;
+ struct uhci_td *td, *tmp;
struct urb_priv *urbp;
unsigned int age;
@@ -660,13 +602,7 @@ static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
if (list_empty(&uhci->td_remove_list))
uhci_set_next_interrupt(uhci);
- head = &urbp->td_list;
- tmp = head->next;
- while (tmp != head) {
- struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
-
- tmp = tmp->next;
-
+ list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
uhci_remove_td_from_urb(td);
uhci_remove_td(uhci, td);
list_add(&td->remove_list, &uhci->td_remove_list);
@@ -1083,7 +1019,6 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb
*/
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
- struct list_head *tmp, *head;
struct urb_priv *urbp = urb->hcpriv;
struct uhci_td *td;
unsigned int status = 0;
@@ -1091,13 +1026,7 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
urb->actual_length = 0;
- head = &urbp->td_list;
- tmp = head->next;
- while (tmp != head) {
- td = list_entry(tmp, struct uhci_td, list);
-
- tmp = tmp->next;
-
+ list_for_each_entry(td, &urbp->td_list, list) {
status = uhci_status_bits(td_status(td));
if (status & TD_CTRL_ACTIVE)
return -EINPROGRESS;
@@ -1176,17 +1105,12 @@ static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
struct urb *last_urb = NULL;
- struct list_head *tmp, *head;
+ struct urb_priv *up;
int ret = 0;
- head = &uhci->urb_list;
- tmp = head->next;
- while (tmp != head) {
- struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
+ list_for_each_entry(up, &uhci->urb_list, urb_list) {
struct urb *u = up->urb;
- tmp = tmp->next;
-
/* look for pending URB's with identical pipe handle */
if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
(u->status == -EINPROGRESS) && (u != urb)) {
@@ -1272,7 +1196,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
- struct list_head *tmp, *head;
+ struct uhci_td *td;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
int status;
int i, ret = 0;
@@ -1280,14 +1204,9 @@ static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
urb->actual_length = 0;
i = 0;
- head = &urbp->td_list;
- tmp = head->next;
- while (tmp != head) {
- struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
+ list_for_each_entry(td, &urbp->td_list, list) {
int actlength;
- tmp = tmp->next;
-
if (td_status(td) & TD_CTRL_ACTIVE)
return -EINPROGRESS;
@@ -1311,20 +1230,15 @@ static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
{
- struct list_head *tmp, *head;
+ struct urb_priv *up;
/* We don't match Isoc transfers since they are special */
if (usb_pipeisoc(urb->pipe))
return NULL;
- head = &uhci->urb_list;
- tmp = head->next;
- while (tmp != head) {
- struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
+ list_for_each_entry(up, &uhci->urb_list, urb_list) {
struct urb *u = up->urb;
- tmp = tmp->next;
-
if (u->dev == urb->dev && u->status == -EINPROGRESS) {
/* For control, ignore the direction */
if (usb_pipecontrol(urb->pipe) &&
@@ -1475,9 +1389,10 @@ out:
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
- struct list_head *head, *tmp;
+ struct list_head *head;
+ struct uhci_td *td;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
- int prevactive = 1;
+ int prevactive = 0;
uhci_dec_fsbr(uhci, urb); /* Safe since it checks */
@@ -1485,25 +1400,28 @@ static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
* Now we need to find out what the last successful toggle was
* so we can update the local data toggle for the next transfer
*
- * There's 3 way's the last successful completed TD is found:
+ * There are 2 ways the last successful completed TD is found:
*
* 1) The TD is NOT active and the actual length < expected length
* 2) The TD is NOT active and it's the last TD in the chain
+ *
+ * and a third way the first uncompleted TD is found:
+ *
* 3) The TD is active and the previous TD is NOT active
*
* Control and Isochronous ignore the toggle, so this is safe
* for all types
+ *
+ * FIXME: The toggle fixups won't be 100% reliable until we
+ * change over to using a single queue for each endpoint and
+ * stop the queue before unlinking.
*/
head = &urbp->td_list;
- tmp = head->next;
- while (tmp != head) {
- struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
-
- tmp = tmp->next;
-
+ list_for_each_entry(td, head, list) {
if (!(td_status(td) & TD_CTRL_ACTIVE) &&
- (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td)) ||
- tmp == head))
+ (uhci_actual_length(td_status(td)) <
+ uhci_expected_length(td_token(td)) ||
+ td->list.next == head))
usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
uhci_packetout(td_token(td)),
uhci_toggle(td_token(td)) ^ 1);
@@ -1556,7 +1474,8 @@ done:
static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
- struct list_head *head, *tmp;
+ struct list_head *head;
+ struct uhci_td *td;
int count = 0;
uhci_dec_fsbr(uhci, urb);
@@ -1570,18 +1489,14 @@ static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
*/
head = &urbp->td_list;
- tmp = head->next;
- while (tmp != head) {
- struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
-
- tmp = tmp->next;
-
+ list_for_each_entry(td, head, list) {
/*
* Make sure we don't do the last one (since it'll have the
* TERM bit set) as well as we skip every so many TD's to
* make sure it doesn't hog the bandwidth
*/
- if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
+ if (td->list.next != head && (count % DEPTH_INTERVAL) ==
+ (DEPTH_INTERVAL - 1))
td->link |= UHCI_PTR_DEPTH;
count++;
@@ -1606,12 +1521,10 @@ static void stall_callback(unsigned long ptr)
{
struct usb_hcd *hcd = (struct usb_hcd *)ptr;
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
- struct list_head list, *tmp, *head;
+ struct urb_priv *up;
unsigned long flags;
int called_uhci_finish_completion = 0;
- INIT_LIST_HEAD(&list);
-
spin_lock_irqsave(&uhci->schedule_lock, flags);
if (!list_empty(&uhci->urb_remove_list) &&
uhci_get_current_frame_number(uhci) != uhci->urb_remove_age) {
@@ -1620,14 +1533,9 @@ static void stall_callback(unsigned long ptr)
called_uhci_finish_completion = 1;
}
- head = &uhci->urb_list;
- tmp = head->next;
- while (tmp != head) {
- struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
+ list_for_each_entry(up, &uhci->urb_list, urb_list) {
struct urb *u = up->urb;
- tmp = tmp->next;
-
spin_lock(&u->lock);
/* Check if the FSBR timed out */
@@ -1642,17 +1550,6 @@ static void stall_callback(unsigned long ptr)
if (called_uhci_finish_completion)
wake_up_all(&uhci->waitqh);
- head = &list;
- tmp = head->next;
- while (tmp != head) {
- struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
- struct urb *u = up->urb;
-
- tmp = tmp->next;
-
- uhci_urb_dequeue(hcd, u);
- }
-
/* Really disable FSBR */
if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
uhci->fsbrtimeout = 0;
@@ -1661,6 +1558,8 @@ static void stall_callback(unsigned long ptr)
/* Poll for and perform state transitions */
hc_state_transitions(uhci);
+ if (unlikely(uhci->suspended_ports && uhci->state != UHCI_SUSPENDED))
+ uhci_check_resume(uhci);
init_stall_timer(hcd);
}
@@ -1680,15 +1579,9 @@ static int init_stall_timer(struct usb_hcd *hcd)
static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
{
- struct list_head *tmp, *head;
-
- head = &uhci->qh_remove_list;
- tmp = head->next;
- while (tmp != head) {
- struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list);
-
- tmp = tmp->next;
+ struct uhci_qh *qh, *tmp;
+ list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
list_del_init(&qh->remove_list);
uhci_free_qh(uhci, qh);
@@ -1697,15 +1590,9 @@ static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
static void uhci_free_pending_tds(struct uhci_hcd *uhci)
{
- struct list_head *tmp, *head;
-
- head = &uhci->td_remove_list;
- tmp = head->next;
- while (tmp != head) {
- struct uhci_td *td = list_entry(tmp, struct uhci_td, remove_list);
-
- tmp = tmp->next;
+ struct uhci_td *td, *tmp;
+ list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
list_del_init(&td->remove_list);
uhci_free_td(uhci, td);
@@ -1726,19 +1613,13 @@ static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs
static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
- struct list_head *tmp, *head;
+ struct urb_priv *urbp, *tmp;
- head = &uhci->complete_list;
- tmp = head->next;
- while (tmp != head) {
- struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
+ list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
struct urb *urb = urbp->urb;
list_del_init(&urbp->urb_list);
uhci_finish_urb(hcd, urb, regs);
-
- head = &uhci->complete_list;
- tmp = head->next;
}
}
@@ -1754,7 +1635,7 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned long io_addr = uhci->io_addr;
unsigned short status;
- struct list_head *tmp, *head;
+ struct urb_priv *urbp, *tmp;
unsigned int age;
/*
@@ -1801,15 +1682,11 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
else
uhci_set_next_interrupt(uhci);
- /* Walk the list of pending URB's to see which ones completed */
- head = &uhci->urb_list;
- tmp = head->next;
- while (tmp != head) {
- struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
+ /* Walk the list of pending URBs to see which ones completed
+ * (must be _safe because uhci_transfer_result() dequeues URBs) */
+ list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
struct urb *urb = urbp->urb;
- tmp = tmp->next;
-
/* Checks the status and does all of the magic necessary */
uhci_transfer_result(uhci, urb);
}
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 785c0c2cca12..ea9bd98b5edc 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -352,6 +352,12 @@ struct uhci_hcd {
int resume_detect; /* Need a Global Resume */
unsigned int saved_framenumber; /* Save during PM suspend */
+ /* Support for port suspend/resume */
+ unsigned long port_c_suspend; /* Bit-arrays of ports */
+ unsigned long suspended_ports;
+ unsigned long resuming_ports;
+ unsigned long resume_timeout; /* Time to stop signalling */
+
/* Main list of URB's currently controlled by this HC */
struct list_head urb_list; /* P: uhci->schedule_lock */
@@ -385,12 +391,12 @@ struct urb_priv {
struct uhci_qh *qh; /* QH for this URB */
struct list_head td_list; /* P: urb->lock */
- int fsbr : 1; /* URB turned on FSBR */
- int fsbr_timeout : 1; /* URB timed out on FSBR */
- int queued : 1; /* QH was queued (not linked in) */
- int short_control_packet : 1; /* If we get a short packet during */
- /* a control transfer, retrigger */
- /* the status phase */
+ unsigned fsbr : 1; /* URB turned on FSBR */
+ unsigned fsbr_timeout : 1; /* URB timed out on FSBR */
+ unsigned queued : 1; /* QH was queued (not linked in) */
+ unsigned short_control_packet : 1; /* If we get a short packet during */
+ /* a control transfer, retrigger */
+ /* the status phase */
unsigned long inserttime; /* In jiffies */
unsigned long fsbrtime; /* In jiffies */
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 22bbd70063ce..f44d90e23df2 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -36,13 +36,13 @@ static __u8 root_hub_hub_des[] =
static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
- unsigned long io_addr = uhci->io_addr;
- int i;
+ int port;
*buf = 0;
- for (i = 0; i < uhci->rh_numports; i++) {
- if (inw(io_addr + USBPORTSC1 + i * 2) & RWC_BITS)
- *buf |= (1 << (i + 1));
+ for (port = 0; port < uhci->rh_numports; ++port) {
+ if ((inw(uhci->io_addr + USBPORTSC1 + port * 2) & RWC_BITS) ||
+ test_bit(port, &uhci->port_c_suspend))
+ *buf |= (1 << (port + 1));
}
return !!*buf;
}
@@ -62,31 +62,67 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
status &= ~(RWC_BITS|WZ_BITS); \
outw(status, port_addr)
+/* UHCI controllers don't automatically stop resume signalling after 20 msec,
+ * so we have to poll and check timeouts in order to take care of it.
+ * FIXME: Synchronize access to these fields by a spinlock.
+ */
+static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,
+ unsigned int port_addr)
+{
+ int status;
+
+ if (test_bit(port, &uhci->suspended_ports)) {
+ CLR_RH_PORTSTAT(USBPORTSC_SUSP | USBPORTSC_RD);
+ clear_bit(port, &uhci->suspended_ports);
+ clear_bit(port, &uhci->resuming_ports);
+ set_bit(port, &uhci->port_c_suspend);
+ }
+}
+
+static void uhci_check_resume(struct uhci_hcd *uhci)
+{
+ unsigned int port;
+ unsigned int port_addr;
+
+ for (port = 0; port < uhci->rh_numports; ++port) {
+ port_addr = uhci->io_addr + USBPORTSC1 + 2 * port;
+ if (unlikely(inw(port_addr) & USBPORTSC_RD)) {
+ if (!test_bit(port, &uhci->resuming_ports)) {
+
+ /* Port received a wakeup request */
+ set_bit(port, &uhci->resuming_ports);
+ uhci->resume_timeout = jiffies +
+ msecs_to_jiffies(20);
+ } else if (time_after_eq(jiffies,
+ uhci->resume_timeout)) {
+ uhci_finish_suspend(uhci, port, port_addr);
+ }
+ }
+ }
+}
/* size of returned buffer is part of USB spec */
static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
- int status, retval = 0, len = 0;
- unsigned long port_addr = uhci->io_addr + USBPORTSC1 + 2 * (wIndex-1);
- __u16 wPortChange, wPortStatus;
+ int status, lstatus, retval = 0, len = 0;
+ unsigned int port = wIndex - 1;
+ unsigned long port_addr = uhci->io_addr + USBPORTSC1 + 2 * port;
+ u16 wPortChange, wPortStatus;
switch (typeReq) {
- /* Request Destination:
- without flags: Device,
- RH_INTERFACE: interface,
- RH_ENDPOINT: endpoint,
- RH_CLASS means HUB here,
- RH_OTHER | RH_CLASS almost ever means HUB_PORT here
- */
case GetHubStatus:
*(__le32 *)buf = cpu_to_le32(0);
OK(4); /* hub power */
case GetPortStatus:
- if (!wIndex || wIndex > uhci->rh_numports)
+ if (port >= uhci->rh_numports)
goto err;
+
+ if (uhci->resuming_ports)
+ uhci_check_resume(uhci);
+
status = inw(port_addr);
/* Intel controllers report the OverCurrent bit active on.
@@ -97,34 +133,43 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
PCI_VENDOR_ID_VIA)
status ^= USBPORTSC_OC;
- /* UHCI doesn't support C_SUSPEND and C_RESET (always false) */
- wPortChange = 0;
+ /* UHCI doesn't support C_RESET (always false) */
+ wPortChange = lstatus = 0;
if (status & USBPORTSC_CSC)
- wPortChange |= 1 << (USB_PORT_FEAT_C_CONNECTION - 16);
+ wPortChange |= USB_PORT_STAT_C_CONNECTION;
if (status & USBPORTSC_PEC)
- wPortChange |= 1 << (USB_PORT_FEAT_C_ENABLE - 16);
+ wPortChange |= USB_PORT_STAT_C_ENABLE;
if (status & USBPORTSC_OCC)
- wPortChange |= 1 << (USB_PORT_FEAT_C_OVER_CURRENT - 16);
+ wPortChange |= USB_PORT_STAT_C_OVERCURRENT;
+
+ if (test_bit(port, &uhci->port_c_suspend)) {
+ wPortChange |= USB_PORT_STAT_C_SUSPEND;
+ lstatus |= 1;
+ }
+ if (test_bit(port, &uhci->suspended_ports))
+ lstatus |= 2;
+ if (test_bit(port, &uhci->resuming_ports))
+ lstatus |= 4;
/* UHCI has no power switching (always on) */
- wPortStatus = 1 << USB_PORT_FEAT_POWER;
+ wPortStatus = USB_PORT_STAT_POWER;
if (status & USBPORTSC_CCS)
- wPortStatus |= 1 << USB_PORT_FEAT_CONNECTION;
+ wPortStatus |= USB_PORT_STAT_CONNECTION;
if (status & USBPORTSC_PE) {
- wPortStatus |= 1 << USB_PORT_FEAT_ENABLE;
+ wPortStatus |= USB_PORT_STAT_ENABLE;
if (status & (USBPORTSC_SUSP | USBPORTSC_RD))
- wPortStatus |= 1 << USB_PORT_FEAT_SUSPEND;
+ wPortStatus |= USB_PORT_STAT_SUSPEND;
}
if (status & USBPORTSC_OC)
- wPortStatus |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+ wPortStatus |= USB_PORT_STAT_OVERCURRENT;
if (status & USBPORTSC_PR)
- wPortStatus |= 1 << USB_PORT_FEAT_RESET;
+ wPortStatus |= USB_PORT_STAT_RESET;
if (status & USBPORTSC_LSDA)
- wPortStatus |= 1 << USB_PORT_FEAT_LOWSPEED;
+ wPortStatus |= USB_PORT_STAT_LOW_SPEED;
if (wPortChange)
- dev_dbg(uhci_dev(uhci), "port %d portsc %04x\n",
- wIndex, status);
+ dev_dbg(uhci_dev(uhci), "port %d portsc %04x,%02x\n",
+ wIndex, status, lstatus);
*(__le16 *)buf = cpu_to_le16(wPortStatus);
*(__le16 *)(buf + 2) = cpu_to_le16(wPortChange);
@@ -140,11 +185,12 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
break;
case SetPortFeature:
- if (!wIndex || wIndex > uhci->rh_numports)
+ if (port >= uhci->rh_numports)
goto err;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
+ set_bit(port, &uhci->suspended_ports);
SET_RH_PORTSTAT(USBPORTSC_SUSP);
OK(0);
case USB_PORT_FEAT_RESET:
@@ -152,6 +198,9 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
mdelay(50); /* USB v1.1 7.1.7.3 */
CLR_RH_PORTSTAT(USBPORTSC_PR);
udelay(10);
+
+ /* Reset terminates Resume signalling */
+ uhci_finish_suspend(uhci, port, port_addr);
SET_RH_PORTSTAT(USBPORTSC_PE);
mdelay(10);
CLR_RH_PORTSTAT(USBPORTSC_PEC|USBPORTSC_CSC);
@@ -164,21 +213,38 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
break;
case ClearPortFeature:
- if (!wIndex || wIndex > uhci->rh_numports)
+ if (port >= uhci->rh_numports)
goto err;
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
CLR_RH_PORTSTAT(USBPORTSC_PE);
+
+ /* Disable terminates Resume signalling */
+ uhci_finish_suspend(uhci, port, port_addr);
OK(0);
case USB_PORT_FEAT_C_ENABLE:
CLR_RH_PORTSTAT(USBPORTSC_PEC);
OK(0);
case USB_PORT_FEAT_SUSPEND:
- CLR_RH_PORTSTAT(USBPORTSC_SUSP);
+ if (test_bit(port, &uhci->suspended_ports) &&
+ !test_and_set_bit(port,
+ &uhci->resuming_ports)) {
+ uhci->resume_timeout = jiffies +
+ msecs_to_jiffies(20);
+ SET_RH_PORTSTAT(USBPORTSC_RD);
+
+ /* The controller won't allow RD to be set
+ * if the port is disabled. When this happens
+ * just skip the Resume signalling.
+ */
+ if (!(inw(port_addr) & USBPORTSC_RD))
+ uhci_finish_suspend(uhci, port,
+ port_addr);
+ }
OK(0);
case USB_PORT_FEAT_C_SUSPEND:
- /* this driver won't report these */
+ clear_bit(port, &uhci->port_c_suspend);
OK(0);
case USB_PORT_FEAT_POWER:
/* UHCI has no power switching */
diff --git a/drivers/usb/image/Kconfig b/drivers/usb/image/Kconfig
index 0b0f80e4f9a7..b541b67d2eb6 100644
--- a/drivers/usb/image/Kconfig
+++ b/drivers/usb/image/Kconfig
@@ -30,11 +30,12 @@ config USB_MICROTEK
This driver can be compiled as a module, called microtek.
config USB_HPUSBSCSI
- tristate "HP53xx USB scanner support (EXPERIMENTAL)"
- depends on USB && SCSI && EXPERIMENTAL
+ tristate "HP53xx USB scanner support"
+ depends on USB && SCSI
help
Say Y here if you want support for the HP 53xx series of scanners
- and the Minolta Scan Dual. This driver is experimental.
+ and the Minolta Scan Dual.
The scanner will be accessible as a SCSI device.
+ Please note that recent versions of SANE use usbfs, not this driver.
This can be compiled as a module, called hpusbscsi.
diff --git a/drivers/usb/image/hpusbscsi.c b/drivers/usb/image/hpusbscsi.c
index dabc3666aee7..47a864b29f3d 100644
--- a/drivers/usb/image/hpusbscsi.c
+++ b/drivers/usb/image/hpusbscsi.c
@@ -106,7 +106,7 @@ hpusbscsi_usb_probe(struct usb_interface *intf,
/* In host->hostdata we store a pointer to desc */
new->host = scsi_host_alloc(&hpusbscsi_scsi_host_template, sizeof(new));
if (!new->host)
- goto out_unlink_controlurb;
+ goto out_kill_controlurb;
new->host->hostdata[0] = (unsigned long)new;
scsi_add_host(new->host, &intf->dev); /* XXX handle failure */
@@ -118,8 +118,8 @@ hpusbscsi_usb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, new);
return 0;
- out_unlink_controlurb:
- usb_unlink_urb(new->controlurb);
+ out_kill_controlurb:
+ usb_kill_urb(new->controlurb);
out_free_controlurb:
usb_free_urb(new->controlurb);
out_free_dataurb:
@@ -137,7 +137,7 @@ hpusbscsi_usb_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
scsi_remove_host(desc->host);
- usb_unlink_urb(desc->controlurb);
+ usb_kill_urb(desc->controlurb);
scsi_host_put(desc->host);
usb_free_urb(desc->controlurb);
@@ -280,8 +280,8 @@ static int hpusbscsi_scsi_abort (Scsi_Cmnd *srb)
struct hpusbscsi* hpusbscsi = (struct hpusbscsi*)(srb->device->host->hostdata[0]);
printk(KERN_DEBUG"Requested is canceled.\n");
- usb_unlink_urb(hpusbscsi->dataurb);
- usb_unlink_urb(hpusbscsi->controlurb);
+ usb_kill_urb(hpusbscsi->dataurb);
+ usb_kill_urb(hpusbscsi->controlurb);
hpusbscsi->state = HP_STATE_FREE;
return SCSI_ABORT_PENDING;
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index f401557f0356..66bb13307df5 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -317,7 +317,6 @@ static int mdc800_usb_waitForIRQ (int mode, int msec)
mdc800->camera_request_ready=1+mode;
add_wait_queue(&mdc800->irq_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
timeout = msec*HZ/1000;
while (!mdc800->irq_woken && timeout)
{
@@ -325,7 +324,6 @@ static int mdc800_usb_waitForIRQ (int mode, int msec)
timeout = schedule_timeout (timeout);
}
remove_wait_queue(&mdc800->irq_wait, &wait);
- set_current_state(TASK_RUNNING);
mdc800->irq_woken = 0;
if (mdc800->camera_request_ready>0)
@@ -543,9 +541,9 @@ static void mdc800_usb_disconnect (struct usb_interface *intf)
mdc800->state=NOT_CONNECTED;
- usb_unlink_urb (mdc800->irq_urb);
- usb_unlink_urb (mdc800->write_urb);
- usb_unlink_urb (mdc800->download_urb);
+ usb_kill_urb(mdc800->irq_urb);
+ usb_kill_urb(mdc800->write_urb);
+ usb_kill_urb(mdc800->download_urb);
mdc800->dev = NULL;
usb_set_intfdata(intf, NULL);
@@ -649,9 +647,9 @@ static int mdc800_device_release (struct inode* inode, struct file *file)
down (&mdc800->io_lock);
if (mdc800->open && (mdc800->state != NOT_CONNECTED))
{
- usb_unlink_urb (mdc800->irq_urb);
- usb_unlink_urb (mdc800->write_urb);
- usb_unlink_urb (mdc800->download_urb);
+ usb_kill_urb(mdc800->irq_urb);
+ usb_kill_urb(mdc800->write_urb);
+ usb_kill_urb(mdc800->download_urb);
mdc800->open=0;
}
else
@@ -725,7 +723,6 @@ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t l
set_current_state(TASK_UNINTERRUPTIBLE);
timeout = schedule_timeout (timeout);
}
- set_current_state(TASK_RUNNING);
remove_wait_queue(&mdc800->download_wait, &wait);
mdc800->downloaded = 0;
if (mdc800->download_urb->status != 0)
@@ -851,12 +848,11 @@ static ssize_t mdc800_device_write (struct file *file, const char __user *buf, s
set_current_state(TASK_UNINTERRUPTIBLE);
timeout = schedule_timeout (timeout);
}
- set_current_state(TASK_RUNNING);
remove_wait_queue(&mdc800->write_wait, &wait);
mdc800->written = 0;
if (mdc800->state == WORKING)
{
- usb_unlink_urb (mdc800->write_urb);
+ usb_kill_urb(mdc800->write_urb);
up (&mdc800->io_lock);
return -EIO;
}
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 9d3aa3075086..2a18c35629eb 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -324,7 +324,7 @@ static inline void mts_urb_abort(struct mts_desc* desc) {
MTS_DEBUG_GOT_HERE();
mts_debug_dump(desc);
- usb_unlink_urb( desc->urb );
+ usb_kill_urb( desc->urb );
}
static int mts_scsi_abort (Scsi_Cmnd *srb)
@@ -341,12 +341,18 @@ static int mts_scsi_abort (Scsi_Cmnd *srb)
static int mts_scsi_host_reset (Scsi_Cmnd *srb)
{
struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
+ int result, rc;
MTS_DEBUG_GOT_HERE();
mts_debug_dump(desc);
- usb_reset_device(desc->usb_dev); /*FIXME: untested on new reset code */
- return 0; /* RANT why here 0 and not SUCCESS */
+ rc = usb_lock_device_for_reset(desc->usb_dev, desc->usb_intf);
+ if (rc < 0)
+ return FAILED;
+ result = usb_reset_device(desc->usb_dev);;
+ if (rc)
+ usb_unlock_device(desc->usb_dev);
+ return result ? FAILED : SUCCESS;
}
static
@@ -777,6 +783,7 @@ static int mts_usb_probe(struct usb_interface *intf,
goto out_kfree;
new_desc->usb_dev = dev;
+ new_desc->usb_intf = intf;
init_MUTEX(&new_desc->lock);
/* endpoints */
@@ -822,10 +829,10 @@ static void mts_usb_disconnect (struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
+ usb_kill_urb(desc->urb);
scsi_remove_host(desc->host);
- usb_unlink_urb(desc->urb);
- scsi_host_put(desc->host);
+ scsi_host_put(desc->host);
usb_free_urb(desc->urb);
kfree(desc);
}
diff --git a/drivers/usb/image/microtek.h b/drivers/usb/image/microtek.h
index 206994ddcedf..3271deb8c001 100644
--- a/drivers/usb/image/microtek.h
+++ b/drivers/usb/image/microtek.h
@@ -31,6 +31,7 @@ struct mts_desc {
struct mts_desc *prev;
struct usb_device *usb_dev;
+ struct usb_interface *usb_intf;
/* Endpoint addresses */
u8 ep_out;
diff --git a/drivers/usb/input/aiptek.c b/drivers/usb/input/aiptek.c
index 44b8faee6536..67a5b70a6daf 100644
--- a/drivers/usb/input/aiptek.c
+++ b/drivers/usb/input/aiptek.c
@@ -837,7 +837,7 @@ static void aiptek_close(struct input_dev *inputdev)
struct aiptek *aiptek = inputdev->private;
if (--aiptek->openCount == 0) {
- usb_unlink_urb(aiptek->urb);
+ usb_kill_urb(aiptek->urb);
}
}
@@ -2258,7 +2258,7 @@ static void aiptek_disconnect(struct usb_interface *intf)
if (aiptek != NULL) {
/* Free & unhook everything from the system.
*/
- usb_unlink_urb(aiptek->urb);
+ usb_kill_urb(aiptek->urb);
input_unregister_device(&aiptek->inputdev);
aiptek_delete_files(&intf->dev);
usb_free_urb(aiptek->urb);
diff --git a/drivers/usb/input/ati_remote.c b/drivers/usb/input/ati_remote.c
index 61a42bdae1d8..91f7f434dd48 100644
--- a/drivers/usb/input/ati_remote.c
+++ b/drivers/usb/input/ati_remote.c
@@ -418,13 +418,14 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigne
while (timeout && (ati_remote->out_urb->status == -EINPROGRESS)
&& !(ati_remote->send_flags & SEND_FLAG_COMPLETE)) {
+ set_current_state(TASK_INTERRUPTIBLE);
timeout = schedule_timeout(timeout);
rmb();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&ati_remote->wait, &wait);
- usb_unlink_urb(ati_remote->out_urb);
+ usb_kill_urb(ati_remote->out_urb);
return retval;
}
@@ -624,10 +625,10 @@ static void ati_remote_delete(struct ati_remote *ati_remote)
if (!ati_remote) return;
if (ati_remote->irq_urb)
- usb_unlink_urb(ati_remote->irq_urb);
+ usb_kill_urb(ati_remote->irq_urb);
if (ati_remote->out_urb)
- usb_unlink_urb(ati_remote->out_urb);
+ usb_kill_urb(ati_remote->out_urb);
input_unregister_device(&ati_remote->idev);
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index 35baacd7706d..c38e7fccf564 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -1260,8 +1260,10 @@ int hid_wait_io(struct hid_device *hid)
add_wait_queue(&hid->wait, &wait);
while (timeout && (test_bit(HID_CTRL_RUNNING, &hid->iofl) ||
- test_bit(HID_OUT_RUNNING, &hid->iofl)))
+ test_bit(HID_OUT_RUNNING, &hid->iofl))) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
timeout = schedule_timeout(timeout);
+ }
set_current_state(TASK_RUNNING);
remove_wait_queue(&hid->wait, &wait);
@@ -1350,9 +1352,9 @@ void hid_init_reports(struct hid_device *hid)
while (ret) {
err |= ret;
if (test_bit(HID_CTRL_RUNNING, &hid->iofl))
- usb_unlink_urb(hid->urbctrl);
+ usb_kill_urb(hid->urbctrl);
if (test_bit(HID_OUT_RUNNING, &hid->iofl))
- usb_unlink_urb(hid->urbout);
+ usb_kill_urb(hid->urbout);
ret = hid_wait_io(hid);
}
@@ -1455,10 +1457,11 @@ void hid_init_reports(struct hid_device *hid)
#define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101
#define USB_DEVICE_ID_4_PHIDGETSERVO_20 0x8104
-#define USB_VENDOR_ID_CODEMERCS 0x07c0
-#define USB_DEVICE_ID_CODEMERCS_IOW40 0x1500
-#define USB_DEVICE_ID_CODEMERCS_IOW24 0x1501
-
+#define USB_VENDOR_ID_CODEMERCS 0x07c0
+#define USB_DEVICE_ID_CODEMERCS_IOW40 0x1500
+#define USB_DEVICE_ID_CODEMERCS_IOW24 0x1501
+#define USB_DEVICE_ID_CODEMERCS_IOW48 0x1502
+#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1503
static struct hid_blacklist {
__u16 idVendor;
@@ -1542,6 +1545,11 @@ static struct hid_blacklist {
{ USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
+ { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW48, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28, HID_QUIRK_IGNORE },
+
{ 0, 0 }
};
diff --git a/drivers/usb/input/kbtab.c b/drivers/usb/input/kbtab.c
index 39c0d38aefa4..2e4c0d246030 100644
--- a/drivers/usb/input/kbtab.c
+++ b/drivers/usb/input/kbtab.c
@@ -122,7 +122,7 @@ static void kbtab_close(struct input_dev *dev)
struct kbtab *kbtab = dev->private;
if (!--kbtab->open)
- usb_unlink_urb(kbtab->irq);
+ usb_kill_urb(kbtab->irq);
}
static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -205,7 +205,7 @@ static void kbtab_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (kbtab) {
- usb_unlink_urb(kbtab->irq);
+ usb_kill_urb(kbtab->irq);
input_unregister_device(&kbtab->dev);
usb_free_urb(kbtab->irq);
usb_buffer_free(interface_to_usbdev(intf), 10, kbtab->data, kbtab->data_dma);
diff --git a/drivers/usb/input/mtouchusb.c b/drivers/usb/input/mtouchusb.c
index ae5713876d4c..9dcfd7e2ffbf 100644
--- a/drivers/usb/input/mtouchusb.c
+++ b/drivers/usb/input/mtouchusb.c
@@ -155,7 +155,7 @@ static void mtouchusb_close (struct input_dev *input)
struct mtouch_usb *mtouch = input->private;
if (!--mtouch->open)
- usb_unlink_urb (mtouch->irq);
+ usb_kill_urb (mtouch->irq);
}
static int mtouchusb_alloc_buffers(struct usb_device *udev, struct mtouch_usb *mtouch)
@@ -320,7 +320,7 @@ static void mtouchusb_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (mtouch) {
dbg("%s - mtouch is initialized, cleaning up", __FUNCTION__);
- usb_unlink_urb(mtouch->irq);
+ usb_kill_urb(mtouch->irq);
input_unregister_device(&mtouch->input);
usb_free_urb(mtouch->irq);
mtouchusb_free_buffers(interface_to_usbdev(intf), mtouch);
diff --git a/drivers/usb/input/pid.c b/drivers/usb/input/pid.c
index a38b0db8dcea..d1ea1f056599 100644
--- a/drivers/usb/input/pid.c
+++ b/drivers/usb/input/pid.c
@@ -56,7 +56,7 @@ static void hid_pid_exit(struct hid_device* hid)
struct hid_ff_pid *private = hid->ff_private;
if (private->urbffout) {
- usb_unlink_urb(private->urbffout);
+ usb_kill_urb(private->urbffout);
usb_free_urb(private->urbffout);
}
}
diff --git a/drivers/usb/input/powermate.c b/drivers/usb/input/powermate.c
index 0ba1bcbfb431..852ebf8574a8 100644
--- a/drivers/usb/input/powermate.c
+++ b/drivers/usb/input/powermate.c
@@ -417,7 +417,7 @@ static void powermate_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (pm) {
pm->requires_update = 0;
- usb_unlink_urb(pm->irq);
+ usb_kill_urb(pm->irq);
input_unregister_device(&pm->input);
usb_free_urb(pm->irq);
usb_free_urb(pm->config);
diff --git a/drivers/usb/input/touchkitusb.c b/drivers/usb/input/touchkitusb.c
index 4917b042ef9a..14443f926ab0 100644
--- a/drivers/usb/input/touchkitusb.c
+++ b/drivers/usb/input/touchkitusb.c
@@ -141,7 +141,7 @@ static void touchkit_close(struct input_dev *input)
struct touchkit_usb *touchkit = input->private;
if (!--touchkit->open)
- usb_unlink_urb(touchkit->irq);
+ usb_kill_urb(touchkit->irq);
}
static int touchkit_alloc_buffers(struct usb_device *udev,
@@ -276,7 +276,7 @@ static void touchkit_disconnect(struct usb_interface *intf)
dbg("%s - touchkit is initialized, cleaning up", __FUNCTION__);
usb_set_intfdata(intf, NULL);
input_unregister_device(&touchkit->input);
- usb_unlink_urb(touchkit->irq);
+ usb_kill_urb(touchkit->irq);
usb_free_urb(touchkit->irq);
touchkit_free_buffers(interface_to_usbdev(intf), touchkit);
kfree(touchkit);
diff --git a/drivers/usb/input/usbkbd.c b/drivers/usb/input/usbkbd.c
index 0c51b1eb84cf..1700f405b00b 100644
--- a/drivers/usb/input/usbkbd.c
+++ b/drivers/usb/input/usbkbd.c
@@ -196,7 +196,7 @@ static void usb_kbd_close(struct input_dev *dev)
struct usb_kbd *kbd = dev->private;
if (!--kbd->open)
- usb_unlink_urb(kbd->irq);
+ usb_kill_urb(kbd->irq);
}
static int usb_kbd_alloc_mem(struct usb_device *dev, struct usb_kbd *kbd)
@@ -343,7 +343,7 @@ static void usb_kbd_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (kbd) {
- usb_unlink_urb(kbd->irq);
+ usb_kill_urb(kbd->irq);
input_unregister_device(&kbd->dev);
usb_kbd_free_mem(interface_to_usbdev(intf), kbd);
kfree(kbd);
diff --git a/drivers/usb/input/usbmouse.c b/drivers/usb/input/usbmouse.c
index 5bf27306ea3c..8c7381b74499 100644
--- a/drivers/usb/input/usbmouse.c
+++ b/drivers/usb/input/usbmouse.c
@@ -118,7 +118,7 @@ static void usb_mouse_close(struct input_dev *dev)
struct usb_mouse *mouse = dev->private;
if (!--mouse->open)
- usb_unlink_urb(mouse->irq);
+ usb_kill_urb(mouse->irq);
}
static int usb_mouse_probe(struct usb_interface * intf, const struct usb_device_id * id)
@@ -223,7 +223,7 @@ static void usb_mouse_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (mouse) {
- usb_unlink_urb(mouse->irq);
+ usb_kill_urb(mouse->irq);
input_unregister_device(&mouse->dev);
usb_free_urb(mouse->irq);
usb_buffer_free(interface_to_usbdev(intf), 8, mouse->data, mouse->data_dma);
diff --git a/drivers/usb/input/wacom.c b/drivers/usb/input/wacom.c
index 492a5cb12af4..471d1bf68bf0 100644
--- a/drivers/usb/input/wacom.c
+++ b/drivers/usb/input/wacom.c
@@ -608,7 +608,7 @@ static void wacom_close(struct input_dev *dev)
struct wacom *wacom = dev->private;
if (!--wacom->open)
- usb_unlink_urb(wacom->irq);
+ usb_kill_urb(wacom->irq);
}
static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -729,7 +729,7 @@ static void wacom_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (wacom) {
- usb_unlink_urb(wacom->irq);
+ usb_kill_urb(wacom->irq);
input_unregister_device(&wacom->dev);
usb_free_urb(wacom->irq);
usb_buffer_free(interface_to_usbdev(intf), 10, wacom->data, wacom->data_dma);
diff --git a/drivers/usb/input/xpad.c b/drivers/usb/input/xpad.c
index 1956eb06b75c..c5fa87edb667 100644
--- a/drivers/usb/input/xpad.c
+++ b/drivers/usb/input/xpad.c
@@ -214,7 +214,7 @@ static void xpad_close (struct input_dev *dev)
struct usb_xpad *xpad = dev->private;
if (!--xpad->open_count)
- usb_unlink_urb(xpad->irq_in);
+ usb_kill_urb(xpad->irq_in);
}
static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -325,7 +325,7 @@ static void xpad_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (xpad) {
- usb_unlink_urb(xpad->irq_in);
+ usb_kill_urb(xpad->irq_in);
input_unregister_device(&xpad->dev);
usb_free_urb(xpad->irq_in);
usb_buffer_free(interface_to_usbdev(intf), XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
diff --git a/drivers/usb/media/Kconfig b/drivers/usb/media/Kconfig
index 4ac7ac4f3b1d..678e5fcbe29a 100644
--- a/drivers/usb/media/Kconfig
+++ b/drivers/usb/media/Kconfig
@@ -123,11 +123,11 @@ config USB_SE401
module will be called se401.
config USB_SN9C102
- tristate "USB SN9C10[12] PC Camera Controller support"
+ tristate "USB SN9C10x PC Camera Controller support"
depends on USB && VIDEO_DEV
---help---
- Say Y here if you want support for cameras based on SONiX SN9C101
- or SN9C102 PC Camera Controllers.
+ Say Y here if you want support for cameras based on SONiX SN9C101,
+ SN9C102 or SN9C103 PC Camera Controllers.
See <file:Documentation/usb/sn9c102.txt> for more informations.
diff --git a/drivers/usb/media/dabusb.c b/drivers/usb/media/dabusb.c
index 0e5425f328d0..0aae6ecc7ad6 100644
--- a/drivers/usb/media/dabusb.c
+++ b/drivers/usb/media/dabusb.c
@@ -109,16 +109,13 @@ static void dump_urb (struct urb *urb)
static int dabusb_cancel_queue (pdabusb_t s, struct list_head *q)
{
unsigned long flags;
- struct list_head *p;
pbuff_t b;
dbg("dabusb_cancel_queue");
spin_lock_irqsave (&s->lock, flags);
- for (p = q->next; p != q; p = p->next) {
- b = list_entry (p, buff_t, buff_list);
-
+ list_for_each_entry(b, q, buff_list) {
#ifdef DEBUG
dump_urb(b->purb);
#endif
@@ -598,6 +595,7 @@ static int dabusb_open (struct inode *inode, struct file *file)
if (file->f_flags & O_NONBLOCK) {
return -EBUSY;
}
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout (HZ / 2);
if (signal_pending (current)) {
diff --git a/drivers/usb/media/konicawc.c b/drivers/usb/media/konicawc.c
index 3376654ca051..aca3093c2b87 100644
--- a/drivers/usb/media/konicawc.c
+++ b/drivers/usb/media/konicawc.c
@@ -474,13 +474,8 @@ static void konicawc_stop_data(struct uvd *uvd)
/* Unschedule all of the iso td's */
for (i=0; i < USBVIDEO_NUMSBUF; i++) {
- j = usb_unlink_urb(uvd->sbuf[i].urb);
- if (j < 0)
- err("usb_unlink_urb() error %d.", j);
-
- j = usb_unlink_urb(cam->sts_urb[i]);
- if (j < 0)
- err("usb_unlink_urb() error %d.", j);
+ usb_kill_urb(uvd->sbuf[i].urb);
+ usb_kill_urb(cam->sts_urb[i]);
}
if (!uvd->remove_pending) {
diff --git a/drivers/usb/media/ov511.c b/drivers/usb/media/ov511.c
index d040c8e30b6b..7de03331289e 100644
--- a/drivers/usb/media/ov511.c
+++ b/drivers/usb/media/ov511.c
@@ -3830,7 +3830,7 @@ ov51x_unlink_isoc(struct usb_ov511 *ov)
/* Unschedule all of the iso td's */
for (n = OV511_NUMSBUF - 1; n >= 0; n--) {
if (ov->sbuf[n].urb) {
- usb_unlink_urb(ov->sbuf[n].urb);
+ usb_kill_urb(ov->sbuf[n].urb);
usb_free_urb(ov->sbuf[n].urb);
ov->sbuf[n].urb = NULL;
}
diff --git a/drivers/usb/media/se401.c b/drivers/usb/media/se401.c
index c694caaac447..37d28640c790 100644
--- a/drivers/usb/media/se401.c
+++ b/drivers/usb/media/se401.c
@@ -514,7 +514,7 @@ static int se401_stop_stream(struct usb_se401 *se401)
se401_sndctrl(1, se401, SE401_REQ_CAMERA_POWER, 0, NULL, 0);
for (i=0; i<SE401_NUMSBUF; i++) if (se401->urb[i]) {
- usb_unlink_urb(se401->urb[i]);
+ usb_kill_urb(se401->urb[i]);
usb_free_urb(se401->urb[i]);
se401->urb[i]=NULL;
kfree(se401->sbuf[i].data);
@@ -883,7 +883,7 @@ static void usb_se401_remove_disconnected (struct usb_se401 *se401)
se401->dev = NULL;
for (i=0; i<SE401_NUMSBUF; i++) if (se401->urb[i]) {
- usb_unlink_urb(se401->urb[i]);
+ usb_kill_urb(se401->urb[i]);
usb_free_urb(se401->urb[i]);
se401->urb[i] = NULL;
kfree(se401->sbuf[i].data);
@@ -892,7 +892,7 @@ static void usb_se401_remove_disconnected (struct usb_se401 *se401)
kfree(se401->scratch[i].data);
}
if (se401->inturb) {
- usb_unlink_urb(se401->inturb);
+ usb_kill_urb(se401->inturb);
usb_free_urb(se401->inturb);
}
info("%s disconnected", se401->camera_name);
diff --git a/drivers/usb/media/sn9c102.h b/drivers/usb/media/sn9c102.h
index 62ff2144392c..a682dfc2f747 100644
--- a/drivers/usb/media/sn9c102.h
+++ b/drivers/usb/media/sn9c102.h
@@ -1,5 +1,5 @@
/***************************************************************************
- * V4L2 driver for SN9C10[12] PC Camera Controllers *
+ * V4L2 driver for SN9C10x PC Camera Controllers *
* *
* Copyright (C) 2004 by Luca Risolia <luca.risolia@studio.unibo.it> *
* *
@@ -49,15 +49,21 @@
/*****************************************************************************/
-#define SN9C102_MODULE_NAME "V4L2 driver for SN9C10[12] PC Camera Controllers"
+#define SN9C102_MODULE_NAME "V4L2 driver for SN9C10x PC Camera Controllers"
#define SN9C102_MODULE_AUTHOR "(C) 2004 Luca Risolia"
#define SN9C102_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
#define SN9C102_MODULE_LICENSE "GPL"
-#define SN9C102_MODULE_VERSION "1:1.08"
-#define SN9C102_MODULE_VERSION_CODE KERNEL_VERSION(1, 0, 8)
+#define SN9C102_MODULE_VERSION "1:1.12"
+#define SN9C102_MODULE_VERSION_CODE KERNEL_VERSION(1, 0, 12)
-SN9C102_ID_TABLE;
-SN9C102_SENSOR_TABLE;
+enum sn9c102_bridge {
+ BRIDGE_SN9C101 = 0x01,
+ BRIDGE_SN9C102 = 0x02,
+ BRIDGE_SN9C103 = 0x04,
+};
+
+SN9C102_ID_TABLE
+SN9C102_SENSOR_TABLE
enum sn9c102_frame_state {
F_UNUSED,
@@ -105,6 +111,7 @@ struct sn9c102_device {
struct video_device* v4ldev;
+ enum sn9c102_bridge bridge;
struct sn9c102_sensor* sensor;
struct usb_device* usbdev;
diff --git a/drivers/usb/media/sn9c102_core.c b/drivers/usb/media/sn9c102_core.c
index 9f775f74002c..6d428a5b8d03 100644
--- a/drivers/usb/media/sn9c102_core.c
+++ b/drivers/usb/media/sn9c102_core.c
@@ -1,5 +1,5 @@
/***************************************************************************
- * V4L2 driver for SN9C10[12] PC Camera Controllers *
+ * V4L2 driver for SN9C10x PC Camera Controllers *
* *
* Copyright (C) 2004 by Luca Risolia <luca.risolia@studio.unibo.it> *
* *
@@ -169,15 +169,15 @@ static u32 sn9c102_request_buffers(struct sn9c102_device* cam, u32 count)
cam->nbuffers = count;
while (cam->nbuffers > 0) {
- if ((buff = rvmalloc(cam->nbuffers * imagesize)))
+ if ((buff = rvmalloc(cam->nbuffers * PAGE_ALIGN(imagesize))))
break;
cam->nbuffers--;
}
for (i = 0; i < cam->nbuffers; i++) {
- cam->frame[i].bufmem = buff + i*imagesize;
+ cam->frame[i].bufmem = buff + i*PAGE_ALIGN(imagesize);
cam->frame[i].buf.index = i;
- cam->frame[i].buf.m.offset = i*imagesize;
+ cam->frame[i].buf.m.offset = i*PAGE_ALIGN(imagesize);
cam->frame[i].buf.length = imagesize;
cam->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
cam->frame[i].buf.sequence = 0;
@@ -388,7 +388,7 @@ sn9c102_i2c_try_raw_write(struct sn9c102_device* cam,
data[4] = data3;
data[5] = data4;
data[6] = data5;
- data[7] = 0x10;
+ data[7] = 0x14;
res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x08, 0x41,
0x08, 0, data, 8, SN9C102_CTRL_TIMEOUT);
if (res < 0)
@@ -400,7 +400,7 @@ sn9c102_i2c_try_raw_write(struct sn9c102_device* cam,
if (err)
DBG(3, "I2C write failed for %s image sensor", sensor->name)
- PDBGG("I2C write: %u bytes, data0 = 0x%02X, data1 = 0x%02X, "
+ PDBGG("I2C raw write: %u bytes, data0 = 0x%02X, data1 = 0x%02X, "
"data2 = 0x%02X, data3 = 0x%02X, data4 = 0x%02X, data5 = 0x%02X",
n, data0, data1, data2, data3, data4, data5)
@@ -634,7 +634,7 @@ static int sn9c102_start_transfer(struct sn9c102_device* cam)
struct usb_device *udev = cam->usbdev;
struct urb* urb;
const unsigned int wMaxPacketSize[] = {0, 128, 256, 384, 512,
- 680, 800, 900, 1023};
+ 680, 800, 900, 1023};
const unsigned int psz = wMaxPacketSize[SN9C102_ALTERNATE_SETTING];
s8 i, j;
int err = 0;
@@ -965,6 +965,11 @@ static ssize_t sn9c102_show_i2c_val(struct class_device* cd, char* buf)
return -ENODEV;
}
+ if (cam->sensor->slave_read_id == SN9C102_I2C_SLAVEID_UNAVAILABLE) {
+ up(&sn9c102_sysfs_lock);
+ return -ENOSYS;
+ }
+
if ((val = sn9c102_i2c_read(cam, cam->sysfs.i2c_reg)) < 0) {
up(&sn9c102_sysfs_lock);
return -EIO;
@@ -1022,15 +1027,79 @@ sn9c102_store_i2c_val(struct class_device* cd, const char* buf, size_t len)
static ssize_t
sn9c102_store_green(struct class_device* cd, const char* buf, size_t len)
{
+ struct sn9c102_device* cam;
+ enum sn9c102_bridge bridge;
ssize_t res = 0;
u8 value;
ssize_t count;
+ if (down_interruptible(&sn9c102_sysfs_lock))
+ return -ERESTARTSYS;
+
+ cam = video_get_drvdata(to_video_device(cd));
+ if (!cam) {
+ up(&sn9c102_sysfs_lock);
+ return -ENODEV;
+ }
+
+ bridge = cam->bridge;
+
+ up(&sn9c102_sysfs_lock);
+
value = sn9c102_strtou8(buf, len, &count);
- if (!count || value > 0x0f)
+ if (!count)
return -EINVAL;
- if ((res = sn9c102_store_reg(cd, "0x11", 4)) >= 0)
+ switch (bridge) {
+ case BRIDGE_SN9C101:
+ case BRIDGE_SN9C102:
+ if (value > 0x0f)
+ return -EINVAL;
+ if ((res = sn9c102_store_reg(cd, "0x11", 4)) >= 0)
+ res = sn9c102_store_val(cd, buf, len);
+ break;
+ case BRIDGE_SN9C103:
+ if (value > 0x7f)
+ return -EINVAL;
+ if ((res = sn9c102_store_reg(cd, "0x04", 4)) >= 0)
+ res = sn9c102_store_val(cd, buf, len);
+ break;
+ }
+
+ return res;
+}
+
+
+static ssize_t
+sn9c102_store_blue(struct class_device* cd, const char* buf, size_t len)
+{
+ ssize_t res = 0;
+ u8 value;
+ ssize_t count;
+
+ value = sn9c102_strtou8(buf, len, &count);
+ if (!count || value > 0x7f)
+ return -EINVAL;
+
+ if ((res = sn9c102_store_reg(cd, "0x06", 4)) >= 0)
+ res = sn9c102_store_val(cd, buf, len);
+
+ return res;
+}
+
+
+static ssize_t
+sn9c102_store_red(struct class_device* cd, const char* buf, size_t len)
+{
+ ssize_t res = 0;
+ u8 value;
+ ssize_t count;
+
+ value = sn9c102_strtou8(buf, len, &count);
+ if (!count || value > 0x7f)
+ return -EINVAL;
+
+ if ((res = sn9c102_store_reg(cd, "0x05", 4)) >= 0)
res = sn9c102_store_val(cd, buf, len);
return res;
@@ -1046,6 +1115,8 @@ static CLASS_DEVICE_ATTR(i2c_reg, S_IRUGO | S_IWUSR,
static CLASS_DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR,
sn9c102_show_i2c_val, sn9c102_store_i2c_val);
static CLASS_DEVICE_ATTR(green, S_IWUGO, NULL, sn9c102_store_green);
+static CLASS_DEVICE_ATTR(blue, S_IWUGO, NULL, sn9c102_store_blue);
+static CLASS_DEVICE_ATTR(red, S_IWUGO, NULL, sn9c102_store_red);
static void sn9c102_create_sysfs(struct sn9c102_device* cam)
@@ -1054,8 +1125,14 @@ static void sn9c102_create_sysfs(struct sn9c102_device* cam)
video_device_create_file(v4ldev, &class_device_attr_reg);
video_device_create_file(v4ldev, &class_device_attr_val);
- video_device_create_file(v4ldev, &class_device_attr_green);
- if (cam->sensor->slave_write_id && cam->sensor->slave_read_id) {
+ if (cam->bridge == BRIDGE_SN9C101 || cam->bridge == BRIDGE_SN9C102)
+ video_device_create_file(v4ldev, &class_device_attr_green);
+ else if (cam->bridge == BRIDGE_SN9C103) {
+ video_device_create_file(v4ldev, &class_device_attr_blue);
+ video_device_create_file(v4ldev, &class_device_attr_red);
+ }
+ if (cam->sensor->slave_write_id != SN9C102_I2C_SLAVEID_UNAVAILABLE ||
+ cam->sensor->slave_read_id != SN9C102_I2C_SLAVEID_UNAVAILABLE) {
video_device_create_file(v4ldev, &class_device_attr_i2c_reg);
video_device_create_file(v4ldev, &class_device_attr_i2c_val);
}
@@ -1092,21 +1169,13 @@ static int sn9c102_set_crop(struct sn9c102_device* cam, struct v4l2_rect* rect)
u8 h_start = (u8)(rect->left - s->cropcap.bounds.left),
v_start = (u8)(rect->top - s->cropcap.bounds.top),
h_size = (u8)(rect->width / 16),
- v_size = (u8)(rect->height / 16),
- ae_strx = 0x00,
- ae_stry = 0x00,
- ae_endx = h_size / 2,
- ae_endy = v_size / 2;
+ v_size = (u8)(rect->height / 16);
int err = 0;
err += sn9c102_write_reg(cam, h_start, 0x12);
err += sn9c102_write_reg(cam, v_start, 0x13);
err += sn9c102_write_reg(cam, h_size, 0x15);
err += sn9c102_write_reg(cam, v_size, 0x16);
- err += sn9c102_write_reg(cam, ae_strx, 0x1c);
- err += sn9c102_write_reg(cam, ae_stry, 0x1d);
- err += sn9c102_write_reg(cam, ae_endx, 0x1e);
- err += sn9c102_write_reg(cam, ae_endy, 0x1f);
if (err)
return -EIO;
@@ -1636,16 +1705,21 @@ static int sn9c102_v4l2_ioctl(struct inode* inode, struct file* filp,
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
- if ((err = s->set_ctrl(cam, &ctrl)))
- return err;
-
n = sizeof(s->qctrl) / sizeof(s->qctrl[0]);
for (i = 0; i < n; i++)
if (ctrl.id == s->qctrl[i].id) {
- s->_qctrl[i].default_value = ctrl.value;
+ if (ctrl.value < s->qctrl[i].minimum ||
+ ctrl.value > s->qctrl[i].maximum)
+ return -ERANGE;
+ ctrl.value -= ctrl.value % s->qctrl[i].step;
break;
}
+ if ((err = s->set_ctrl(cam, &ctrl)))
+ return err;
+
+ s->_qctrl[i].default_value = ctrl.value;
+
return 0;
}
@@ -1776,7 +1850,7 @@ static int sn9c102_v4l2_ioctl(struct inode* inode, struct file* filp,
DBG(1, "VIDIOC_S_CROP failed because of hardware "
"problems. To use the camera, close and open "
"/dev/video%d again.", cam->v4ldev->minor)
- return err;
+ return -EIO;
}
s->pix_format.width = rect->width/scale;
@@ -1951,7 +2025,7 @@ static int sn9c102_v4l2_ioctl(struct inode* inode, struct file* filp,
DBG(1, "VIDIOC_S_FMT failed because of hardware "
"problems. To use the camera, close and open "
"/dev/video%d again.", cam->v4ldev->minor)
- return err;
+ return -EIO;
}
memcpy(pfmt, pix, sizeof(*pix));
@@ -2286,16 +2360,28 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
r = sn9c102_read_reg(cam, 0x00);
if (r < 0 || r != 0x10) {
- DBG(1, "Sorry, this is not a SN9C10[12] based camera "
+ DBG(1, "Sorry, this is not a SN9C10x based camera "
"(vid/pid 0x%04X/0x%04X)",
sn9c102_id_table[i].idVendor,sn9c102_id_table[i].idProduct)
err = -ENODEV;
goto fail;
}
- DBG(2, "SN9C10[12] PC Camera Controller detected "
- "(vid/pid 0x%04X/0x%04X)",
- sn9c102_id_table[i].idVendor, sn9c102_id_table[i].idProduct)
+ cam->bridge = (sn9c102_id_table[i].idProduct & 0xffc0) == 0x6080 ?
+ BRIDGE_SN9C103 : BRIDGE_SN9C102;
+ switch (cam->bridge) {
+ case BRIDGE_SN9C101:
+ case BRIDGE_SN9C102:
+ DBG(2, "SN9C10[12] PC Camera Controller detected "
+ "(vid/pid 0x%04X/0x%04X)", sn9c102_id_table[i].idVendor,
+ sn9c102_id_table[i].idProduct)
+ break;
+ case BRIDGE_SN9C103:
+ DBG(2, "SN9C103 PC Camera Controller detected "
+ "(vid/pid 0x%04X/0x%04X)", sn9c102_id_table[i].idVendor,
+ sn9c102_id_table[i].idProduct)
+ break;
+ }
for (i = 0; sn9c102_sensor_table[i]; i++) {
err = sn9c102_sensor_table[i](cam);
@@ -2318,7 +2404,7 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
cam->state |= DEV_MISCONFIGURED;
}
- strcpy(cam->v4ldev->name, "SN9C10[12] PC Camera");
+ strcpy(cam->v4ldev->name, "SN9C10x PC Camera");
cam->v4ldev->owner = THIS_MODULE;
cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
cam->v4ldev->hardware = VID_HARDWARE_SN9C102;
diff --git a/drivers/usb/media/sn9c102_pas106b.c b/drivers/usb/media/sn9c102_pas106b.c
index a302a4845893..8453409ea125 100644
--- a/drivers/usb/media/sn9c102_pas106b.c
+++ b/drivers/usb/media/sn9c102_pas106b.c
@@ -1,5 +1,5 @@
/***************************************************************************
- * Driver for PAS106B image sensor connected to the SN9C10[12] PC Camera *
+ * Driver for PAS106B image sensor connected to the SN9C10x PC Camera *
* Controllers *
* *
* Copyright (C) 2004 by Luca Risolia <luca.risolia@studio.unibo.it> *
@@ -100,26 +100,26 @@ static int pas106b_set_ctrl(struct sn9c102_device* cam,
switch (ctrl->id) {
case V4L2_CID_RED_BALANCE:
- err += sn9c102_i2c_write(cam, 0x0c, ctrl->value & 0x1f);
+ err += sn9c102_i2c_write(cam, 0x0c, ctrl->value);
break;
case V4L2_CID_BLUE_BALANCE:
- err += sn9c102_i2c_write(cam, 0x09, ctrl->value & 0x1f);
+ err += sn9c102_i2c_write(cam, 0x09, ctrl->value);
break;
case V4L2_CID_GAIN:
- err += sn9c102_i2c_write(cam, 0x0e, ctrl->value & 0x1f);
+ err += sn9c102_i2c_write(cam, 0x0e, ctrl->value);
break;
case V4L2_CID_BRIGHTNESS:
- err += sn9c102_i2c_write(cam, 0x0d, 0x1f-(ctrl->value & 0x1f));
+ err += sn9c102_i2c_write(cam, 0x0d, 0x1f - ctrl->value);
break;
case V4L2_CID_CONTRAST:
- err += sn9c102_i2c_write(cam, 0x0f, ctrl->value & 0x03);
+ err += sn9c102_i2c_write(cam, 0x0f, ctrl->value);
break;
default:
return -EINVAL;
}
err += sn9c102_i2c_write(cam, 0x13, 0x01);
- return err;
+ return err ? -EIO : 0;
}
diff --git a/drivers/usb/media/sn9c102_pas202bcb.c b/drivers/usb/media/sn9c102_pas202bcb.c
index 26944eaf85d0..72063e885871 100644
--- a/drivers/usb/media/sn9c102_pas202bcb.c
+++ b/drivers/usb/media/sn9c102_pas202bcb.c
@@ -1,5 +1,5 @@
/***************************************************************************
- * Driver for PAS202BCB image sensor connected to the SN9C10[12] PC Camera *
+ * Driver for PAS202BCB image sensor connected to the SN9C10x PC Camera *
* Controllers *
* *
* Copyright (C) 2004 by Carlos Eduardo Medaglia Dyonisio *
@@ -36,18 +36,19 @@ static int pas202bcb_init(struct sn9c102_device* cam)
err += sn9c102_write_reg(cam, 0x00, 0x11);
err += sn9c102_write_reg(cam, 0x00, 0x14);
err += sn9c102_write_reg(cam, 0x20, 0x17);
- err += sn9c102_write_reg(cam, 0x20, 0x19);
+ err += sn9c102_write_reg(cam, 0x30, 0x19);
err += sn9c102_write_reg(cam, 0x09, 0x18);
- err += sn9c102_i2c_write(cam, 0x02, 0x0c);
+ err += sn9c102_i2c_write(cam, 0x02, 0x14);
err += sn9c102_i2c_write(cam, 0x03, 0x40);
err += sn9c102_i2c_write(cam, 0x04, 0x07);
err += sn9c102_i2c_write(cam, 0x05, 0x25);
err += sn9c102_i2c_write(cam, 0x0d, 0x2c);
err += sn9c102_i2c_write(cam, 0x0e, 0x01);
err += sn9c102_i2c_write(cam, 0x0f, 0xa9);
- err += sn9c102_i2c_write(cam, 0x08, 0x01);
+ err += sn9c102_i2c_write(cam, 0x10, 0x08);
err += sn9c102_i2c_write(cam, 0x0b, 0x01);
+ err += sn9c102_i2c_write(cam, 0x0c, 0x04);
err += sn9c102_i2c_write(cam, 0x13, 0x63);
err += sn9c102_i2c_write(cam, 0x15, 0x70);
err += sn9c102_i2c_write(cam, 0x11, 0x01);
@@ -95,23 +96,23 @@ static int pas202bcb_set_ctrl(struct sn9c102_device* cam,
switch (ctrl->id) {
case V4L2_CID_RED_BALANCE:
- err += sn9c102_i2c_write(cam, 0x09, ctrl->value & 0x0f);
+ err += sn9c102_i2c_write(cam, 0x09, ctrl->value);
break;
case V4L2_CID_BLUE_BALANCE:
- err += sn9c102_i2c_write(cam, 0x07, ctrl->value & 0x0f);
+ err += sn9c102_i2c_write(cam, 0x07, ctrl->value);
break;
case V4L2_CID_GAIN:
- err += sn9c102_i2c_write(cam, 0x10, ctrl->value & 0x1f);
+ err += sn9c102_i2c_write(cam, 0x10, ctrl->value);
break;
case V4L2_CID_BRIGHTNESS:
- err += sn9c102_i2c_write(cam, 0x06, 0x0f-(ctrl->value & 0x0f));
+ err += sn9c102_i2c_write(cam, 0x06, 0x0f - ctrl->value);
break;
default:
return -EINVAL;
}
err += sn9c102_i2c_write(cam, 0x11, 0x01);
- return err;
+ return err ? -EIO : 0;
}
@@ -217,7 +218,7 @@ int sn9c102_probe_pas202bcb(struct sn9c102_device* cam)
* NOTE: do NOT change the values!
*/
err += sn9c102_write_reg(cam, 0x01, 0x01); /* sensor power down */
- err += sn9c102_write_reg(cam, 0x00, 0x01); /* sensor power on */
+ err += sn9c102_write_reg(cam, 0x40, 0x01); /* sensor power on */
err += sn9c102_write_reg(cam, 0x28, 0x17); /* sensor clock at 24 MHz */
if (err)
return -EIO;
diff --git a/drivers/usb/media/sn9c102_sensor.h b/drivers/usb/media/sn9c102_sensor.h
index 3e7e4a2578bc..01bcad14ca6d 100644
--- a/drivers/usb/media/sn9c102_sensor.h
+++ b/drivers/usb/media/sn9c102_sensor.h
@@ -1,5 +1,5 @@
/***************************************************************************
- * API for image sensors connected to the SN9C10[12] PC Camera Controllers *
+ * API for image sensors connected to the SN9C10x PC Camera Controllers *
* *
* Copyright (C) 2004 by Luca Risolia <luca.risolia@studio.unibo.it> *
* *
@@ -89,17 +89,44 @@ sn9c102_attach_sensor(struct sn9c102_device* cam,
/* Each SN9C10X camera has proper PID/VID identifiers. Add them here in case.*/
#define SN9C102_ID_TABLE \
static const struct usb_device_id sn9c102_id_table[] = { \
- { USB_DEVICE(0xc45, 0x6001), }, /* TAS5110C1B */ \
- { USB_DEVICE(0xc45, 0x6005), }, /* TAS5110C1B */ \
- { USB_DEVICE(0xc45, 0x6009), }, /* PAS106B */ \
- { USB_DEVICE(0xc45, 0x600d), }, /* PAS106B */ \
- { USB_DEVICE(0xc45, 0x6024), }, \
- { USB_DEVICE(0xc45, 0x6025), }, /* TAS5130D1B and TAS5110C1B */ \
- { USB_DEVICE(0xc45, 0x6028), }, /* PAS202BCB */ \
- { USB_DEVICE(0xc45, 0x6029), }, /* PAS106B */ \
- { USB_DEVICE(0xc45, 0x602a), }, /* HV7131[D|E1] */ \
- { USB_DEVICE(0xc45, 0x602c), }, /* OV7620 */ \
- { USB_DEVICE(0xc45, 0x6030), }, /* MI03 */ \
+ { USB_DEVICE(0x0c45, 0x6001), }, /* TAS5110C1B */ \
+ { USB_DEVICE(0x0c45, 0x6005), }, /* TAS5110C1B */ \
+ { USB_DEVICE(0x0c45, 0x6009), }, /* PAS106B */ \
+ { USB_DEVICE(0x0c45, 0x600d), }, /* PAS106B */ \
+ { USB_DEVICE(0x0c45, 0x6024), }, \
+ { USB_DEVICE(0x0c45, 0x6025), }, /* TAS5130D1B and TAS5110C1B */ \
+ { USB_DEVICE(0x0c45, 0x6028), }, /* PAS202BCB */ \
+ { USB_DEVICE(0x0c45, 0x6029), }, /* PAS106B */ \
+ { USB_DEVICE(0x0c45, 0x602a), }, /* HV7131[D|E1] */ \
+ { USB_DEVICE(0x0c45, 0x602b), }, \
+ { USB_DEVICE(0x0c45, 0x602c), }, /* OV7620 */ \
+ { USB_DEVICE(0x0c45, 0x6030), }, /* MI03x */ \
+ { USB_DEVICE(0x0c45, 0x6080), }, \
+ { USB_DEVICE(0x0c45, 0x6082), }, /* MI0343 and MI0360 */ \
+ { USB_DEVICE(0x0c45, 0x6083), }, /* HV7131[D|E1] */ \
+ { USB_DEVICE(0x0c45, 0x6088), }, \
+ { USB_DEVICE(0x0c45, 0x608a), }, \
+ { USB_DEVICE(0x0c45, 0x608b), }, \
+ { USB_DEVICE(0x0c45, 0x608c), }, /* HV7131x */ \
+ { USB_DEVICE(0x0c45, 0x608e), }, /* CIS-VF10 */ \
+ { USB_DEVICE(0x0c45, 0x608f), }, /* OV7630 */ \
+ { USB_DEVICE(0x0c45, 0x60a0), }, \
+ { USB_DEVICE(0x0c45, 0x60a2), }, \
+ { USB_DEVICE(0x0c45, 0x60a3), }, \
+ { USB_DEVICE(0x0c45, 0x60a8), }, /* PAS106B */ \
+ { USB_DEVICE(0x0c45, 0x60aa), }, /* TAS5130D1B */ \
+ { USB_DEVICE(0x0c45, 0x60ab), }, /* TAS5110C1B */ \
+ { USB_DEVICE(0x0c45, 0x60ac), }, \
+ { USB_DEVICE(0x0c45, 0x60ae), }, \
+ { USB_DEVICE(0x0c45, 0x60af), }, /* PAS202BCB */ \
+ { USB_DEVICE(0x0c45, 0x60b0), }, \
+ { USB_DEVICE(0x0c45, 0x60b2), }, \
+ { USB_DEVICE(0x0c45, 0x60b3), }, \
+ { USB_DEVICE(0x0c45, 0x60b8), }, \
+ { USB_DEVICE(0x0c45, 0x60ba), }, \
+ { USB_DEVICE(0x0c45, 0x60bb), }, \
+ { USB_DEVICE(0x0c45, 0x60bc), }, \
+ { USB_DEVICE(0x0c45, 0x60be), }, \
{ } \
};
@@ -159,6 +186,9 @@ enum sn9c102_i2c_interface {
SN9C102_I2C_3WIRES,
};
+#define SN9C102_I2C_SLAVEID_FICTITIOUS 0xff
+#define SN9C102_I2C_SLAVEID_UNAVAILABLE 0x00
+
struct sn9c102_sensor {
char name[32], /* sensor name */
maintainer[64]; /* name of the mantainer <email> */
@@ -173,9 +203,7 @@ struct sn9c102_sensor {
/*
These identifiers must be provided if the image sensor implements
- the standard I2C protocol. TASC sensors don't, although they have a
- serial interface: so this is a case where the "raw" I2C version
- could be helpful.
+ the standard I2C protocol.
*/
u8 slave_read_id, slave_write_id; /* reg. 0x09 */
@@ -214,7 +242,8 @@ struct sn9c102_sensor {
the list above. The returned value must follow the V4L2
specifications for the VIDIOC_G|C_CTRL ioctls. V4L2_CID_H|VCENTER
are not supported by this driver, so do not implement them. Also,
- passed values are NOT checked to see if they are out of bounds.
+ you don't have to check whether the passed values are out of bounds,
+ given that this is done by the core module.
*/
struct v4l2_cropcap cropcap;
diff --git a/drivers/usb/media/sn9c102_tas5110c1b.c b/drivers/usb/media/sn9c102_tas5110c1b.c
index 68e1b2e0ce18..ce8b47b59a75 100644
--- a/drivers/usb/media/sn9c102_tas5110c1b.c
+++ b/drivers/usb/media/sn9c102_tas5110c1b.c
@@ -1,6 +1,6 @@
/***************************************************************************
- * Driver for TAS5110C1B image sensor connected to the SN9C10[12] PC *
- * Camera Controllers *
+ * Driver for TAS5110C1B image sensor connected to the SN9C10x PC Camera *
+ * Controllers *
* *
* Copyright (C) 2004 by Luca Risolia <luca.risolia@studio.unibo.it> *
* *
@@ -24,6 +24,8 @@
static struct sn9c102_sensor tas5110c1b;
+static struct v4l2_control tas5110c1b_gain;
+
static int tas5110c1b_init(struct sn9c102_device* cam)
{
@@ -38,25 +40,42 @@ static int tas5110c1b_init(struct sn9c102_device* cam)
err += sn9c102_write_reg(cam, 0x06, 0x18);
err += sn9c102_write_reg(cam, 0xfb, 0x19);
- err += sn9c102_i2c_try_raw_write(cam, &tas5110c1b, 4, 0x11, 0x00, 0xc0,
- 0x80, 0, 0);
+ err += sn9c102_i2c_write(cam, 0xc0, 0x80);
return err;
}
+static int tas5110c1b_get_ctrl(struct sn9c102_device* cam,
+ struct v4l2_control* ctrl)
+{
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ ctrl->value = tas5110c1b_gain.value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
static int tas5110c1b_set_ctrl(struct sn9c102_device* cam,
const struct v4l2_control* ctrl)
{
+ int err = 0;
+
switch (ctrl->id) {
case V4L2_CID_GAIN:
- return sn9c102_i2c_try_raw_write(cam, &tas5110c1b, 4, 0x11,
- 0x02, 0x20,
- 0xff - (ctrl->value & 0xff),
- 0, 0);
+ if (!(err += sn9c102_i2c_write(cam, 0x20, 0xf6 - ctrl->value)))
+ tas5110c1b_gain.value = ctrl->value;
+ break;
default:
return -EINVAL;
}
+
+ return err ? -EIO : 0;
}
@@ -85,6 +104,8 @@ static struct sn9c102_sensor tas5110c1b = {
.maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
.frequency = SN9C102_I2C_100KHZ,
.interface = SN9C102_I2C_3WIRES,
+ .slave_read_id = SN9C102_I2C_SLAVEID_UNAVAILABLE,
+ .slave_write_id = SN9C102_I2C_SLAVEID_FICTITIOUS,
.init = &tas5110c1b_init,
.qctrl = {
{
@@ -92,9 +113,9 @@ static struct sn9c102_sensor tas5110c1b = {
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "global gain",
.minimum = 0x00,
- .maximum = 0xff,
+ .maximum = 0xf6,
.step = 0x01,
- .default_value = 0x48,
+ .default_value = 0x40,
.flags = 0,
},
},
@@ -113,6 +134,7 @@ static struct sn9c102_sensor tas5110c1b = {
.height = 288,
},
},
+ .get_ctrl = &tas5110c1b_get_ctrl,
.set_crop = &tas5110c1b_set_crop,
.pix_format = {
.width = 352,
@@ -130,7 +152,8 @@ int sn9c102_probe_tas5110c1b(struct sn9c102_device* cam)
/* At the moment, sensor detection is based on USB pid/vid */
if (tas5110c1b.usbdev->descriptor.idProduct != 0x6001 &&
- tas5110c1b.usbdev->descriptor.idProduct != 0x6005)
+ tas5110c1b.usbdev->descriptor.idProduct != 0x6005 &&
+ tas5110c1b.usbdev->descriptor.idProduct != 0x60ab)
return -ENODEV;
return 0;
diff --git a/drivers/usb/media/sn9c102_tas5130d1b.c b/drivers/usb/media/sn9c102_tas5130d1b.c
index 0bab19435399..0048e9c20d80 100644
--- a/drivers/usb/media/sn9c102_tas5130d1b.c
+++ b/drivers/usb/media/sn9c102_tas5130d1b.c
@@ -1,6 +1,6 @@
/***************************************************************************
- * Driver for TAS5130D1B image sensor connected to the SN9C10[12] PC *
- * Camera Controllers *
+ * Driver for TAS5130D1B image sensor connected to the SN9C10x PC Camera *
+ * Controllers *
* *
* Copyright (C) 2004 by Luca Risolia <luca.risolia@studio.unibo.it> *
* *
@@ -24,6 +24,8 @@
static struct sn9c102_sensor tas5130d1b;
+static struct v4l2_control tas5130d1b_gain, tas5130d1b_exposure;
+
static int tas5130d1b_init(struct sn9c102_device* cam)
{
@@ -38,25 +40,47 @@ static int tas5130d1b_init(struct sn9c102_device* cam)
err += sn9c102_write_reg(cam, 0x60, 0x17);
err += sn9c102_write_reg(cam, 0x07, 0x18);
- err += sn9c102_i2c_try_raw_write(cam, &tas5130d1b, 4, 0x11, 0x00, 0x40,
- 0x47, 0, 0);
-
return err;
}
+static int tas5130d1b_get_ctrl(struct sn9c102_device* cam,
+ struct v4l2_control* ctrl)
+{
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ ctrl->value = tas5130d1b_gain.value;
+ break;
+ case V4L2_CID_EXPOSURE:
+ ctrl->value = tas5130d1b_exposure.value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
static int tas5130d1b_set_ctrl(struct sn9c102_device* cam,
const struct v4l2_control* ctrl)
{
+ int err = 0;
+
switch (ctrl->id) {
case V4L2_CID_GAIN:
- return sn9c102_i2c_try_raw_write(cam, &tas5130d1b, 4, 0x11,
- 0x02, 0x20,
- 0xff - (ctrl->value & 0xff),
- 0, 0);
+ if (!(err += sn9c102_i2c_write(cam, 0x20, 0xf6 - ctrl->value)))
+ tas5130d1b_gain.value = ctrl->value;
+ break;
+ case V4L2_CID_EXPOSURE:
+ if (!(err += sn9c102_i2c_write(cam, 0x40, 0x47 - ctrl->value)))
+ tas5130d1b_exposure.value = ctrl->value;
+ break;
default:
return -EINVAL;
}
+
+ return err ? -EIO : 0;
}
@@ -85,6 +109,8 @@ static struct sn9c102_sensor tas5130d1b = {
.maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
.frequency = SN9C102_I2C_100KHZ,
.interface = SN9C102_I2C_3WIRES,
+ .slave_read_id = SN9C102_I2C_SLAVEID_UNAVAILABLE,
+ .slave_write_id = SN9C102_I2C_SLAVEID_FICTITIOUS,
.init = &tas5130d1b_init,
.qctrl = {
{
@@ -92,12 +118,23 @@ static struct sn9c102_sensor tas5130d1b = {
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "global gain",
.minimum = 0x00,
- .maximum = 0xff,
+ .maximum = 0xf6,
+ .step = 0x02,
+ .default_value = 0x00,
+ .flags = 0,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .minimum = 0x00,
+ .maximum = 0x47,
.step = 0x01,
.default_value = 0x00,
.flags = 0,
},
},
+ .get_ctrl = &tas5130d1b_get_ctrl,
.set_ctrl = &tas5130d1b_set_ctrl,
.cropcap = {
.bounds = {
@@ -129,7 +166,8 @@ int sn9c102_probe_tas5130d1b(struct sn9c102_device* cam)
sn9c102_attach_sensor(cam, &tas5130d1b);
/* At the moment, sensor detection is based on USB pid/vid */
- if (tas5130d1b.usbdev->descriptor.idProduct != 0x6025)
+ if (tas5130d1b.usbdev->descriptor.idProduct != 0x6025 &&
+ tas5130d1b.usbdev->descriptor.idProduct != 0x60aa)
return -ENODEV;
return 0;
diff --git a/drivers/usb/media/stv680.c b/drivers/usb/media/stv680.c
index cd73a45d0dd0..a91870f0b412 100644
--- a/drivers/usb/media/stv680.c
+++ b/drivers/usb/media/stv680.c
@@ -725,7 +725,7 @@ static int stv680_stop_stream (struct usb_stv *stv680)
for (i = 0; i < STV680_NUMSBUF; i++)
if (stv680->urb[i]) {
- usb_unlink_urb (stv680->urb[i]);
+ usb_kill_urb (stv680->urb[i]);
usb_free_urb (stv680->urb[i]);
stv680->urb[i] = NULL;
kfree (stv680->sbuf[i].data);
@@ -1457,7 +1457,7 @@ static inline void usb_stv680_remove_disconnected (struct usb_stv *stv680)
for (i = 0; i < STV680_NUMSBUF; i++)
if (stv680->urb[i]) {
- usb_unlink_urb (stv680->urb[i]);
+ usb_kill_urb (stv680->urb[i]);
usb_free_urb (stv680->urb[i]);
stv680->urb[i] = NULL;
kfree (stv680->sbuf[i].data);
diff --git a/drivers/usb/media/usbvideo.c b/drivers/usb/media/usbvideo.c
index 1107e398d6f3..122b7accf1e1 100644
--- a/drivers/usb/media/usbvideo.c
+++ b/drivers/usb/media/usbvideo.c
@@ -1910,9 +1910,7 @@ static void usbvideo_StopDataPump(struct uvd *uvd)
/* Unschedule all of the iso td's */
for (i=0; i < USBVIDEO_NUMSBUF; i++) {
- j = usb_unlink_urb(uvd->sbuf[i].urb);
- if (j < 0)
- err("%s: usb_unlink_urb() error %d.", __FUNCTION__, j);
+ usb_kill_urb(uvd->sbuf[i].urb);
}
if (uvd->debug > 1)
info("%s: streaming=0", __FUNCTION__);
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 8c958af4a1d3..d6401c0a80d5 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -121,18 +121,6 @@ config USB_CYTHERM
To compile this driver as a module, choose M here: the
module will be called cytherm.
-config USB_SPEEDTOUCH
- tristate "Alcatel Speedtouch USB support"
- depends on USB && ATM
- select CRC32
- help
- Say Y here if you have an Alcatel SpeedTouch USB or SpeedTouch 330
- modem. In order to use your modem you will need to install some user
- space tools, see <http://www.linux-usb.org/SpeedTouch/> for details.
-
- To compile this driver as a module, choose M here: the
- module will be called speedtch.
-
config USB_PHIDGETSERVO
tristate "USB PhidgetServo support"
depends on USB
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index d641a470e8d7..074a1d66250b 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_USB_LCD) += usblcd.o
obj-$(CONFIG_USB_LED) += usbled.o
obj-$(CONFIG_USB_LEGOTOWER) += legousbtower.o
obj-$(CONFIG_USB_RIO500) += rio500.o
-obj-$(CONFIG_USB_SPEEDTOUCH) += speedtch.o
obj-$(CONFIG_USB_TEST) += usbtest.o
obj-$(CONFIG_USB_TIGL) += tiglusb.o
obj-$(CONFIG_USB_USS720) += uss720.o
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index c4bebdacda5c..94842c34a4c4 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -516,7 +516,7 @@ static void auerchain_unlink_all (pauerchain_t acp)
urbp = acep->urbp;
urbp->transfer_flags &= ~URB_ASYNC_UNLINK;
dbg ("unlink active urb");
- usb_unlink_urb (urbp);
+ usb_kill_urb (urbp);
}
}
@@ -1171,22 +1171,16 @@ intoend:
endpoint. This function returns 0 if successful or an error code.
NOTE: no mutex please!
*/
-static int auerswald_int_release (pauerswald_t cp)
+static void auerswald_int_release (pauerswald_t cp)
{
- int ret = 0;
dbg ("auerswald_int_release");
/* stop the int endpoint */
- if (cp->inturbp) {
- ret = usb_unlink_urb (cp->inturbp);
- if (ret)
- dbg ("nonzero int unlink result received: %d", ret);
- }
+ if (cp->inturbp)
+ usb_kill_urb (cp->inturbp);
/* deallocate memory */
auerswald_int_free (cp);
-
- return ret;
}
/* --------------------------------------------------------------------- */
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 06c4bfa2f4b9..e83e91148970 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -505,12 +505,12 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
dev->interrupt_in_running = 0;
mb();
if (dev->interrupt_in_urb != NULL && dev->udev) {
- usb_unlink_urb (dev->interrupt_in_urb);
+ usb_kill_urb (dev->interrupt_in_urb);
}
}
if (dev->interrupt_out_busy) {
if (dev->interrupt_out_urb != NULL && dev->udev) {
- usb_unlink_urb (dev->interrupt_out_urb);
+ usb_kill_urb (dev->interrupt_out_urb);
}
}
diff --git a/drivers/usb/misc/speedtch.c b/drivers/usb/misc/speedtch.c
deleted file mode 100644
index 667e2d1b227c..000000000000
--- a/drivers/usb/misc/speedtch.c
+++ /dev/null
@@ -1,1373 +0,0 @@
-/******************************************************************************
- * speedtouch.c - Alcatel SpeedTouch USB xDSL modem driver
- *
- * Copyright (C) 2001, Alcatel
- * Copyright (C) 2003, Duncan Sands
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- ******************************************************************************/
-
-/*
- * Written by Johan Verrept, maintained by Duncan Sands (duncan.sands@free.fr)
- *
- * 1.7+: - See the check-in logs
- *
- * 1.6: - No longer opens a connection if the firmware is not loaded
- * - Added support for the speedtouch 330
- * - Removed the limit on the number of devices
- * - Module now autoloads on device plugin
- * - Merged relevant parts of sarlib
- * - Replaced the kernel thread with a tasklet
- * - New packet transmission code
- * - Changed proc file contents
- * - Fixed all known SMP races
- * - Many fixes and cleanups
- * - Various fixes by Oliver Neukum (oliver@neukum.name)
- *
- * 1.5A: - Version for inclusion in 2.5 series kernel
- * - Modifications by Richard Purdie (rpurdie@rpsys.net)
- * - made compatible with kernel 2.5.6 onwards by changing
- * udsl_usb_send_data_context->urb to a pointer and adding code
- * to alloc and free it
- * - remove_wait_queue() added to udsl_atm_processqueue_thread()
- *
- * 1.5: - fixed memory leak when atmsar_decode_aal5 returned NULL.
- * (reported by stephen.robinson@zen.co.uk)
- *
- * 1.4: - changed the spin_lock() under interrupt to spin_lock_irqsave()
- * - unlink all active send urbs of a vcc that is being closed.
- *
- * 1.3.1: - added the version number
- *
- * 1.3: - Added multiple send urb support
- * - fixed memory leak and vcc->tx_inuse starvation bug
- * when not enough memory left in vcc.
- *
- * 1.2: - Fixed race condition in udsl_usb_send_data()
- * 1.1: - Turned off packet debugging
- *
- */
-
-#include <asm/semaphore.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/errno.h>
-#include <linux/proc_fs.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <asm/uaccess.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/crc32.h>
-#include <linux/init.h>
-
-/*
-#define DEBUG
-#define VERBOSE_DEBUG
-*/
-
-#if !defined (DEBUG) && defined (CONFIG_USB_DEBUG)
-# define DEBUG
-#endif
-
-#include <linux/usb.h>
-
-#ifdef DEBUG
-#define DEBUG_ON(x) BUG_ON(x)
-#else
-#define DEBUG_ON(x) do { if (x); } while (0)
-#endif
-
-#ifdef VERBOSE_DEBUG
-static int udsl_print_packet (const unsigned char *data, int len);
-#define PACKETDEBUG(arg...) udsl_print_packet (arg)
-#define vdbg(arg...) dbg (arg)
-#else
-#define PACKETDEBUG(arg...)
-#define vdbg(arg...)
-#endif
-
-#define DRIVER_AUTHOR "Johan Verrept, Duncan Sands <duncan.sands@free.fr>"
-#define DRIVER_VERSION "1.8"
-#define DRIVER_DESC "Alcatel SpeedTouch USB driver version " DRIVER_VERSION
-
-static const char udsl_driver_name [] = "speedtch";
-
-#define SPEEDTOUCH_VENDORID 0x06b9
-#define SPEEDTOUCH_PRODUCTID 0x4061
-
-#define UDSL_MAX_RCV_URBS 4
-#define UDSL_MAX_SND_URBS 4
-#define UDSL_MAX_RCV_BUFS 8
-#define UDSL_MAX_SND_BUFS 8
-#define UDSL_MAX_RCV_BUF_SIZE 1024 /* ATM cells */
-#define UDSL_MAX_SND_BUF_SIZE 1024 /* ATM cells */
-#define UDSL_DEFAULT_RCV_URBS 2
-#define UDSL_DEFAULT_SND_URBS 2
-#define UDSL_DEFAULT_RCV_BUFS 4
-#define UDSL_DEFAULT_SND_BUFS 4
-#define UDSL_DEFAULT_RCV_BUF_SIZE 64 /* ATM cells */
-#define UDSL_DEFAULT_SND_BUF_SIZE 64 /* ATM cells */
-
-static unsigned int num_rcv_urbs = UDSL_DEFAULT_RCV_URBS;
-static unsigned int num_snd_urbs = UDSL_DEFAULT_SND_URBS;
-static unsigned int num_rcv_bufs = UDSL_DEFAULT_RCV_BUFS;
-static unsigned int num_snd_bufs = UDSL_DEFAULT_SND_BUFS;
-static unsigned int rcv_buf_size = UDSL_DEFAULT_RCV_BUF_SIZE;
-static unsigned int snd_buf_size = UDSL_DEFAULT_SND_BUF_SIZE;
-
-module_param (num_rcv_urbs, uint, 0444);
-MODULE_PARM_DESC (num_rcv_urbs, "Number of urbs used for reception (range: 0-" __MODULE_STRING (UDSL_MAX_RCV_URBS) ", default: " __MODULE_STRING (UDSL_DEFAULT_RCV_URBS) ")");
-
-module_param (num_snd_urbs, uint, 0444);
-MODULE_PARM_DESC (num_snd_urbs, "Number of urbs used for transmission (range: 0-" __MODULE_STRING (UDSL_MAX_SND_URBS) ", default: " __MODULE_STRING (UDSL_DEFAULT_SND_URBS) ")");
-
-module_param (num_rcv_bufs, uint, 0444);
-MODULE_PARM_DESC (num_rcv_bufs, "Number of buffers used for reception (range: 0-" __MODULE_STRING (UDSL_MAX_RCV_BUFS) ", default: " __MODULE_STRING (UDSL_DEFAULT_RCV_BUFS) ")");
-
-module_param (num_snd_bufs, uint, 0444);
-MODULE_PARM_DESC (num_snd_bufs, "Number of buffers used for transmission (range: 0-" __MODULE_STRING (UDSL_MAX_SND_BUFS) ", default: " __MODULE_STRING (UDSL_DEFAULT_SND_BUFS) ")");
-
-module_param (rcv_buf_size, uint, 0444);
-MODULE_PARM_DESC (rcv_buf_size, "Size of the buffers used for reception (range: 0-" __MODULE_STRING (UDSL_MAX_RCV_BUF_SIZE) ", default: " __MODULE_STRING (UDSL_DEFAULT_RCV_BUF_SIZE) ")");
-
-module_param (snd_buf_size, uint, 0444);
-MODULE_PARM_DESC (snd_buf_size, "Size of the buffers used for transmission (range: 0-" __MODULE_STRING (UDSL_MAX_SND_BUF_SIZE) ", default: " __MODULE_STRING (UDSL_DEFAULT_SND_BUF_SIZE) ")");
-
-#define UDSL_IOCTL_LINE_UP 1
-#define UDSL_IOCTL_LINE_DOWN 2
-
-#define UDSL_ENDPOINT_DATA_OUT 0x07
-#define UDSL_ENDPOINT_DATA_IN 0x87
-
-#define ATM_CELL_HEADER (ATM_CELL_SIZE - ATM_CELL_PAYLOAD)
-#define UDSL_NUM_CELLS(x) (((x) + ATM_AAL5_TRAILER + ATM_CELL_PAYLOAD - 1) / ATM_CELL_PAYLOAD)
-
-#define hex2int(c) ( (c >= '0') && (c <= '9') ? (c - '0') : ((c & 0xf) + 9) )
-
-static struct usb_device_id udsl_usb_ids [] = {
- { USB_DEVICE (SPEEDTOUCH_VENDORID, SPEEDTOUCH_PRODUCTID) },
- { }
-};
-
-MODULE_DEVICE_TABLE (usb, udsl_usb_ids);
-
-/* receive */
-
-struct udsl_receive_buffer {
- struct list_head list;
- unsigned char *base;
- unsigned int filled_cells;
-};
-
-struct udsl_receiver {
- struct list_head list;
- struct udsl_receive_buffer *buffer;
- struct urb *urb;
- struct udsl_instance_data *instance;
-};
-
-struct udsl_vcc_data {
- /* vpi/vci lookup */
- struct list_head list;
- short vpi;
- int vci;
- struct atm_vcc *vcc;
-
- /* raw cell reassembly */
- struct sk_buff *sarb;
-};
-
-/* send */
-
-struct udsl_send_buffer {
- struct list_head list;
- unsigned char *base;
- unsigned char *free_start;
- unsigned int free_cells;
-};
-
-struct udsl_sender {
- struct list_head list;
- struct udsl_send_buffer *buffer;
- struct urb *urb;
- struct udsl_instance_data *instance;
-};
-
-struct udsl_control {
- struct atm_skb_data atm_data;
- unsigned int num_cells;
- unsigned int num_entire;
- unsigned int pdu_padding;
- unsigned char cell_header [ATM_CELL_HEADER];
- unsigned char aal5_trailer [ATM_AAL5_TRAILER];
-};
-
-#define UDSL_SKB(x) ((struct udsl_control *)(x)->cb)
-
-/* main driver data */
-
-struct udsl_instance_data {
- struct semaphore serialize;
-
- /* USB device part */
- struct usb_device *usb_dev;
- char description [64];
- int firmware_loaded;
-
- /* ATM device part */
- struct atm_dev *atm_dev;
- struct list_head vcc_list;
-
- /* receive */
- struct udsl_receiver receivers [UDSL_MAX_RCV_URBS];
- struct udsl_receive_buffer receive_buffers [UDSL_MAX_RCV_BUFS];
-
- spinlock_t receive_lock;
- struct list_head spare_receivers;
- struct list_head filled_receive_buffers;
-
- struct tasklet_struct receive_tasklet;
- struct list_head spare_receive_buffers;
-
- /* send */
- struct udsl_sender senders [UDSL_MAX_SND_URBS];
- struct udsl_send_buffer send_buffers [UDSL_MAX_SND_BUFS];
-
- struct sk_buff_head sndqueue;
-
- spinlock_t send_lock;
- struct list_head spare_senders;
- struct list_head spare_send_buffers;
-
- struct tasklet_struct send_tasklet;
- struct sk_buff *current_skb; /* being emptied */
- struct udsl_send_buffer *current_buffer; /* being filled */
- struct list_head filled_send_buffers;
-};
-
-/* ATM */
-
-static void udsl_atm_dev_close (struct atm_dev *dev);
-static int udsl_atm_open (struct atm_vcc *vcc);
-static void udsl_atm_close (struct atm_vcc *vcc);
-static int udsl_atm_ioctl (struct atm_dev *dev, unsigned int cmd, void __user *arg);
-static int udsl_atm_send (struct atm_vcc *vcc, struct sk_buff *skb);
-static int udsl_atm_proc_read (struct atm_dev *atm_dev, loff_t *pos, char *page);
-
-static struct atmdev_ops udsl_atm_devops = {
- .dev_close = udsl_atm_dev_close,
- .open = udsl_atm_open,
- .close = udsl_atm_close,
- .ioctl = udsl_atm_ioctl,
- .send = udsl_atm_send,
- .proc_read = udsl_atm_proc_read,
- .owner = THIS_MODULE,
-};
-
-/* USB */
-
-static int udsl_usb_probe (struct usb_interface *intf, const struct usb_device_id *id);
-static void udsl_usb_disconnect (struct usb_interface *intf);
-static int udsl_usb_ioctl (struct usb_interface *intf, unsigned int code, void *user_data);
-
-static struct usb_driver udsl_usb_driver = {
- .owner = THIS_MODULE,
- .name = udsl_driver_name,
- .probe = udsl_usb_probe,
- .disconnect = udsl_usb_disconnect,
- .ioctl = udsl_usb_ioctl,
- .id_table = udsl_usb_ids,
-};
-
-
-/***********
-** misc **
-***********/
-
-static inline void udsl_pop (struct atm_vcc *vcc, struct sk_buff *skb)
-{
- if (vcc->pop)
- vcc->pop (vcc, skb);
- else
- dev_kfree_skb (skb);
-}
-
-
-/*************
-** decode **
-*************/
-
-static inline struct udsl_vcc_data *udsl_find_vcc (struct udsl_instance_data *instance, short vpi, int vci)
-{
- struct udsl_vcc_data *vcc;
-
- list_for_each_entry (vcc, &instance->vcc_list, list)
- if ((vcc->vci == vci) && (vcc->vpi == vpi))
- return vcc;
- return NULL;
-}
-
-static void udsl_extract_cells (struct udsl_instance_data *instance, unsigned char *source, unsigned int howmany)
-{
- struct udsl_vcc_data *cached_vcc = NULL;
- struct atm_vcc *vcc;
- struct sk_buff *sarb;
- struct udsl_vcc_data *vcc_data;
- int cached_vci = 0;
- unsigned int i;
- int pti;
- int vci;
- short cached_vpi = 0;
- short vpi;
-
- for (i = 0; i < howmany; i++, source += ATM_CELL_SIZE) {
- vpi = ((source [0] & 0x0f) << 4) | (source [1] >> 4);
- vci = ((source [1] & 0x0f) << 12) | (source [2] << 4) | (source [3] >> 4);
- pti = (source [3] & 0x2) != 0;
-
- vdbg ("udsl_extract_cells: vpi %hd, vci %d, pti %d", vpi, vci, pti);
-
- if (cached_vcc && (vci == cached_vci) && (vpi == cached_vpi))
- vcc_data = cached_vcc;
- else if ((vcc_data = udsl_find_vcc (instance, vpi, vci))) {
- cached_vcc = vcc_data;
- cached_vpi = vpi;
- cached_vci = vci;
- } else {
- dbg ("udsl_extract_cells: unknown vpi/vci (%hd/%d)!", vpi, vci);
- continue;
- }
-
- vcc = vcc_data->vcc;
- sarb = vcc_data->sarb;
-
- if (sarb->tail + ATM_CELL_PAYLOAD > sarb->end) {
- dbg ("udsl_extract_cells: buffer overrun (sarb->len %u, vcc: 0x%p)!", sarb->len, vcc);
- /* discard cells already received */
- skb_trim (sarb, 0);
- }
-
- memcpy (sarb->tail, source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD);
- __skb_put (sarb, ATM_CELL_PAYLOAD);
-
- if (pti) {
- struct sk_buff *skb;
- unsigned int length;
- unsigned int pdu_length;
-
- length = (source [ATM_CELL_SIZE - 6] << 8) + source [ATM_CELL_SIZE - 5];
-
- /* guard against overflow */
- if (length > ATM_MAX_AAL5_PDU) {
- dbg ("udsl_extract_cells: bogus length %u (vcc: 0x%p)!", length, vcc);
- atomic_inc (&vcc->stats->rx_err);
- goto out;
- }
-
- pdu_length = UDSL_NUM_CELLS (length) * ATM_CELL_PAYLOAD;
-
- if (sarb->len < pdu_length) {
- dbg ("udsl_extract_cells: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!", pdu_length, sarb->len, vcc);
- atomic_inc (&vcc->stats->rx_err);
- goto out;
- }
-
- if (crc32_be (~0, sarb->tail - pdu_length, pdu_length) != 0xc704dd7b) {
- dbg ("udsl_extract_cells: packet failed crc check (vcc: 0x%p)!", vcc);
- atomic_inc (&vcc->stats->rx_err);
- goto out;
- }
-
- vdbg ("udsl_extract_cells: got packet (length: %u, pdu_length: %u, vcc: 0x%p)", length, pdu_length, vcc);
-
- if (!(skb = dev_alloc_skb (length))) {
- dbg ("udsl_extract_cells: no memory for skb (length: %u)!", length);
- atomic_inc (&vcc->stats->rx_drop);
- goto out;
- }
-
- vdbg ("udsl_extract_cells: allocated new sk_buff (skb: 0x%p, skb->truesize: %u)", skb, skb->truesize);
-
- if (!atm_charge (vcc, skb->truesize)) {
- dbg ("udsl_extract_cells: failed atm_charge (skb->truesize: %u)!", skb->truesize);
- dev_kfree_skb (skb);
- goto out; /* atm_charge increments rx_drop */
- }
-
- memcpy (skb->data, sarb->tail - pdu_length, length);
- __skb_put (skb, length);
-
- vdbg ("udsl_extract_cells: sending skb 0x%p, skb->len %u, skb->truesize %u", skb, skb->len, skb->truesize);
-
- PACKETDEBUG (skb->data, skb->len);
-
- vcc->push (vcc, skb);
-
- atomic_inc (&vcc->stats->rx);
-out:
- skb_trim (sarb, 0);
- }
- }
-}
-
-
-/*************
-** encode **
-*************/
-
-static const unsigned char zeros [ATM_CELL_PAYLOAD];
-
-static void udsl_groom_skb (struct atm_vcc *vcc, struct sk_buff *skb)
-{
- struct udsl_control *ctrl = UDSL_SKB (skb);
- unsigned int zero_padding;
- u32 crc;
-
- ctrl->atm_data.vcc = vcc;
- ctrl->cell_header [0] = vcc->vpi >> 4;
- ctrl->cell_header [1] = (vcc->vpi << 4) | (vcc->vci >> 12);
- ctrl->cell_header [2] = vcc->vci >> 4;
- ctrl->cell_header [3] = vcc->vci << 4;
- ctrl->cell_header [4] = 0xec;
-
- ctrl->num_cells = UDSL_NUM_CELLS (skb->len);
- ctrl->num_entire = skb->len / ATM_CELL_PAYLOAD;
-
- zero_padding = ctrl->num_cells * ATM_CELL_PAYLOAD - skb->len - ATM_AAL5_TRAILER;
-
- if (ctrl->num_entire + 1 < ctrl->num_cells)
- ctrl->pdu_padding = zero_padding - (ATM_CELL_PAYLOAD - ATM_AAL5_TRAILER);
- else
- ctrl->pdu_padding = zero_padding;
-
- ctrl->aal5_trailer [0] = 0; /* UU = 0 */
- ctrl->aal5_trailer [1] = 0; /* CPI = 0 */
- ctrl->aal5_trailer [2] = skb->len >> 8;
- ctrl->aal5_trailer [3] = skb->len;
-
- crc = crc32_be (~0, skb->data, skb->len);
- crc = crc32_be (crc, zeros, zero_padding);
- crc = crc32_be (crc, ctrl->aal5_trailer, 4);
- crc = ~crc;
-
- ctrl->aal5_trailer [4] = crc >> 24;
- ctrl->aal5_trailer [5] = crc >> 16;
- ctrl->aal5_trailer [6] = crc >> 8;
- ctrl->aal5_trailer [7] = crc;
-}
-
-static unsigned int udsl_write_cells (unsigned int howmany, struct sk_buff *skb, unsigned char **target_p)
-{
- struct udsl_control *ctrl = UDSL_SKB (skb);
- unsigned char *target = *target_p;
- unsigned int nc, ne, i;
-
- vdbg ("udsl_write_cells: howmany=%u, skb->len=%d, num_cells=%u, num_entire=%u, pdu_padding=%u", howmany, skb->len, ctrl->num_cells, ctrl->num_entire, ctrl->pdu_padding);
-
- nc = ctrl->num_cells;
- ne = min (howmany, ctrl->num_entire);
-
- for (i = 0; i < ne; i++) {
- memcpy (target, ctrl->cell_header, ATM_CELL_HEADER);
- target += ATM_CELL_HEADER;
- memcpy (target, skb->data, ATM_CELL_PAYLOAD);
- target += ATM_CELL_PAYLOAD;
- __skb_pull (skb, ATM_CELL_PAYLOAD);
- }
-
- ctrl->num_entire -= ne;
-
- if (!(ctrl->num_cells -= ne) || !(howmany -= ne))
- goto out;
-
- memcpy (target, ctrl->cell_header, ATM_CELL_HEADER);
- target += ATM_CELL_HEADER;
- memcpy (target, skb->data, skb->len);
- target += skb->len;
- __skb_pull (skb, skb->len);
- memset (target, 0, ctrl->pdu_padding);
- target += ctrl->pdu_padding;
-
- if (--ctrl->num_cells) {
- if (!--howmany) {
- ctrl->pdu_padding = ATM_CELL_PAYLOAD - ATM_AAL5_TRAILER;
- goto out;
- }
-
- memcpy (target, ctrl->cell_header, ATM_CELL_HEADER);
- target += ATM_CELL_HEADER;
- memset (target, 0, ATM_CELL_PAYLOAD - ATM_AAL5_TRAILER);
- target += ATM_CELL_PAYLOAD - ATM_AAL5_TRAILER;
-
- DEBUG_ON (--ctrl->num_cells);
- }
-
- memcpy (target, ctrl->aal5_trailer, ATM_AAL5_TRAILER);
- target += ATM_AAL5_TRAILER;
- /* set pti bit in last cell */
- *(target + 3 - ATM_CELL_SIZE) |= 0x2;
-
-out:
- *target_p = target;
- return nc - ctrl->num_cells;
-}
-
-
-/**************
-** receive **
-**************/
-
-static void udsl_complete_receive (struct urb *urb, struct pt_regs *regs)
-{
- struct udsl_receive_buffer *buf;
- struct udsl_instance_data *instance;
- struct udsl_receiver *rcv;
- unsigned long flags;
-
- if (!urb || !(rcv = urb->context)) {
- dbg ("udsl_complete_receive: bad urb!");
- return;
- }
-
- instance = rcv->instance;
- buf = rcv->buffer;
-
- buf->filled_cells = urb->actual_length / ATM_CELL_SIZE;
-
- vdbg ("udsl_complete_receive: urb 0x%p, status %d, actual_length %d, filled_cells %u, rcv 0x%p, buf 0x%p", urb, urb->status, urb->actual_length, buf->filled_cells, rcv, buf);
-
- DEBUG_ON (buf->filled_cells > rcv_buf_size);
-
- /* may not be in_interrupt() */
- spin_lock_irqsave (&instance->receive_lock, flags);
- list_add (&rcv->list, &instance->spare_receivers);
- list_add_tail (&buf->list, &instance->filled_receive_buffers);
- if (likely (!urb->status))
- tasklet_schedule (&instance->receive_tasklet);
- spin_unlock_irqrestore (&instance->receive_lock, flags);
-}
-
-static void udsl_process_receive (unsigned long data)
-{
- struct udsl_receive_buffer *buf;
- struct udsl_instance_data *instance = (struct udsl_instance_data *) data;
- struct udsl_receiver *rcv;
- int err;
-
-made_progress:
- while (!list_empty (&instance->spare_receive_buffers)) {
- spin_lock_irq (&instance->receive_lock);
- if (list_empty (&instance->spare_receivers)) {
- spin_unlock_irq (&instance->receive_lock);
- break;
- }
- rcv = list_entry (instance->spare_receivers.next, struct udsl_receiver, list);
- list_del (&rcv->list);
- spin_unlock_irq (&instance->receive_lock);
-
- buf = list_entry (instance->spare_receive_buffers.next, struct udsl_receive_buffer, list);
- list_del (&buf->list);
-
- rcv->buffer = buf;
-
- usb_fill_bulk_urb (rcv->urb,
- instance->usb_dev,
- usb_rcvbulkpipe (instance->usb_dev, UDSL_ENDPOINT_DATA_IN),
- buf->base,
- rcv_buf_size * ATM_CELL_SIZE,
- udsl_complete_receive,
- rcv);
-
- vdbg ("udsl_process_receive: sending urb 0x%p, rcv 0x%p, buf 0x%p", rcv->urb, rcv, buf);
-
- if ((err = usb_submit_urb(rcv->urb, GFP_ATOMIC)) < 0) {
- dbg ("udsl_process_receive: urb submission failed (%d)!", err);
- list_add (&buf->list, &instance->spare_receive_buffers);
- spin_lock_irq (&instance->receive_lock);
- list_add (&rcv->list, &instance->spare_receivers);
- spin_unlock_irq (&instance->receive_lock);
- break;
- }
- }
-
- spin_lock_irq (&instance->receive_lock);
- if (list_empty (&instance->filled_receive_buffers)) {
- spin_unlock_irq (&instance->receive_lock);
- return; /* done - no more buffers */
- }
- buf = list_entry (instance->filled_receive_buffers.next, struct udsl_receive_buffer, list);
- list_del (&buf->list);
- spin_unlock_irq (&instance->receive_lock);
- vdbg ("udsl_process_receive: processing buf 0x%p", buf);
- udsl_extract_cells (instance, buf->base, buf->filled_cells);
- list_add (&buf->list, &instance->spare_receive_buffers);
- goto made_progress;
-}
-
-
-/***********
-** send **
-***********/
-
-static void udsl_complete_send (struct urb *urb, struct pt_regs *regs)
-{
- struct udsl_instance_data *instance;
- struct udsl_sender *snd;
- unsigned long flags;
-
- if (!urb || !(snd = urb->context) || !(instance = snd->instance)) {
- dbg ("udsl_complete_send: bad urb!");
- return;
- }
-
- vdbg ("udsl_complete_send: urb 0x%p, status %d, snd 0x%p, buf 0x%p", urb, urb->status, snd, snd->buffer);
-
- /* may not be in_interrupt() */
- spin_lock_irqsave (&instance->send_lock, flags);
- list_add (&snd->list, &instance->spare_senders);
- list_add (&snd->buffer->list, &instance->spare_send_buffers);
- tasklet_schedule (&instance->send_tasklet);
- spin_unlock_irqrestore (&instance->send_lock, flags);
-}
-
-static void udsl_process_send (unsigned long data)
-{
- struct udsl_send_buffer *buf;
- struct udsl_instance_data *instance = (struct udsl_instance_data *) data;
- struct sk_buff *skb;
- struct udsl_sender *snd;
- int err;
- unsigned int num_written;
-
-made_progress:
- spin_lock_irq (&instance->send_lock);
- while (!list_empty (&instance->spare_senders)) {
- if (!list_empty (&instance->filled_send_buffers)) {
- buf = list_entry (instance->filled_send_buffers.next, struct udsl_send_buffer, list);
- list_del (&buf->list);
- } else if ((buf = instance->current_buffer)) {
- instance->current_buffer = NULL;
- } else /* all buffers empty */
- break;
-
- snd = list_entry (instance->spare_senders.next, struct udsl_sender, list);
- list_del (&snd->list);
- spin_unlock_irq (&instance->send_lock);
-
- snd->buffer = buf;
- usb_fill_bulk_urb (snd->urb,
- instance->usb_dev,
- usb_sndbulkpipe (instance->usb_dev, UDSL_ENDPOINT_DATA_OUT),
- buf->base,
- (snd_buf_size - buf->free_cells) * ATM_CELL_SIZE,
- udsl_complete_send,
- snd);
-
- vdbg ("udsl_process_send: submitting urb 0x%p (%d cells), snd 0x%p, buf 0x%p", snd->urb, snd_buf_size - buf->free_cells, snd, buf);
-
- if ((err = usb_submit_urb(snd->urb, GFP_ATOMIC)) < 0) {
- dbg ("udsl_process_send: urb submission failed (%d)!", err);
- spin_lock_irq (&instance->send_lock);
- list_add (&snd->list, &instance->spare_senders);
- spin_unlock_irq (&instance->send_lock);
- list_add (&buf->list, &instance->filled_send_buffers);
- return; /* bail out */
- }
-
- spin_lock_irq (&instance->send_lock);
- } /* while */
- spin_unlock_irq (&instance->send_lock);
-
- if (!instance->current_skb && !(instance->current_skb = skb_dequeue (&instance->sndqueue)))
- return; /* done - no more skbs */
-
- skb = instance->current_skb;
-
- if (!(buf = instance->current_buffer)) {
- spin_lock_irq (&instance->send_lock);
- if (list_empty (&instance->spare_send_buffers)) {
- instance->current_buffer = NULL;
- spin_unlock_irq (&instance->send_lock);
- return; /* done - no more buffers */
- }
- buf = list_entry (instance->spare_send_buffers.next, struct udsl_send_buffer, list);
- list_del (&buf->list);
- spin_unlock_irq (&instance->send_lock);
-
- buf->free_start = buf->base;
- buf->free_cells = snd_buf_size;
-
- instance->current_buffer = buf;
- }
-
- num_written = udsl_write_cells (buf->free_cells, skb, &buf->free_start);
-
- vdbg ("udsl_process_send: wrote %u cells from skb 0x%p to buffer 0x%p", num_written, skb, buf);
-
- if (!(buf->free_cells -= num_written)) {
- list_add_tail (&buf->list, &instance->filled_send_buffers);
- instance->current_buffer = NULL;
- }
-
- vdbg ("udsl_process_send: buffer contains %d cells, %d left", snd_buf_size - buf->free_cells, buf->free_cells);
-
- if (!UDSL_SKB (skb)->num_cells) {
- struct atm_vcc *vcc = UDSL_SKB (skb)->atm_data.vcc;
-
- udsl_pop (vcc, skb);
- instance->current_skb = NULL;
-
- atomic_inc (&vcc->stats->tx);
- }
-
- goto made_progress;
-}
-
-static void udsl_cancel_send (struct udsl_instance_data *instance, struct atm_vcc *vcc)
-{
- struct sk_buff *skb, *n;
-
- dbg ("udsl_cancel_send entered");
- spin_lock_irq (&instance->sndqueue.lock);
- for (skb = instance->sndqueue.next, n = skb->next; skb != (struct sk_buff *)&instance->sndqueue; skb = n, n = skb->next)
- if (UDSL_SKB (skb)->atm_data.vcc == vcc) {
- dbg ("udsl_cancel_send: popping skb 0x%p", skb);
- __skb_unlink (skb, &instance->sndqueue);
- udsl_pop (vcc, skb);
- }
- spin_unlock_irq (&instance->sndqueue.lock);
-
- tasklet_disable (&instance->send_tasklet);
- if ((skb = instance->current_skb) && (UDSL_SKB (skb)->atm_data.vcc == vcc)) {
- dbg ("udsl_cancel_send: popping current skb (0x%p)", skb);
- instance->current_skb = NULL;
- udsl_pop (vcc, skb);
- }
- tasklet_enable (&instance->send_tasklet);
- dbg ("udsl_cancel_send done");
-}
-
-static int udsl_atm_send (struct atm_vcc *vcc, struct sk_buff *skb)
-{
- struct udsl_instance_data *instance = vcc->dev->dev_data;
- int err;
-
- vdbg ("udsl_atm_send called (skb 0x%p, len %u)", skb, skb->len);
-
- if (!instance || !instance->usb_dev) {
- dbg ("udsl_atm_send: NULL data!");
- err = -ENODEV;
- goto fail;
- }
-
- if (vcc->qos.aal != ATM_AAL5) {
- dbg ("udsl_atm_send: unsupported ATM type %d!", vcc->qos.aal);
- err = -EINVAL;
- goto fail;
- }
-
- if (skb->len > ATM_MAX_AAL5_PDU) {
- dbg ("udsl_atm_send: packet too long (%d vs %d)!", skb->len, ATM_MAX_AAL5_PDU);
- err = -EINVAL;
- goto fail;
- }
-
- PACKETDEBUG (skb->data, skb->len);
-
- udsl_groom_skb (vcc, skb);
- skb_queue_tail (&instance->sndqueue, skb);
- tasklet_schedule (&instance->send_tasklet);
-
- return 0;
-
-fail:
- udsl_pop (vcc, skb);
- return err;
-}
-
-
-/**********
-** ATM **
-**********/
-
-static void udsl_atm_dev_close (struct atm_dev *dev)
-{
- struct udsl_instance_data *instance = dev->dev_data;
-
- if (!instance) {
- dbg ("udsl_atm_dev_close: NULL instance!");
- return;
- }
-
- dbg ("udsl_atm_dev_close: queue has %u elements", instance->sndqueue.qlen);
-
- tasklet_kill (&instance->receive_tasklet);
- tasklet_kill (&instance->send_tasklet);
- kfree (instance);
- dev->dev_data = NULL;
-}
-
-static int udsl_atm_proc_read (struct atm_dev *atm_dev, loff_t *pos, char *page)
-{
- struct udsl_instance_data *instance = atm_dev->dev_data;
- int left = *pos;
-
- if (!instance) {
- dbg ("udsl_atm_proc_read: NULL instance!");
- return -ENODEV;
- }
-
- if (!left--)
- return sprintf (page, "%s\n", instance->description);
-
- if (!left--)
- return sprintf (page, "MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
- atm_dev->esi [0], atm_dev->esi [1], atm_dev->esi [2],
- atm_dev->esi [3], atm_dev->esi [4], atm_dev->esi [5]);
-
- if (!left--)
- return sprintf (page, "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
- atomic_read (&atm_dev->stats.aal5.tx),
- atomic_read (&atm_dev->stats.aal5.tx_err),
- atomic_read (&atm_dev->stats.aal5.rx),
- atomic_read (&atm_dev->stats.aal5.rx_err),
- atomic_read (&atm_dev->stats.aal5.rx_drop));
-
- if (!left--) {
- switch (atm_dev->signal) {
- case ATM_PHY_SIG_FOUND:
- sprintf (page, "Line up");
- break;
- case ATM_PHY_SIG_LOST:
- sprintf (page, "Line down");
- break;
- default:
- sprintf (page, "Line state unknown");
- break;
- }
-
- if (instance->usb_dev) {
- if (!instance->firmware_loaded)
- strcat (page, ", no firmware\n");
- else
- strcat (page, ", firmware loaded\n");
- } else
- strcat (page, ", disconnected\n");
-
- return strlen (page);
- }
-
- return 0;
-}
-
-static int udsl_atm_open (struct atm_vcc *vcc)
-{
- struct udsl_instance_data *instance = vcc->dev->dev_data;
- struct udsl_vcc_data *new;
- unsigned int max_pdu;
- int vci = vcc->vci;
- short vpi = vcc->vpi;
-
- dbg ("udsl_atm_open: vpi %hd, vci %d", vpi, vci);
-
- if (!instance || !instance->usb_dev) {
- dbg ("udsl_atm_open: NULL data!");
- return -ENODEV;
- }
-
- /* only support AAL5 */
- if ((vcc->qos.aal != ATM_AAL5) || (vcc->qos.rxtp.max_sdu < 0) || (vcc->qos.rxtp.max_sdu > ATM_MAX_AAL5_PDU)) {
- dbg ("udsl_atm_open: unsupported ATM type %d!", vcc->qos.aal);
- return -EINVAL;
- }
-
- if (!instance->firmware_loaded) {
- dbg ("udsl_atm_open: firmware not loaded!");
- return -EAGAIN;
- }
-
- down (&instance->serialize); /* vs self, udsl_atm_close */
-
- if (udsl_find_vcc (instance, vpi, vci)) {
- dbg ("udsl_atm_open: %hd/%d already in use!", vpi, vci);
- up (&instance->serialize);
- return -EADDRINUSE;
- }
-
- if (!(new = kmalloc (sizeof (struct udsl_vcc_data), GFP_KERNEL))) {
- dbg ("udsl_atm_open: no memory for vcc_data!");
- up (&instance->serialize);
- return -ENOMEM;
- }
-
- memset (new, 0, sizeof (struct udsl_vcc_data));
- new->vcc = vcc;
- new->vpi = vpi;
- new->vci = vci;
-
- /* udsl_extract_cells requires at least one cell */
- max_pdu = max (1, UDSL_NUM_CELLS (vcc->qos.rxtp.max_sdu)) * ATM_CELL_PAYLOAD;
- if (!(new->sarb = alloc_skb (max_pdu, GFP_KERNEL))) {
- dbg ("udsl_atm_open: no memory for SAR buffer!");
- kfree (new);
- up (&instance->serialize);
- return -ENOMEM;
- }
-
- vcc->dev_data = new;
-
- tasklet_disable (&instance->receive_tasklet);
- list_add (&new->list, &instance->vcc_list);
- tasklet_enable (&instance->receive_tasklet);
-
- set_bit (ATM_VF_ADDR, &vcc->flags);
- set_bit (ATM_VF_PARTIAL, &vcc->flags);
- set_bit (ATM_VF_READY, &vcc->flags);
-
- up (&instance->serialize);
-
- tasklet_schedule (&instance->receive_tasklet);
-
- dbg ("udsl_atm_open: allocated vcc data 0x%p (max_pdu: %u)", new, max_pdu);
-
- return 0;
-}
-
-static void udsl_atm_close (struct atm_vcc *vcc)
-{
- struct udsl_instance_data *instance = vcc->dev->dev_data;
- struct udsl_vcc_data *vcc_data = vcc->dev_data;
-
- dbg ("udsl_atm_close called");
-
- if (!instance || !vcc_data) {
- dbg ("udsl_atm_close: NULL data!");
- return;
- }
-
- dbg ("udsl_atm_close: deallocating vcc 0x%p with vpi %d vci %d", vcc_data, vcc_data->vpi, vcc_data->vci);
-
- udsl_cancel_send (instance, vcc);
-
- down (&instance->serialize); /* vs self, udsl_atm_open */
-
- tasklet_disable (&instance->receive_tasklet);
- list_del (&vcc_data->list);
- tasklet_enable (&instance->receive_tasklet);
-
- kfree_skb (vcc_data->sarb);
- vcc_data->sarb = NULL;
-
- kfree (vcc_data);
- vcc->dev_data = NULL;
-
- vcc->vpi = ATM_VPI_UNSPEC;
- vcc->vci = ATM_VCI_UNSPEC;
- clear_bit (ATM_VF_READY, &vcc->flags);
- clear_bit (ATM_VF_PARTIAL, &vcc->flags);
- clear_bit (ATM_VF_ADDR, &vcc->flags);
-
- up (&instance->serialize);
-
- dbg ("udsl_atm_close successful");
-}
-
-static int udsl_atm_ioctl (struct atm_dev *dev, unsigned int cmd, void __user *arg)
-{
- switch (cmd) {
- case ATM_QUERYLOOP:
- return put_user (ATM_LM_NONE, (int __user *)arg) ? -EFAULT : 0;
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-
-/**********
-** USB **
-**********/
-
-static int udsl_set_alternate (struct udsl_instance_data *instance)
-{
- down (&instance->serialize); /* vs self */
- if (!instance->firmware_loaded) {
- int ret;
-
- if ((ret = usb_set_interface (instance->usb_dev, 1, 1)) < 0) {
- dbg ("udsl_set_alternate: usb_set_interface returned %d!", ret);
- up (&instance->serialize);
- return ret;
- }
- instance->firmware_loaded = 1;
- }
- up (&instance->serialize);
-
- tasklet_schedule (&instance->receive_tasklet);
-
- return 0;
-}
-
-static int udsl_usb_ioctl (struct usb_interface *intf, unsigned int code, void *user_data)
-{
- struct udsl_instance_data *instance = usb_get_intfdata (intf);
-
- dbg ("udsl_usb_ioctl entered");
-
- if (!instance) {
- dbg ("udsl_usb_ioctl: NULL instance!");
- return -ENODEV;
- }
-
- switch (code) {
- case UDSL_IOCTL_LINE_UP:
- instance->atm_dev->signal = ATM_PHY_SIG_FOUND;
- return udsl_set_alternate (instance);
- case UDSL_IOCTL_LINE_DOWN:
- instance->atm_dev->signal = ATM_PHY_SIG_LOST;
- return 0;
- default:
- return -ENOTTY;
- }
-}
-
-static int udsl_usb_probe (struct usb_interface *intf, const struct usb_device_id *id)
-{
- struct usb_device *dev = interface_to_usbdev(intf);
- int ifnum = intf->altsetting->desc.bInterfaceNumber;
- struct udsl_instance_data *instance;
- unsigned char mac_str [13];
- int i, length;
- char *buf;
-
- dbg ("udsl_usb_probe: trying device with vendor=0x%x, product=0x%x, ifnum %d",
- dev->descriptor.idVendor, dev->descriptor.idProduct, ifnum);
-
- if ((dev->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) ||
- (dev->descriptor.idVendor != SPEEDTOUCH_VENDORID) ||
- (dev->descriptor.idProduct != SPEEDTOUCH_PRODUCTID) || (ifnum != 1))
- return -ENODEV;
-
- dbg ("udsl_usb_probe: device accepted");
-
- /* instance init */
- if (!(instance = kmalloc (sizeof (struct udsl_instance_data), GFP_KERNEL))) {
- dbg ("udsl_usb_probe: no memory for instance data!");
- return -ENOMEM;
- }
-
- memset (instance, 0, sizeof (struct udsl_instance_data));
-
- init_MUTEX (&instance->serialize);
-
- instance->usb_dev = dev;
-
- INIT_LIST_HEAD (&instance->vcc_list);
-
- spin_lock_init (&instance->receive_lock);
- INIT_LIST_HEAD (&instance->spare_receivers);
- INIT_LIST_HEAD (&instance->filled_receive_buffers);
-
- tasklet_init (&instance->receive_tasklet, udsl_process_receive, (unsigned long) instance);
- INIT_LIST_HEAD (&instance->spare_receive_buffers);
-
- skb_queue_head_init (&instance->sndqueue);
-
- spin_lock_init (&instance->send_lock);
- INIT_LIST_HEAD (&instance->spare_senders);
- INIT_LIST_HEAD (&instance->spare_send_buffers);
-
- tasklet_init (&instance->send_tasklet, udsl_process_send, (unsigned long) instance);
- INIT_LIST_HEAD (&instance->filled_send_buffers);
-
- /* receive init */
- for (i = 0; i < num_rcv_urbs; i++) {
- struct udsl_receiver *rcv = &(instance->receivers [i]);
-
- if (!(rcv->urb = usb_alloc_urb (0, GFP_KERNEL))) {
- dbg ("udsl_usb_probe: no memory for receive urb %d!", i);
- goto fail;
- }
-
- rcv->instance = instance;
-
- list_add (&rcv->list, &instance->spare_receivers);
- }
-
- for (i = 0; i < num_rcv_bufs; i++) {
- struct udsl_receive_buffer *buf = &(instance->receive_buffers [i]);
-
- if (!(buf->base = kmalloc (rcv_buf_size * ATM_CELL_SIZE, GFP_KERNEL))) {
- dbg ("udsl_usb_probe: no memory for receive buffer %d!", i);
- goto fail;
- }
-
- list_add (&buf->list, &instance->spare_receive_buffers);
- }
-
- /* send init */
- for (i = 0; i < num_snd_urbs; i++) {
- struct udsl_sender *snd = &(instance->senders [i]);
-
- if (!(snd->urb = usb_alloc_urb (0, GFP_KERNEL))) {
- dbg ("udsl_usb_probe: no memory for send urb %d!", i);
- goto fail;
- }
-
- snd->instance = instance;
-
- list_add (&snd->list, &instance->spare_senders);
- }
-
- for (i = 0; i < num_snd_bufs; i++) {
- struct udsl_send_buffer *buf = &(instance->send_buffers [i]);
-
- if (!(buf->base = kmalloc (snd_buf_size * ATM_CELL_SIZE, GFP_KERNEL))) {
- dbg ("udsl_usb_probe: no memory for send buffer %d!", i);
- goto fail;
- }
-
- list_add (&buf->list, &instance->spare_send_buffers);
- }
-
- /* ATM init */
- if (!(instance->atm_dev = atm_dev_register (udsl_driver_name, &udsl_atm_devops, -1, NULL))) {
- dbg ("udsl_usb_probe: failed to register ATM device!");
- goto fail;
- }
-
- instance->atm_dev->ci_range.vpi_bits = ATM_CI_MAX;
- instance->atm_dev->ci_range.vci_bits = ATM_CI_MAX;
- instance->atm_dev->signal = ATM_PHY_SIG_UNKNOWN;
-
- /* temp init ATM device, set to 128kbit */
- instance->atm_dev->link_rate = 128 * 1000 / 424;
-
- /* set MAC address, it is stored in the serial number */
- memset (instance->atm_dev->esi, 0, sizeof (instance->atm_dev->esi));
- if (usb_string (dev, dev->descriptor.iSerialNumber, mac_str, sizeof (mac_str)) == 12)
- for (i = 0; i < 6; i++)
- instance->atm_dev->esi [i] = (hex2int (mac_str [i * 2]) * 16) + (hex2int (mac_str [i * 2 + 1]));
-
- /* device description */
- buf = instance->description;
- length = sizeof (instance->description);
-
- if ((i = usb_string (dev, dev->descriptor.iProduct, buf, length)) < 0)
- goto finish;
-
- buf += i;
- length -= i;
-
- i = scnprintf (buf, length, " (");
- buf += i;
- length -= i;
-
- if (length <= 0 || (i = usb_make_path (dev, buf, length)) < 0)
- goto finish;
-
- buf += i;
- length -= i;
-
- snprintf (buf, length, ")");
-
-finish:
- /* ready for ATM callbacks */
- wmb ();
- instance->atm_dev->dev_data = instance;
-
- usb_set_intfdata (intf, instance);
-
- return 0;
-
-fail:
- for (i = 0; i < num_snd_bufs; i++)
- kfree (instance->send_buffers [i].base);
-
- for (i = 0; i < num_snd_urbs; i++)
- usb_free_urb (instance->senders [i].urb);
-
- for (i = 0; i < num_rcv_bufs; i++)
- kfree (instance->receive_buffers [i].base);
-
- for (i = 0; i < num_rcv_urbs; i++)
- usb_free_urb (instance->receivers [i].urb);
-
- kfree (instance);
-
- return -ENOMEM;
-}
-
-static void udsl_usb_disconnect (struct usb_interface *intf)
-{
- struct udsl_instance_data *instance = usb_get_intfdata (intf);
- struct list_head *pos;
- unsigned int count;
- int result, i;
-
- dbg ("udsl_usb_disconnect entered");
-
- usb_set_intfdata (intf, NULL);
-
- if (!instance) {
- dbg ("udsl_usb_disconnect: NULL instance!");
- return;
- }
-
- /* receive finalize */
- tasklet_disable (&instance->receive_tasklet);
-
- for (i = 0; i < num_rcv_urbs; i++)
- if ((result = usb_unlink_urb (instance->receivers [i].urb)) < 0)
- dbg ("udsl_usb_disconnect: usb_unlink_urb on receive urb %d returned %d!", i, result);
-
- /* wait for completion handlers to finish */
- do {
- count = 0;
- spin_lock_irq (&instance->receive_lock);
- list_for_each (pos, &instance->spare_receivers)
- DEBUG_ON (++count > num_rcv_urbs);
- spin_unlock_irq (&instance->receive_lock);
-
- dbg ("udsl_usb_disconnect: found %u spare receivers", count);
-
- if (count == num_rcv_urbs)
- break;
-
- set_current_state (TASK_RUNNING);
- schedule ();
- } while (1);
-
- /* no need to take the spinlock */
- INIT_LIST_HEAD (&instance->filled_receive_buffers);
- INIT_LIST_HEAD (&instance->spare_receive_buffers);
-
- tasklet_enable (&instance->receive_tasklet);
-
- for (i = 0; i < num_rcv_urbs; i++)
- usb_free_urb (instance->receivers [i].urb);
-
- for (i = 0; i < num_rcv_bufs; i++)
- kfree (instance->receive_buffers [i].base);
-
- /* send finalize */
- tasklet_disable (&instance->send_tasklet);
-
- for (i = 0; i < num_snd_urbs; i++)
- if ((result = usb_unlink_urb (instance->senders [i].urb)) < 0)
- dbg ("udsl_usb_disconnect: usb_unlink_urb on send urb %d returned %d!", i, result);
-
- /* wait for completion handlers to finish */
- do {
- count = 0;
- spin_lock_irq (&instance->send_lock);
- list_for_each (pos, &instance->spare_senders)
- DEBUG_ON (++count > num_snd_urbs);
- spin_unlock_irq (&instance->send_lock);
-
- dbg ("udsl_usb_disconnect: found %u spare senders", count);
-
- if (count == num_snd_urbs)
- break;
-
- set_current_state (TASK_RUNNING);
- schedule ();
- } while (1);
-
- /* no need to take the spinlock */
- INIT_LIST_HEAD (&instance->spare_senders);
- INIT_LIST_HEAD (&instance->spare_send_buffers);
- instance->current_buffer = NULL;
-
- tasklet_enable (&instance->send_tasklet);
-
- for (i = 0; i < num_snd_urbs; i++)
- usb_free_urb (instance->senders [i].urb);
-
- for (i = 0; i < num_snd_bufs; i++)
- kfree (instance->send_buffers [i].base);
-
- wmb ();
- instance->usb_dev = NULL;
-
- /* ATM finalize */
- shutdown_atm_dev (instance->atm_dev); /* frees instance, kills tasklets */
-}
-
-
-/***********
-** init **
-***********/
-
-static int __init udsl_usb_init (void)
-{
- dbg ("udsl_usb_init: driver version " DRIVER_VERSION);
-
- if (sizeof (struct udsl_control) > sizeof (((struct sk_buff *)0)->cb)) {
- printk (KERN_ERR __FILE__ ": unusable with this kernel!\n");
- return -EIO;
- }
-
- if ((num_rcv_urbs > UDSL_MAX_RCV_URBS) || (num_snd_urbs > UDSL_MAX_SND_URBS) ||
- (num_rcv_bufs > UDSL_MAX_RCV_BUFS) || (num_snd_bufs > UDSL_MAX_SND_BUFS) ||
- (rcv_buf_size > UDSL_MAX_RCV_BUF_SIZE) || (snd_buf_size > UDSL_MAX_SND_BUF_SIZE))
- return -EINVAL;
-
- return usb_register (&udsl_usb_driver);
-}
-
-static void __exit udsl_usb_cleanup (void)
-{
- dbg ("udsl_usb_cleanup entered");
-
- usb_deregister (&udsl_usb_driver);
-}
-
-module_init (udsl_usb_init);
-module_exit (udsl_usb_cleanup);
-
-MODULE_AUTHOR (DRIVER_AUTHOR);
-MODULE_DESCRIPTION (DRIVER_DESC);
-MODULE_LICENSE ("GPL");
-MODULE_VERSION (DRIVER_VERSION);
-
-
-/************
-** debug **
-************/
-
-#ifdef VERBOSE_DEBUG
-static int udsl_print_packet (const unsigned char *data, int len)
-{
- unsigned char buffer [256];
- int i = 0, j = 0;
-
- for (i = 0; i < len;) {
- buffer [0] = '\0';
- sprintf (buffer, "%.3d :", i);
- for (j = 0; (j < 16) && (i < len); j++, i++) {
- sprintf (buffer, "%s %2.2x", buffer, data [i]);
- }
- dbg ("%s", buffer);
- }
- return i;
-}
-#endif
diff --git a/drivers/usb/misc/tiglusb.c b/drivers/usb/misc/tiglusb.c
index 0f9c5753772d..f902884a7bbb 100644
--- a/drivers/usb/misc/tiglusb.c
+++ b/drivers/usb/misc/tiglusb.c
@@ -115,6 +115,7 @@ tiglusb_open (struct inode *inode, struct file *filp)
return -EBUSY;
}
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout (HZ / 2);
if (signal_pending (current)) {
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 0bc5ccc244ba..8b22320d5ea6 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -44,6 +44,7 @@
#include <linux/parport.h>
#include <linux/init.h>
#include <linux/usb.h>
+#include <linux/delay.h>
/*
* Version Information
@@ -159,8 +160,7 @@ static int change_mode(struct parport *pp, int m)
if (time_after_eq (jiffies, expire))
/* The FIFO is stuck. */
return -EBUSY;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout((HZ + 99) / 100);
+ msleep_interruptible(10);
if (signal_pending (current))
break;
}
diff --git a/drivers/usb/net/catc.c b/drivers/usb/net/catc.c
index 455fe6e3be06..66d0a70a8210 100644
--- a/drivers/usb/net/catc.c
+++ b/drivers/usb/net/catc.c
@@ -765,10 +765,10 @@ static int catc_stop(struct net_device *netdev)
if (!catc->is_f5u011)
del_timer_sync(&catc->timer);
- usb_unlink_urb(catc->rx_urb);
- usb_unlink_urb(catc->tx_urb);
- usb_unlink_urb(catc->irq_urb);
- usb_unlink_urb(catc->ctrl_urb);
+ usb_kill_urb(catc->rx_urb);
+ usb_kill_urb(catc->tx_urb);
+ usb_kill_urb(catc->irq_urb);
+ usb_kill_urb(catc->ctrl_urb);
return 0;
}
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c
index 40e921ade8c8..2092cb9cb33f 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/usb/net/kaweth.c
@@ -668,13 +668,13 @@ static int kaweth_open(struct net_device *net)
INTBUFFERSIZE,
int_callback,
kaweth,
- 8);
+ 250); /* overriding the descriptor */
kaweth->irq_urb->transfer_dma = kaweth->intbufferhandle;
kaweth->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
res = usb_submit_urb(kaweth->irq_urb, GFP_KERNEL);
if (res) {
- usb_unlink_urb(kaweth->rx_urb);
+ usb_kill_urb(kaweth->rx_urb);
return -EIO;
}
@@ -695,15 +695,15 @@ static int kaweth_close(struct net_device *net)
kaweth->status |= KAWETH_STATUS_CLOSING;
- usb_unlink_urb(kaweth->irq_urb);
- usb_unlink_urb(kaweth->rx_urb);
+ usb_kill_urb(kaweth->irq_urb);
+ usb_kill_urb(kaweth->rx_urb);
flush_scheduled_work();
/* a scheduled work may have resubmitted,
we hit them again */
- usb_unlink_urb(kaweth->irq_urb);
- usb_unlink_urb(kaweth->rx_urb);
+ usb_kill_urb(kaweth->irq_urb);
+ usb_kill_urb(kaweth->rx_urb);
kaweth->status &= ~KAWETH_STATUS_CLOSING;
@@ -1173,8 +1173,8 @@ static void kaweth_disconnect(struct usb_interface *intf)
}
kaweth->removed = 1;
- usb_unlink_urb(kaweth->irq_urb);
- usb_unlink_urb(kaweth->rx_urb);
+ usb_kill_urb(kaweth->irq_urb);
+ usb_kill_urb(kaweth->rx_urb);
/* we need to wait for the urb to be cancelled, if it is active */
spin_lock(&kaweth->device_lock);
@@ -1250,19 +1250,17 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
return status;
}
- set_current_state(TASK_UNINTERRUPTIBLE);
while (timeout && !awd.done) {
- timeout = schedule_timeout(timeout);
set_current_state(TASK_UNINTERRUPTIBLE);
+ timeout = schedule_timeout(timeout);
}
- set_current_state(TASK_RUNNING);
remove_wait_queue(&awd.wqh, &wait);
if (!timeout) {
// timeout
kaweth_warn("usb_control/bulk_msg: timeout");
- usb_unlink_urb(urb); // remove urb safely
+ usb_kill_urb(urb); // remove urb safely
status = -ETIMEDOUT;
}
else {
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c
index c515916e37a7..98cb5ae3f6f1 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/usb/net/pegasus.c
@@ -854,10 +854,10 @@ static void free_all_urbs(pegasus_t * pegasus)
static void unlink_all_urbs(pegasus_t * pegasus)
{
- usb_unlink_urb(pegasus->intr_urb);
- usb_unlink_urb(pegasus->tx_urb);
- usb_unlink_urb(pegasus->rx_urb);
- usb_unlink_urb(pegasus->ctrl_urb);
+ usb_kill_urb(pegasus->intr_urb);
+ usb_kill_urb(pegasus->tx_urb);
+ usb_kill_urb(pegasus->rx_urb);
+ usb_kill_urb(pegasus->ctrl_urb);
}
static int alloc_urbs(pegasus_t * pegasus)
@@ -920,8 +920,8 @@ static int pegasus_open(struct net_device *net)
if ((res = enable_net_traffic(net, pegasus->usb))) {
err("can't enable_net_traffic() - %d", res);
res = -EIO;
- usb_unlink_urb(pegasus->rx_urb);
- usb_unlink_urb(pegasus->intr_urb);
+ usb_kill_urb(pegasus->rx_urb);
+ usb_kill_urb(pegasus->intr_urb);
free_skb_pool(pegasus);
goto exit;
}
diff --git a/drivers/usb/net/rtl8150.c b/drivers/usb/net/rtl8150.c
index 640aa5b68095..5cd2c9c5987c 100644
--- a/drivers/usb/net/rtl8150.c
+++ b/drivers/usb/net/rtl8150.c
@@ -20,7 +20,7 @@
#include <asm/uaccess.h>
/* Version Information */
-#define DRIVER_VERSION "v0.6.1 (2004/03/13)"
+#define DRIVER_VERSION "v0.6.2 (2004/08/27)"
#define DRIVER_AUTHOR "Petko Manolov <petkan@users.sourceforge.net>"
#define DRIVER_DESC "rtl8150 based usb-ethernet driver"
@@ -392,10 +392,10 @@ static void free_all_urbs(rtl8150_t * dev)
static void unlink_all_urbs(rtl8150_t * dev)
{
- usb_unlink_urb(dev->rx_urb);
- usb_unlink_urb(dev->tx_urb);
- usb_unlink_urb(dev->intr_urb);
- usb_unlink_urb(dev->ctrl_urb);
+ usb_kill_urb(dev->rx_urb);
+ usb_kill_urb(dev->tx_urb);
+ usb_kill_urb(dev->intr_urb);
+ usb_kill_urb(dev->ctrl_urb);
}
static inline struct sk_buff *pull_skb(rtl8150_t *dev)
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index cca6e72cf2fa..ad65e741fa44 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -825,7 +825,7 @@ static void ax8817x_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct ax8817x_data *data = (struct ax8817x_data *)dev->data;
- usb_unlink_urb(data->int_urb);
+ usb_kill_urb(data->int_urb);
usb_free_urb(data->int_urb);
kfree(data->int_buf);
}
@@ -1437,7 +1437,7 @@ static int genelink_free (struct usbnet *dev)
// handling needs to be generic)
// cancel irq urb first
- usb_unlink_urb (priv->irq_urb);
+ usb_kill_urb (priv->irq_urb);
// free irq urb
usb_free_urb (priv->irq_urb);
@@ -3252,6 +3252,10 @@ static const struct usb_device_id products [] = {
// Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter"
USB_DEVICE (0x6189, 0x182d),
.driver_info = (unsigned long) &ax8817x_info,
+}, {
+ // Surecom EP-1427X-2
+ USB_DEVICE (0x1189, 0x0893),
+ .driver_info = (unsigned long) &ax8817x_info,
},
#endif
@@ -3308,11 +3312,18 @@ static const struct usb_device_id products [] = {
*
* PXA25x or PXA210 ... these use a "usb-eth" driver much like
* the sa1100 one, but hardware uses different endpoint numbers.
+ *
+ * Or the Linux "Ethernet" gadget on hardware that can't talk
+ * CDC Ethernet (e.g., no altsettings), in either of two modes:
+ * - acting just like the old "usb-eth" firmware, though
+ * the implementation is different
+ * - supporting RNDIS as the first/default configuration for
+ * MS-Windows interop; Linux needs to use the other config
*/
{
// 1183 = 0x049F, both used as hex values?
// Compaq "Itsy" vendor/product id
- USB_DEVICE (0x049F, 0x505A),
+ USB_DEVICE (0x049F, 0x505A), // usb-eth, or compatible
.driver_info = (unsigned long) &linuxdev_info,
}, {
USB_DEVICE (0x0E7E, 0x1001), // G.Mate "Yopy"
@@ -3320,6 +3331,10 @@ static const struct usb_device_id products [] = {
}, {
USB_DEVICE (0x8086, 0x07d3), // "blob" bootloader
.driver_info = (unsigned long) &blob_info,
+}, {
+ // Linux Ethernet/RNDIS gadget on pxa210/25x/26x
+ USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203),
+ .driver_info = (unsigned long) &linuxdev_info,
},
#endif
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 88fede7b447e..1499bf2fcd13 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -187,6 +187,16 @@ config USB_SERIAL_EDGEPORT_TI
To compile this driver as a module, choose M here: the
module will be called io_ti.
+config USB_SERIAL_IPW
+ tristate "USB IPWireless (3G UMTS TDD) Driver (EXPERIMENTAL)"
+ depends on USB_SERIAL && EXPERIMENTAL
+ help
+ Say Y here if you want to use an IPWireless USB modem such as
+ the ones supplied by Axity3G/Sentech South Africa.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ipw.
+
config USB_SERIAL_KEYSPAN_PDA
tristate "USB Keyspan PDA Single Port Serial Driver"
depends on USB_SERIAL
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 6cfbe3108ccb..dfd43ec80ff3 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_USB_SERIAL_EDGEPORT_TI) += io_ti.o
obj-$(CONFIG_USB_SERIAL_EMPEG) += empeg.o
obj-$(CONFIG_USB_SERIAL_FTDI_SIO) += ftdi_sio.o
obj-$(CONFIG_USB_SERIAL_IPAQ) += ipaq.o
+obj-$(CONFIG_USB_SERIAL_IPW) += ipw.o
obj-$(CONFIG_USB_SERIAL_IR) += ir-usb.o
obj-$(CONFIG_USB_SERIAL_KEYSPAN) += keyspan.o
obj-$(CONFIG_USB_SERIAL_KEYSPAN_PDA) += keyspan_pda.o
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index bcf56011ec22..a44cb9d96113 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -228,7 +228,7 @@ static int belkin_sa_open (struct usb_serial_port *port, struct file *filp)
port->interrupt_in_urb->dev = port->serial->dev;
retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (retval) {
- usb_unlink_urb(port->read_urb);
+ usb_kill_urb(port->read_urb);
err(" usb_submit_urb(read int) failed");
}
@@ -242,9 +242,9 @@ static void belkin_sa_close (struct usb_serial_port *port, struct file *filp)
dbg("%s port %d", __FUNCTION__, port->number);
/* shutdown our bulk reads and writes */
- usb_unlink_urb (port->write_urb);
- usb_unlink_urb (port->read_urb);
- usb_unlink_urb (port->interrupt_in_urb);
+ usb_kill_urb(port->write_urb);
+ usb_kill_urb(port->read_urb);
+ usb_kill_urb(port->interrupt_in_urb);
} /* belkin_sa_close */
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 33613b008557..6b5ec2af32ad 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -149,7 +149,7 @@ static void cyberjack_shutdown (struct usb_serial *serial)
dbg("%s", __FUNCTION__);
for (i=0; i < serial->num_ports; ++i) {
- usb_unlink_urb (serial->port[i]->interrupt_in_urb);
+ usb_kill_urb(serial->port[i]->interrupt_in_urb);
/* My special items, the standard routines free my urbs */
kfree(usb_get_serial_port_data(serial->port[i]));
usb_set_serial_port_data(serial->port[i], NULL);
@@ -189,8 +189,8 @@ static void cyberjack_close (struct usb_serial_port *port, struct file *filp)
if (port->serial->dev) {
/* shutdown any bulk reads that might be going on */
- usb_unlink_urb (port->write_urb);
- usb_unlink_urb (port->read_urb);
+ usb_kill_urb(port->write_urb);
+ usb_kill_urb(port->read_urb);
}
}
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 3ac69f2c2510..b904bfcbffcf 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1615,7 +1615,7 @@ dbg( "digi_close: TOP: port=%d, open_count=%d", priv->dp_port_num, port->open_co
DIGI_CLOSE_TIMEOUT );
/* shutdown any outstanding bulk writes */
- usb_unlink_urb (port->write_urb);
+ usb_kill_urb(port->write_urb);
}
tty->closing = 0;
@@ -1754,8 +1754,8 @@ dbg( "digi_shutdown: TOP, in_interrupt()=%ld", in_interrupt() );
/* stop reads and writes on all ports */
for( i=0; i<serial->type->num_ports+1; i++ ) {
- usb_unlink_urb( serial->port[i]->read_urb );
- usb_unlink_urb( serial->port[i]->write_urb );
+ usb_kill_urb(serial->port[i]->read_urb);
+ usb_kill_urb(serial->port[i]->write_urb);
}
/* free the private data structures for all ports */
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 251a50fdab86..e94113e0dd43 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -185,7 +185,7 @@ static void empeg_close (struct usb_serial_port *port, struct file * filp)
dbg("%s - port %d", __FUNCTION__, port->number);
/* shutdown our bulk read */
- usb_unlink_urb (port->read_urb);
+ usb_kill_urb(port->read_urb);
/* Uncomment the following line if you want to see some statistics in your syslog */
/* dev_info (&port->dev, "Bytes In = %d Bytes Out = %d\n", bytes_in, bytes_out); */
}
@@ -406,7 +406,7 @@ static void empeg_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
static void empeg_throttle (struct usb_serial_port *port)
{
dbg("%s - port %d", __FUNCTION__, port->number);
- usb_unlink_urb (port->read_urb);
+ usb_kill_urb(port->read_urb);
}
@@ -579,10 +579,10 @@ static void __exit empeg_exit (void)
for (i = 0; i < NUM_URBS; ++i) {
if (write_urb_pool[i]) {
- /* FIXME - uncomment the following usb_unlink_urb call when
+ /* FIXME - uncomment the following usb_kill_urb call when
* the host controllers get fixed to set urb->dev = NULL after
* the urb is finished. Otherwise this call oopses. */
- /* usb_unlink_urb(write_urb_pool[i]); */
+ /* usb_kill_urb(write_urb_pool[i]); */
if (write_urb_pool[i]->transfer_buffer)
kfree(write_urb_pool[i]->transfer_buffer);
usb_free_urb (write_urb_pool[i]);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 9073ea84d8d3..b1a985470e42 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -368,6 +368,10 @@ static struct usb_device_id id_table_8U232AM [] = {
{ USB_DEVICE_VER(INTREPID_VID, INTREPID_NEOVI_PID, 0, 0x3ff) },
{ USB_DEVICE_VER(FALCOM_VID, FALCOM_TWIST_PID, 0, 0x3ff) },
{ USB_DEVICE_VER(FTDI_VID, FTDI_SUUNTO_SPORTS_PID, 0, 0x3ff) },
+ { USB_DEVICE_VER(FTDI_RM_VID, FTDI_RMCANVIEW_PID, 0, 0x3ff) },
+ { USB_DEVICE_VER(BANDB_VID, BANDB_USOTL4_PID, 0, 0x3ff) },
+ { USB_DEVICE_VER(BANDB_VID, BANDB_USTL4_PID, 0, 0x3ff) },
+ { USB_DEVICE_VER(BANDB_VID, BANDB_USO9ML2_PID, 0, 0x3ff) },
{ } /* Terminating entry */
};
@@ -478,6 +482,10 @@ static struct usb_device_id id_table_FT232BM [] = {
{ USB_DEVICE_VER(INTREPID_VID, INTREPID_NEOVI_PID, 0x400, 0xffff) },
{ USB_DEVICE_VER(FALCOM_VID, FALCOM_TWIST_PID, 0x400, 0xffff) },
{ USB_DEVICE_VER(FTDI_VID, FTDI_SUUNTO_SPORTS_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_RM_VID, FTDI_RMCANVIEW_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(BANDB_VID, BANDB_USOTL4_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(BANDB_VID, BANDB_USTL4_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(BANDB_VID, BANDB_USO9ML2_PID, 0x400, 0xffff) },
{ } /* Terminating entry */
};
@@ -595,6 +603,10 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(INTREPID_VID, INTREPID_NEOVI_PID) },
{ USB_DEVICE(FALCOM_VID, FALCOM_TWIST_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SUUNTO_SPORTS_PID) },
+ { USB_DEVICE(FTDI_RM_VID, FTDI_RMCANVIEW_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
{ } /* Terminating entry */
};
@@ -1479,16 +1491,8 @@ static void ftdi_close (struct usb_serial_port *port, struct file *filp)
} /* Note change no line if hupcl is off */
/* shutdown our bulk read */
- if (port->read_urb) {
- if (usb_unlink_urb (port->read_urb) < 0) {
- /* Generally, this isn't an error. If the previous
- read bulk callback occurred (or is about to occur)
- while the port was being closed or was throtted
- (and is still throttled), the read urb will not
- have been submitted. */
- dbg("%s - failed to unlink read urb (generally not an error)", __FUNCTION__);
- }
- }
+ if (port->read_urb)
+ usb_kill_urb(port->read_urb);
} /* ftdi_close */
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 232213b02860..e7a7e0aba4f2 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -225,6 +225,21 @@
*/
#define FTDI_SUUNTO_SPORTS_PID 0xF680 /* Suunto Sports instrument */
+/*
+ * Definitions for B&B Electronics products.
+ */
+#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
+#define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */
+#define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */
+#define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */
+
+/*
+ * RM Michaelides CANview USB (http://www.rmcan.com)
+ * CAN fieldbus interface adapter, added by port GmbH (www.port.de)
+ */
+#define FTDI_RM_VID 0x0403 /* Vendor Id */
+#define FTDI_RMCANVIEW_PID 0xfd60 /* Product Id */
+
/* Commands */
#define FTDI_SIO_RESET 0 /* Reset the port */
#define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 196fea084f21..c88885f8ca05 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -147,9 +147,9 @@ static void generic_cleanup (struct usb_serial_port *port)
if (serial->dev) {
/* shutdown any bulk reads that might be going on */
if (serial->num_bulk_out)
- usb_unlink_urb (port->write_urb);
+ usb_kill_urb(port->write_urb);
if (serial->num_bulk_in)
- usb_unlink_urb (port->read_urb);
+ usb_kill_urb(port->read_urb);
}
}
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 1ef47613b958..b5d18c699a0e 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1238,7 +1238,7 @@ static void edge_close (struct usb_serial_port *port, struct file * filp)
edge_port->openPending = FALSE;
if (edge_port->write_urb) {
- usb_unlink_urb (edge_port->write_urb);
+ usb_kill_urb(edge_port->write_urb);
}
if (edge_port->write_urb) {
@@ -2443,8 +2443,8 @@ static int write_cmd_usb (struct edgeport_port *edge_port, unsigned char *buffer
if (status) {
/* something went wrong */
dbg("%s - usb_submit_urb(write bulk) failed", __FUNCTION__);
- usb_unlink_urb (urb);
- usb_free_urb (urb);
+ usb_kill_urb(urb);
+ usb_free_urb(urb);
return status;
}
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index ace42c870727..359d3c0b8a15 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1972,7 +1972,7 @@ static void edge_close (struct usb_serial_port *port, struct file * filp)
/* chase the port close */
TIChasePort (edge_port);
- usb_unlink_urb (port->read_urb);
+ usb_kill_urb(port->read_urb);
/* assuming we can still talk to the device,
* send a close port command to it */
@@ -1987,7 +1987,7 @@ static void edge_close (struct usb_serial_port *port, struct file * filp)
--edge_port->edge_serial->num_ports_open;
if (edge_port->edge_serial->num_ports_open <= 0) {
/* last port is now closed, let's shut down our interrupt urb */
- usb_unlink_urb (port->serial->port[0]->interrupt_in_urb);
+ usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
edge_port->edge_serial->num_ports_open = 0;
}
edge_port->close_pending = 0;
@@ -2121,7 +2121,7 @@ static void edge_throttle (struct usb_serial_port *port)
status = TIClearRts (edge_port);
}
- usb_unlink_urb (port->read_urb);
+ usb_kill_urb(port->read_urb);
}
static void edge_unthrottle (struct usb_serial_port *port)
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 8fbce2732c51..1a748b515112 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -288,8 +288,8 @@ static void ipaq_close(struct usb_serial_port *port, struct file *filp)
/*
* shut down bulk read and write
*/
- usb_unlink_urb(port->write_urb);
- usb_unlink_urb(port->read_urb);
+ usb_kill_urb(port->write_urb);
+ usb_kill_urb(port->read_urb);
ipaq_destroy_lists(port);
kfree(priv);
usb_set_serial_port_data(port, NULL);
@@ -419,9 +419,8 @@ static void ipaq_write_gather(struct usb_serial_port *port)
struct ipaq_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
int count, room;
- struct ipaq_packet *pkt;
+ struct ipaq_packet *pkt, *tmp;
struct urb *urb = port->write_urb;
- struct list_head *tmp;
if (urb->status == -EINPROGRESS) {
/* Should never happen */
@@ -429,9 +428,7 @@ static void ipaq_write_gather(struct usb_serial_port *port)
return;
}
room = URBDATA_SIZE;
- for (tmp = priv->queue.next; tmp != &priv->queue;) {
- pkt = list_entry(tmp, struct ipaq_packet, list);
- tmp = tmp->next;
+ list_for_each_entry_safe(pkt, tmp, &priv->queue, list) {
count = min(room, (int)(pkt->len - pkt->written));
memcpy(urb->transfer_buffer + (URBDATA_SIZE - room),
pkt->data + pkt->written, count);
@@ -503,22 +500,16 @@ static int ipaq_chars_in_buffer(struct usb_serial_port *port)
static void ipaq_destroy_lists(struct usb_serial_port *port)
{
struct ipaq_private *priv = usb_get_serial_port_data(port);
- struct list_head *tmp;
- struct ipaq_packet *pkt;
+ struct ipaq_packet *pkt, *tmp;
- for (tmp = priv->queue.next; tmp != &priv->queue;) {
- pkt = list_entry(tmp, struct ipaq_packet, list);
- tmp = tmp->next;
+ list_for_each_entry_safe(pkt, tmp, &priv->queue, list) {
kfree(pkt->data);
kfree(pkt);
}
- for (tmp = priv->freelist.next; tmp != &priv->freelist;) {
- pkt = list_entry(tmp, struct ipaq_packet, list);
- tmp = tmp->next;
+ list_for_each_entry_safe(pkt, tmp, &priv->freelist, list) {
kfree(pkt->data);
kfree(pkt);
}
- return;
}
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
new file mode 100644
index 000000000000..2fc04f905a5c
--- /dev/null
+++ b/drivers/usb/serial/ipw.c
@@ -0,0 +1,496 @@
+/*
+ * IPWireless 3G UMTS TDD Modem driver (USB connected)
+ *
+ * Copyright (C) 2004 Roelf Diedericks <roelfd@inet.co.za>
+ * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * All information about the device was acquired using SnoopyPro
+ * on MSFT's O/S, and examining the MSFT drivers' debug output
+ * (insanely left _on_ in the enduser version)
+ *
+ * It was written out of frustration with the IPWireless USB modem
+ * supplied by Axity3G/Sentech South Africa not supporting
+ * Linux whatsoever.
+ *
+ * Nobody provided any proprietary information that was not already
+ * available for this device.
+ *
+ * The modem adheres to the "3GPP TS 27.007 AT command set for 3G
+ * User Equipment (UE)" standard, available from
+ * http://www.3gpp.org/ftp/Specs/html-info/27007.htm
+ *
+ * The code was only tested with the IPWireless handheld modem distributed
+ * in South Africa by Sentech.
+ *
+ * It may work for Woosh Inc in .nz too, as it appears they use the
+ * same kit.
+ *
+ * There is still some work to be done in terms of handling
+ * DCD, DTR, RTS, CTS which are currently faked.
+ * It's good enough for PPP at this point. It's based off all kinds of
+ * code found in usb/serial and usb/class
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/usb.h>
+#include <linux/usb.h>
+#include <asm/uaccess.h>
+#include "usb-serial.h"
+
+/*
+ * Version Information
+ */
+#define DRIVER_VERSION "v0.3"
+#define DRIVER_AUTHOR "Roelf Diedericks"
+#define DRIVER_DESC "IPWireless tty driver"
+
+#define IPW_TTY_MAJOR 240 /* real device node major id, experimental range */
+#define IPW_TTY_MINORS 256 /* we support 256 devices, dunno why, it'd be insane :) */
+
+#define USB_IPW_MAGIC 0x6d02 /* magic number for ipw struct */
+
+
+/* Message sizes */
+#define EVENT_BUFFER_SIZE 0xFF
+#define CHAR2INT16(c1,c0) (((u32)((c1) & 0xff) << 8) + (u32)((c0) & 0xff))
+#define NUM_BULK_URBS 24
+#define NUM_CONTROL_URBS 16
+
+/* vendor/product pairs that are known to work with this driver */
+#define IPW_VID 0x0bc3
+#define IPW_PID 0x0001
+
+
+/* Vendor commands: */
+
+/* baud rates */
+enum {
+ ipw_sio_b256000 = 0x000e,
+ ipw_sio_b128000 = 0x001d,
+ ipw_sio_b115200 = 0x0020,
+ ipw_sio_b57600 = 0x0040,
+ ipw_sio_b56000 = 0x0042,
+ ipw_sio_b38400 = 0x0060,
+ ipw_sio_b19200 = 0x00c0,
+ ipw_sio_b14400 = 0x0100,
+ ipw_sio_b9600 = 0x0180,
+ ipw_sio_b4800 = 0x0300,
+ ipw_sio_b2400 = 0x0600,
+ ipw_sio_b1200 = 0x0c00,
+ ipw_sio_b600 = 0x1800
+};
+
+/* data bits */
+#define ipw_dtb_7 0x700
+#define ipw_dtb_8 0x810 // ok so the define is misleading, I know, but forces 8,n,1
+ // I mean, is there a point to any other setting these days? :)
+
+/* usb control request types : */
+#define IPW_SIO_RXCTL 0x00 // control bulk rx channel transmissions, value=1/0 (on/off)
+#define IPW_SIO_SET_BAUD 0x01 // set baud, value=requested ipw_sio_bxxxx
+#define IPW_SIO_SET_LINE 0x03 // set databits, parity. value=ipw_dtb_x
+#define IPW_SIO_SET_PIN 0x03 // set/clear dtr/rts value=ipw_pin_xxx
+#define IPW_SIO_POLL 0x08 // get serial port status byte, call with value=0
+#define IPW_SIO_INIT 0x11 // initializes ? value=0 (appears as first thing todo on open)
+#define IPW_SIO_PURGE 0x12 // purge all transmissions?, call with value=numchar_to_purge
+#define IPW_SIO_HANDFLOW 0x13 // set xon/xoff limits value=0, and a buffer of 0x10 bytes
+#define IPW_SIO_SETCHARS 0x13 // set the flowcontrol special chars, value=0, buf=6 bytes,
+ // last 2 bytes contain flowcontrol chars e.g. 00 00 00 00 11 13
+
+/* values used for request IPW_SIO_SET_PIN */
+#define IPW_PIN_SETDTR 0x101
+#define IPW_PIN_SETRTS 0x202
+#define IPW_PIN_CLRDTR 0x100
+#define IPW_PIN_CLRRTS 0x200 // unconfirmed
+
+/* values used for request IPW_SIO_RXCTL */
+#define IPW_RXBULK_ON 1
+#define IPW_RXBULK_OFF 0
+
+/* various 16 byte hardcoded transferbuffers used by flow control */
+#define IPW_BYTES_FLOWINIT { 0x01, 0, 0, 0, 0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* Interpretation of modem status lines */
+/* These need sorting out by individually connecting pins and checking
+ * results. FIXME!
+ * When data is being sent we see 0x30 in the lower byte; this must
+ * contain DSR and CTS ...
+ */
+#define IPW_DSR ((1<<4) | (1<<5))
+#define IPW_CTS ((1<<5) | (1<<4))
+
+#define IPW_WANTS_TO_SEND 0x30
+//#define IPW_DTR /* Data Terminal Ready */
+//#define IPW_CTS /* Clear To Send */
+//#define IPW_CD /* Carrier Detect */
+//#define IPW_DSR /* Data Set Ready */
+//#define IPW_RxD /* Receive pin */
+
+//#define IPW_LE
+//#define IPW_RTS
+//#define IPW_ST
+//#define IPW_SR
+//#define IPW_RI /* Ring Indicator */
+
+static struct usb_device_id usb_ipw_ids[] = {
+ { USB_DEVICE(IPW_VID, IPW_PID) },
+ { },
+};
+
+MODULE_DEVICE_TABLE(usb, usb_ipw_ids);
+
+static struct usb_driver usb_ipw_driver = {
+ .owner = THIS_MODULE,
+ .name = "ipwtty",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .id_table = usb_ipw_ids,
+};
+
+static int debug;
+
+static void ipw_read_bulk_callback(struct urb *urb, struct pt_regs *regs)
+{
+ struct usb_serial_port *port = urb->context;
+ unsigned char *data = urb->transfer_buffer;
+ struct tty_struct *tty;
+ int i;
+ int result;
+
+ dbg("%s - port %d", __FUNCTION__, port->number);
+
+ if (urb->status) {
+ dbg("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status);
+ return;
+ }
+
+ usb_serial_debug_data(debug, &port->dev, __FUNCTION__, urb->actual_length, data);
+
+ tty = port->tty;
+ if (tty && urb->actual_length) {
+ for (i = 0; i < urb->actual_length ; ++i) {
+ /* if we insert more than TTY_FLIPBUF_SIZE characters, we drop them. */
+ if(tty->flip.count >= TTY_FLIPBUF_SIZE) {
+ tty_flip_buffer_push(tty);
+ }
+ /* this doesn't actually push the data through unless tty->low_latency is set */
+ tty_insert_flip_char(tty, data[i], 0);
+ }
+ tty_flip_buffer_push(tty);
+ }
+
+ /* Continue trying to always read */
+ usb_fill_bulk_urb (port->read_urb, port->serial->dev,
+ usb_rcvbulkpipe(port->serial->dev,
+ port->bulk_in_endpointAddress),
+ port->read_urb->transfer_buffer,
+ port->read_urb->transfer_buffer_length,
+ ipw_read_bulk_callback, port);
+ result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ if (result)
+ dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n", __FUNCTION__, result);
+ return;
+}
+
+static int ipw_open(struct usb_serial_port *port, struct file *filp)
+{
+ struct usb_device *dev = port->serial->dev;
+ u8 buf_flow_static[16] = IPW_BYTES_FLOWINIT;
+ u8 *buf_flow_init;
+ int result;
+
+ dbg("%s", __FUNCTION__);
+
+ buf_flow_init = kmalloc(16, GFP_KERNEL);
+ if (!buf_flow_init)
+ return -ENOMEM;
+ memcpy(buf_flow_init, buf_flow_static, 16);
+
+ if (port->tty)
+ port->tty->low_latency = 1;
+
+ /* --1: Tell the modem to initialize (we think) From sniffs this is always the
+ * first thing that gets sent to the modem during opening of the device */
+ dbg("%s: Sending SIO_INIT (we guess)",__FUNCTION__);
+ result = usb_control_msg(dev, usb_sndctrlpipe(dev,0),
+ IPW_SIO_INIT,
+ USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ 0,
+ 0, /* index */
+ NULL,
+ 0,
+ 100*HZ);
+ if (result < 0)
+ dev_err(&port->dev, "Init of modem failed (error = %d)", result);
+
+ /* reset the bulk pipes */
+ usb_clear_halt(dev, usb_rcvbulkpipe(dev, port->bulk_in_endpointAddress));
+ usb_clear_halt(dev, usb_sndbulkpipe(dev, port->bulk_out_endpointAddress));
+
+ /*--2: Start reading from the device */
+ dbg("%s: setting up bulk read callback",__FUNCTION__);
+ usb_fill_bulk_urb(port->read_urb, dev,
+ usb_rcvbulkpipe(dev, port->bulk_in_endpointAddress),
+ port->bulk_in_buffer,
+ port->bulk_in_size,
+ ipw_read_bulk_callback, port);
+ result = usb_submit_urb(port->read_urb, GFP_KERNEL);
+ if (result < 0)
+ dbg("%s - usb_submit_urb(read bulk) failed with status %d", __FUNCTION__, result);
+
+ /*--3: Tell the modem to open the floodgates on the rx bulk channel */
+ dbg("%s:asking modem for RxRead (RXBULK_ON)",__FUNCTION__);
+ result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ IPW_SIO_RXCTL,
+ USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ IPW_RXBULK_ON,
+ 0, /* index */
+ NULL,
+ 0,
+ 100*HZ);
+ if (result < 0)
+ dev_err(&port->dev, "Enabling bulk RxRead failed (error = %d)", result);
+
+ /*--4: setup the initial flowcontrol */
+ dbg("%s:setting init flowcontrol (%s)",__FUNCTION__,buf_flow_init);
+ result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ IPW_SIO_HANDFLOW,
+ USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ 0,
+ 0,
+ buf_flow_init,
+ 0x10,
+ 200*HZ);
+ if (result < 0)
+ dev_err(&port->dev, "initial flowcontrol failed (error = %d)", result);
+
+
+ /*--5: raise the dtr */
+ dbg("%s:raising dtr",__FUNCTION__);
+ result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ IPW_SIO_SET_PIN,
+ USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ IPW_PIN_SETDTR,
+ 0,
+ NULL,
+ 0,
+ 200*HZ);
+ if (result < 0)
+ dev_err(&port->dev, "setting dtr failed (error = %d)", result);
+
+ /*--6: raise the rts */
+ dbg("%s:raising rts",__FUNCTION__);
+ result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ IPW_SIO_SET_PIN,
+ USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ IPW_PIN_SETRTS,
+ 0,
+ NULL,
+ 0,
+ 200*HZ);
+ if (result < 0)
+ dev_err(&port->dev, "setting rts failed (error = %d)", result);
+
+ kfree(buf_flow_init);
+ return 0;
+}
+
+static void ipw_close(struct usb_serial_port *port, struct file * filp)
+{
+ struct usb_device *dev = port->serial->dev;
+ int result;
+
+ if (tty_hung_up_p(filp)) {
+ dbg("%s: tty_hung_up_p ...", __FUNCTION__);
+ return;
+ }
+
+ /*--1: drop the dtr */
+ dbg("%s:dropping dtr",__FUNCTION__);
+ result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ IPW_SIO_SET_PIN,
+ USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ IPW_PIN_CLRDTR,
+ 0,
+ NULL,
+ 0,
+ 200*HZ);
+ if (result < 0)
+ dev_err(&port->dev, "dropping dtr failed (error = %d)", result);
+
+ /*--2: drop the rts */
+ dbg("%s:dropping rts",__FUNCTION__);
+ result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ IPW_SIO_SET_PIN, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ IPW_PIN_CLRRTS,
+ 0,
+ NULL,
+ 0,
+ 200*HZ);
+ if (result < 0)
+ dev_err(&port->dev, "dropping rts failed (error = %d)", result);
+
+
+ /*--3: purge */
+ dbg("%s:sending purge",__FUNCTION__);
+ result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ IPW_SIO_PURGE, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ 0x03,
+ 0,
+ NULL,
+ 0,
+ 200*HZ);
+ if (result < 0)
+ dev_err(&port->dev, "purge failed (error = %d)", result);
+
+
+ /* send RXBULK_off (tell modem to stop transmitting bulk data on rx chan) */
+ result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ IPW_SIO_RXCTL,
+ USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ IPW_RXBULK_OFF,
+ 0, /* index */
+ NULL,
+ 0,
+ 100*HZ);
+
+ if (result < 0)
+ dev_err(&port->dev, "Disabling bulk RxRead failed (error = %d)", result);
+
+ /* shutdown any in-flight urbs that we know about */
+ usb_kill_urb(port->read_urb);
+ usb_kill_urb(port->write_urb);
+}
+
+static void ipw_write_bulk_callback(struct urb *urb, struct pt_regs *regs)
+{
+ struct usb_serial_port *port = urb->context;
+
+ dbg("%s", __FUNCTION__);
+
+ if (urb->status)
+ dbg("%s - nonzero write bulk status received: %d", __FUNCTION__, urb->status);
+
+ schedule_work(&port->work);
+}
+
+static int ipw_write(struct usb_serial_port *port, int from_user, const unsigned char *buf, int count)
+{
+ struct usb_device *dev = port->serial->dev;
+ int ret;
+
+ dbg("%s: TOP: count=%d, from_user=%d, in_interrupt=%ld", __FUNCTION__,
+ count, from_user, in_interrupt() );
+
+ if (count == 0) {
+ dbg("%s - write request of 0 bytes", __FUNCTION__);
+ return 0;
+ }
+
+ /* Racy and broken, FIXME properly! */
+ if (port->write_urb->status == -EINPROGRESS)
+ return 0;
+
+ count = min(count, port->bulk_out_size);
+ if (from_user) {
+ if (copy_from_user(port->bulk_out_buffer, buf, count))
+ return -EFAULT;
+ } else {
+ memcpy(port->bulk_out_buffer, buf, count);
+ }
+
+ dbg("%s count now:%d", __FUNCTION__, count);
+
+ usb_fill_bulk_urb(port->write_urb, dev,
+ usb_sndbulkpipe(dev, port->bulk_out_endpointAddress),
+ port->write_urb->transfer_buffer,
+ count,
+ ipw_write_bulk_callback,
+ port);
+
+ ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
+ if (ret != 0) {
+ dbg("%s - usb_submit_urb(write bulk) failed with error = %d", __FUNCTION__, ret);
+ return ret;
+ }
+
+ dbg("%s returning %d", __FUNCTION__, count);
+ return count;
+}
+
+static int ipw_probe(struct usb_serial_port *port)
+{
+ return 0;
+}
+
+static int ipw_disconnect(struct usb_serial_port *port)
+{
+ usb_set_serial_port_data(port, NULL);
+ return 0;
+}
+
+static struct usb_serial_device_type ipw_device = {
+ .owner = THIS_MODULE,
+ .name = "IPWireless converter",
+ .short_name = "ipw",
+ .id_table = usb_ipw_ids,
+ .num_interrupt_in = NUM_DONT_CARE,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
+ .num_ports = 1,
+ .open = ipw_open,
+ .close = ipw_close,
+ .port_probe = ipw_probe,
+ .port_remove = ipw_disconnect,
+ .write = ipw_write,
+ .write_bulk_callback = ipw_write_bulk_callback,
+ .read_bulk_callback = ipw_read_bulk_callback,
+};
+
+
+
+int usb_ipw_init(void)
+{
+ int retval;
+
+ retval = usb_serial_register(&ipw_device);
+ if (retval)
+ return retval;
+ retval = usb_register(&usb_ipw_driver);
+ if (retval) {
+ usb_serial_deregister(&ipw_device);
+ return retval;
+ }
+ info(DRIVER_DESC " " DRIVER_VERSION);
+ return 0;
+}
+
+void usb_ipw_exit(void)
+{
+ usb_deregister(&usb_ipw_driver);
+ usb_serial_deregister(&ipw_device);
+}
+
+module_init(usb_ipw_init);
+module_exit(usb_ipw_exit);
+
+/* Module information */
+MODULE_AUTHOR( DRIVER_AUTHOR );
+MODULE_DESCRIPTION( DRIVER_DESC );
+MODULE_LICENSE("GPL");
+
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug enabled or not");
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 3d1571695bde..c2c536f194e0 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -322,7 +322,7 @@ static void ir_close (struct usb_serial_port *port, struct file * filp)
dbg("%s - port %d", __FUNCTION__, port->number);
/* shutdown our bulk read */
- usb_unlink_urb (port->read_urb);
+ usb_kill_urb(port->read_urb);
}
static int ir_write (struct usb_serial_port *port, int from_user, const unsigned char *buf, int count)
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index cd441d783cbe..0a979e208167 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -285,7 +285,7 @@ static void keyspan_pda_rx_throttle (struct usb_serial_port *port)
upon the device too. */
dbg("keyspan_pda_rx_throttle port %d", port->number);
- usb_unlink_urb(port->interrupt_in_urb);
+ usb_kill_urb(port->interrupt_in_urb);
}
@@ -706,8 +706,8 @@ static void keyspan_pda_close(struct usb_serial_port *port, struct file *filp)
keyspan_pda_set_modem_info(serial, 0);
/* shutdown our bulk reads and writes */
- usb_unlink_urb (port->write_urb);
- usb_unlink_urb (port->interrupt_in_urb);
+ usb_kill_urb(port->write_urb);
+ usb_kill_urb(port->interrupt_in_urb);
}
}
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index c5207d19401a..9e60b3476775 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -336,12 +336,12 @@ static void klsi_105_shutdown (struct usb_serial *serial)
for (j = 0; j < NUM_URBS; j++) {
if (write_urbs[j]) {
/* FIXME - uncomment the following
- * usb_unlink_urb call when the host
+ * usb_kill_urb call when the host
* controllers get fixed to set
* urb->dev = NULL after the urb is
* finished. Otherwise this call
* oopses. */
- /* usb_unlink_urb(write_urbs[j]); */
+ /* usb_kill_urb(write_urbs[j]); */
if (write_urbs[j]->transfer_buffer)
kfree(write_urbs[j]->transfer_buffer);
usb_free_urb (write_urbs[j]);
@@ -467,12 +467,12 @@ static void klsi_105_close (struct usb_serial_port *port, struct file *filp)
err("Disabling read failed (error = %d)", rc);
/* shutdown our bulk reads and writes */
- usb_unlink_urb (port->write_urb);
- usb_unlink_urb (port->read_urb);
+ usb_kill_urb(port->write_urb);
+ usb_kill_urb(port->read_urb);
/* unlink our write pool */
/* FIXME */
/* wgg - do I need this? I think so. */
- usb_unlink_urb (port->interrupt_in_urb);
+ usb_kill_urb(port->interrupt_in_urb);
info("kl5kusb105 port stats: %ld bytes in, %ld bytes out", priv->bytes_in, priv->bytes_out);
} /* klsi_105_close */
@@ -994,7 +994,7 @@ static int klsi_105_ioctl (struct usb_serial_port *port, struct file * file,
static void klsi_105_throttle (struct usb_serial_port *port)
{
dbg("%s - port %d", __FUNCTION__, port->number);
- usb_unlink_urb (port->read_urb);
+ usb_kill_urb(port->read_urb);
}
static void klsi_105_unthrottle (struct usb_serial_port *port)
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index b479916ce269..33539874bd0c 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -350,14 +350,13 @@ static void kobil_close (struct usb_serial_port *port, struct file *filp)
{
dbg("%s - port %d", __FUNCTION__, port->number);
- if (port->write_urb){
- usb_unlink_urb( port->write_urb );
+ if (port->write_urb) {
+ usb_kill_urb(port->write_urb);
usb_free_urb( port->write_urb );
port->write_urb = NULL;
}
- if (port->interrupt_in_urb){
- usb_unlink_urb (port->interrupt_in_urb);
- }
+ if (port->interrupt_in_urb)
+ usb_kill_urb(port->interrupt_in_urb);
}
@@ -458,9 +457,8 @@ static int kobil_write (struct usb_serial_port *port, int from_user,
((priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID) && (priv->filled > 3) && (priv->filled >= (priv->buf[2] + 4))) ) {
// stop reading (except TWIN and KAAN SIM)
- if ( (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID) || (priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) ) {
- usb_unlink_urb( port->interrupt_in_urb );
- }
+ if ( (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID) || (priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) )
+ usb_kill_urb(port->interrupt_in_urb);
todo = priv->filled - priv->cur_pos;
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 212906c74f25..1d7c682aed7a 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -480,9 +480,9 @@ static void mct_u232_close (struct usb_serial_port *port, struct file *filp)
if (port->serial->dev) {
/* shutdown our urbs */
- usb_unlink_urb (port->write_urb);
- usb_unlink_urb (port->read_urb);
- usb_unlink_urb (port->interrupt_in_urb);
+ usb_kill_urb(port->write_urb);
+ usb_kill_urb(port->read_urb);
+ usb_kill_urb(port->interrupt_in_urb);
}
} /* mct_u232_close */
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 371aa2e8e3f1..7032d5f7c6e1 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -183,8 +183,8 @@ static void omninet_close (struct usb_serial_port *port, struct file * filp)
dbg("%s - port %d", __FUNCTION__, port->number);
wport = serial->port[1];
- usb_unlink_urb(wport->write_urb);
- usb_unlink_urb(port->read_urb);
+ usb_kill_urb(wport->write_urb);
+ usb_kill_urb(port->read_urb);
od = usb_get_serial_port_data(port);
if (od)
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index c61e56829a96..5be9c370313b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -55,11 +55,26 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.11"
+#define DRIVER_VERSION "v0.12"
#define DRIVER_DESC "Prolific PL2303 USB to serial adaptor driver"
static int debug;
+#define PL2303_CLOSING_WAIT (30*HZ)
+
+#define PL2303_BUF_SIZE 1024
+#define PL2303_TMP_BUF_SIZE 1024
+
+static char pl2303_tmp_buf[PL2303_TMP_BUF_SIZE];
+static DECLARE_MUTEX(pl2303_tmp_buf_sem);
+
+struct pl2303_buf {
+ unsigned int buf_size;
+ char *buf_buf;
+ char *buf_get;
+ char *buf_put;
+};
+
static struct usb_device_id id_table [] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
@@ -134,12 +149,24 @@ static void pl2303_read_bulk_callback (struct urb *urb, struct pt_regs *regs);
static void pl2303_write_bulk_callback (struct urb *urb, struct pt_regs *regs);
static int pl2303_write (struct usb_serial_port *port, int from_user,
const unsigned char *buf, int count);
+static void pl2303_send (struct usb_serial_port *port);
+static int pl2303_write_room(struct usb_serial_port *port);
+static int pl2303_chars_in_buffer(struct usb_serial_port *port);
static void pl2303_break_ctl(struct usb_serial_port *port,int break_state);
static int pl2303_tiocmget (struct usb_serial_port *port, struct file *file);
static int pl2303_tiocmset (struct usb_serial_port *port, struct file *file,
unsigned int set, unsigned int clear);
static int pl2303_startup (struct usb_serial *serial);
static void pl2303_shutdown (struct usb_serial *serial);
+static struct pl2303_buf *pl2303_buf_alloc(unsigned int size);
+static void pl2303_buf_free(struct pl2303_buf *pb);
+static void pl2303_buf_clear(struct pl2303_buf *pb);
+static unsigned int pl2303_buf_data_avail(struct pl2303_buf *pb);
+static unsigned int pl2303_buf_space_avail(struct pl2303_buf *pb);
+static unsigned int pl2303_buf_put(struct pl2303_buf *pb, const char *buf,
+ unsigned int count);
+static unsigned int pl2303_buf_get(struct pl2303_buf *pb, char *buf,
+ unsigned int count);
/* All of the device info needed for the PL2303 SIO serial converter */
@@ -162,6 +189,8 @@ static struct usb_serial_device_type pl2303_device = {
.read_bulk_callback = pl2303_read_bulk_callback,
.read_int_callback = pl2303_read_int_callback,
.write_bulk_callback = pl2303_write_bulk_callback,
+ .write_room = pl2303_write_room,
+ .chars_in_buffer = pl2303_chars_in_buffer,
.attach = pl2303_startup,
.shutdown = pl2303_shutdown,
};
@@ -174,6 +203,8 @@ enum pl2303_type {
struct pl2303_private {
spinlock_t lock;
+ struct pl2303_buf *buf;
+ int write_urb_in_use;
wait_queue_head_t delta_msr_wait;
u8 line_control;
u8 line_status;
@@ -201,14 +232,28 @@ static int pl2303_startup (struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
priv = kmalloc (sizeof (struct pl2303_private), GFP_KERNEL);
if (!priv)
- return -ENOMEM;
+ goto cleanup;
memset (priv, 0x00, sizeof (struct pl2303_private));
spin_lock_init(&priv->lock);
+ priv->buf = pl2303_buf_alloc(PL2303_BUF_SIZE);
+ if (priv->buf == NULL) {
+ kfree(priv);
+ goto cleanup;
+ }
init_waitqueue_head(&priv->delta_msr_wait);
priv->type = type;
usb_set_serial_port_data(serial->port[i], priv);
}
return 0;
+
+cleanup:
+ for (--i; i>=0; --i) {
+ priv = usb_get_serial_port_data(serial->port[i]);
+ pl2303_buf_free(priv->buf);
+ kfree(priv);
+ usb_set_serial_port_data(serial->port[i], NULL);
+ }
+ return -ENOMEM;
}
static int set_control_lines (struct usb_device *dev, u8 value)
@@ -224,40 +269,109 @@ static int set_control_lines (struct usb_device *dev, u8 value)
static int pl2303_write (struct usb_serial_port *port, int from_user, const unsigned char *buf, int count)
{
- int result;
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
dbg("%s - port %d, %d bytes", __FUNCTION__, port->number, count);
if (!count)
return count;
- if (port->write_urb->status == -EINPROGRESS) {
- dbg("%s - already writing", __FUNCTION__);
- return 0;
- }
-
- count = (count > port->bulk_out_size) ? port->bulk_out_size : count;
if (from_user) {
- if (copy_from_user (port->write_urb->transfer_buffer, buf, count))
+ if (count > PL2303_TMP_BUF_SIZE)
+ count = PL2303_TMP_BUF_SIZE;
+ down(&pl2303_tmp_buf_sem);
+ if (copy_from_user(pl2303_tmp_buf, buf, count)) {
+ up(&pl2303_tmp_buf_sem);
return -EFAULT;
- } else {
- memcpy (port->write_urb->transfer_buffer, buf, count);
+ }
+ buf = pl2303_tmp_buf;
}
-
+
+ spin_lock_irqsave(&priv->lock, flags);
+ count = pl2303_buf_put(priv->buf, buf, count);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (from_user)
+ up(&pl2303_tmp_buf_sem);
+
+ pl2303_send(port);
+
+ return count;
+}
+
+static void pl2303_send(struct usb_serial_port *port)
+{
+ int count, result;
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+
+ dbg("%s - port %d", __FUNCTION__, port->number);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (priv->write_urb_in_use) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
+
+ count = pl2303_buf_get(priv->buf, port->write_urb->transfer_buffer,
+ port->bulk_out_size);
+
+ if (count == 0) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
+
+ priv->write_urb_in_use = 1;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
usb_serial_debug_data(debug, &port->dev, __FUNCTION__, count, port->write_urb->transfer_buffer);
port->write_urb->transfer_buffer_length = count;
port->write_urb->dev = port->serial->dev;
result = usb_submit_urb (port->write_urb, GFP_ATOMIC);
- if (result)
+ if (result) {
dev_err(&port->dev, "%s - failed submitting write urb, error %d\n", __FUNCTION__, result);
- else
- result = count;
+ priv->write_urb_in_use = 0;
+ // TODO: reschedule pl2303_send
+ }
- return result;
+ schedule_work(&port->work);
}
+static int pl2303_write_room(struct usb_serial_port *port)
+{
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
+ int room = 0;
+ unsigned long flags;
+
+ dbg("%s - port %d", __FUNCTION__, port->number);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ room = pl2303_buf_space_avail(priv->buf);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ dbg("%s - returns %d", __FUNCTION__, room);
+ return room;
+}
+
+static int pl2303_chars_in_buffer(struct usb_serial_port *port)
+{
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
+ int chars = 0;
+ unsigned long flags;
+
+ dbg("%s - port %d", __FUNCTION__, port->number);
+ spin_lock_irqsave(&priv->lock, flags);
+ chars = pl2303_buf_data_avail(priv->buf);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ dbg("%s - returns %d", __FUNCTION__, chars);
+ return chars;
+}
static void pl2303_set_termios (struct usb_serial_port *port, struct termios *old_termios)
{
@@ -422,7 +536,7 @@ static void pl2303_set_termios (struct usb_serial_port *port, struct termios *ol
}
kfree (buf);
-}
+}
static int pl2303_open (struct usb_serial_port *port, struct file *filp)
{
@@ -461,7 +575,7 @@ static int pl2303_open (struct usb_serial_port *port, struct file *filp)
FISH (VENDOR_READ_REQUEST_TYPE, VENDOR_READ_REQUEST, 0x8383, 0);
SOUP (VENDOR_WRITE_REQUEST_TYPE, VENDOR_WRITE_REQUEST, 0, 1);
SOUP (VENDOR_WRITE_REQUEST_TYPE, VENDOR_WRITE_REQUEST, 1, 0);
-
+
if (priv->type == HX) {
/* HX chip */
SOUP (VENDOR_WRITE_REQUEST_TYPE, VENDOR_WRITE_REQUEST, 2, 0x44);
@@ -504,45 +618,67 @@ static int pl2303_open (struct usb_serial_port *port, struct file *filp)
static void pl2303_close (struct usb_serial_port *port, struct file *filp)
{
- struct pl2303_private *priv;
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int c_cflag;
- int result;
+ int bps;
+ long timeout;
+ wait_queue_t wait;
dbg("%s - port %d", __FUNCTION__, port->number);
- /* shutdown our urbs */
- dbg("%s - shutting down urbs", __FUNCTION__);
- result = usb_unlink_urb (port->write_urb);
- if (result)
- dbg("%s - usb_unlink_urb (write_urb)"
- " failed with reason: %d", __FUNCTION__,
- result);
+ /* wait for data to drain from the buffer */
+ spin_lock_irqsave(&priv->lock, flags);
+ timeout = PL2303_CLOSING_WAIT;
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&port->tty->write_wait, &wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (pl2303_buf_data_avail(priv->buf) == 0
+ || timeout == 0 || signal_pending(current)
+ || !usb_get_intfdata(port->serial->interface)) /* disconnect */
+ break;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ timeout = schedule_timeout(timeout);
+ spin_lock_irqsave(&priv->lock, flags);
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&port->tty->write_wait, &wait);
+ /* clear out any remaining data in the buffer */
+ pl2303_buf_clear(priv->buf);
+ spin_unlock_irqrestore(&priv->lock, flags);
- result = usb_unlink_urb (port->read_urb);
- if (result)
- dbg("%s - usb_unlink_urb (read_urb) "
- "failed with reason: %d", __FUNCTION__,
- result);
+ /* wait for characters to drain from the device */
+ /* (this is long enough for the entire 256 byte */
+ /* pl2303 hardware buffer to drain with no flow */
+ /* control for data rates of 1200 bps or more, */
+ /* for lower rates we should really know how much */
+ /* data is in the buffer to compute a delay */
+ /* that is not unnecessarily long) */
+ bps = tty_get_baud_rate(port->tty);
+ if (bps > 1200)
+ timeout = max((HZ*2560)/bps,HZ/10);
+ else
+ timeout = 2*HZ;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(timeout);
- result = usb_unlink_urb (port->interrupt_in_urb);
- if (result)
- dbg("%s - usb_unlink_urb (interrupt_in_urb)"
- " failed with reason: %d", __FUNCTION__,
- result);
+ /* shutdown our urbs */
+ dbg("%s - shutting down urbs", __FUNCTION__);
+ usb_kill_urb(port->write_urb);
+ usb_kill_urb(port->read_urb);
+ usb_kill_urb(port->interrupt_in_urb);
if (port->tty) {
c_cflag = port->tty->termios->c_cflag;
if (c_cflag & HUPCL) {
/* drop DTR and RTS */
- priv = usb_get_serial_port_data(port);
spin_lock_irqsave(&priv->lock, flags);
priv->line_control = 0;
spin_unlock_irqrestore (&priv->lock, flags);
set_control_lines (port->serial->dev, 0);
}
}
-
}
static int pl2303_tiocmset (struct usb_serial_port *port, struct file *file,
@@ -672,12 +808,17 @@ static void pl2303_break_ctl (struct usb_serial_port *port, int break_state)
static void pl2303_shutdown (struct usb_serial *serial)
{
int i;
+ struct pl2303_private *priv;
dbg("%s", __FUNCTION__);
for (i = 0; i < serial->num_ports; ++i) {
- kfree (usb_get_serial_port_data(serial->port[i]));
- usb_set_serial_port_data(serial->port[i], NULL);
+ priv = usb_get_serial_port_data(serial->port[i]);
+ if (priv) {
+ pl2303_buf_free(priv->buf);
+ kfree(priv);
+ usb_set_serial_port_data(serial->port[i], NULL);
+ }
}
}
@@ -815,11 +956,23 @@ static void pl2303_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
static void pl2303_write_bulk_callback (struct urb *urb, struct pt_regs *regs)
{
struct usb_serial_port *port = (struct usb_serial_port *) urb->context;
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
int result;
dbg("%s - port %d", __FUNCTION__, port->number);
-
- if (urb->status) {
+
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ priv->write_urb_in_use = 0;
+ return;
+ default:
/* error in the urb, so we have to resubmit it */
dbg("%s - Overflow in write", __FUNCTION__);
dbg("%s - nonzero write bulk status received: %d", __FUNCTION__, urb->status);
@@ -828,14 +981,199 @@ static void pl2303_write_bulk_callback (struct urb *urb, struct pt_regs *regs)
result = usb_submit_urb (port->write_urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev, "%s - failed resubmitting write urb, error %d\n", __FUNCTION__, result);
+ else
+ return;
+ }
- return;
+ priv->write_urb_in_use = 0;
+
+ /* send any buffered data */
+ pl2303_send(port);
+}
+
+
+/*
+ * pl2303_buf_alloc
+ *
+ * Allocate a circular buffer and all associated memory.
+ */
+
+static struct pl2303_buf *pl2303_buf_alloc(unsigned int size)
+{
+
+ struct pl2303_buf *pb;
+
+
+ if (size == 0)
+ return NULL;
+
+ pb = (struct pl2303_buf *)kmalloc(sizeof(struct pl2303_buf), GFP_KERNEL);
+ if (pb == NULL)
+ return NULL;
+
+ pb->buf_buf = kmalloc(size, GFP_KERNEL);
+ if (pb->buf_buf == NULL) {
+ kfree(pb);
+ return NULL;
}
- schedule_work(&port->work);
+ pb->buf_size = size;
+ pb->buf_get = pb->buf_put = pb->buf_buf;
+
+ return pb;
+
+}
+
+
+/*
+ * pl2303_buf_free
+ *
+ * Free the buffer and all associated memory.
+ */
+
+static void pl2303_buf_free(struct pl2303_buf *pb)
+{
+ if (pb != NULL) {
+ if (pb->buf_buf != NULL)
+ kfree(pb->buf_buf);
+ kfree(pb);
+ }
+}
+
+
+/*
+ * pl2303_buf_clear
+ *
+ * Clear out all data in the circular buffer.
+ */
+
+static void pl2303_buf_clear(struct pl2303_buf *pb)
+{
+ if (pb != NULL)
+ pb->buf_get = pb->buf_put;
+ /* equivalent to a get of all data available */
+}
+
+
+/*
+ * pl2303_buf_data_avail
+ *
+ * Return the number of bytes of data available in the circular
+ * buffer.
+ */
+
+static unsigned int pl2303_buf_data_avail(struct pl2303_buf *pb)
+{
+ if (pb != NULL)
+ return ((pb->buf_size + pb->buf_put - pb->buf_get) % pb->buf_size);
+ else
+ return 0;
+}
+
+
+/*
+ * pl2303_buf_space_avail
+ *
+ * Return the number of bytes of space available in the circular
+ * buffer.
+ */
+
+static unsigned int pl2303_buf_space_avail(struct pl2303_buf *pb)
+{
+ if (pb != NULL)
+ return ((pb->buf_size + pb->buf_get - pb->buf_put - 1) % pb->buf_size);
+ else
+ return 0;
+}
+
+
+/*
+ * pl2303_buf_put
+ *
+ * Copy data from a user buffer and put it into the circular buffer.
+ * Restrict to the amount of space available.
+ *
+ * Return the number of bytes copied.
+ */
+
+static unsigned int pl2303_buf_put(struct pl2303_buf *pb, const char *buf,
+ unsigned int count)
+{
+
+ unsigned int len;
+
+
+ if (pb == NULL)
+ return 0;
+
+ len = pl2303_buf_space_avail(pb);
+ if (count > len)
+ count = len;
+
+ if (count == 0)
+ return 0;
+
+ len = pb->buf_buf + pb->buf_size - pb->buf_put;
+ if (count > len) {
+ memcpy(pb->buf_put, buf, len);
+ memcpy(pb->buf_buf, buf+len, count - len);
+ pb->buf_put = pb->buf_buf + count - len;
+ } else {
+ memcpy(pb->buf_put, buf, count);
+ if (count < len)
+ pb->buf_put += count;
+ else /* count == len */
+ pb->buf_put = pb->buf_buf;
+ }
+
+ return count;
+
}
+/*
+ * pl2303_buf_get
+ *
+ * Get data from the circular buffer and copy to the given buffer.
+ * Restrict to the amount of data available.
+ *
+ * Return the number of bytes copied.
+ */
+
+static unsigned int pl2303_buf_get(struct pl2303_buf *pb, char *buf,
+ unsigned int count)
+{
+
+ unsigned int len;
+
+
+ if (pb == NULL)
+ return 0;
+
+ len = pl2303_buf_data_avail(pb);
+ if (count > len)
+ count = len;
+
+ if (count == 0)
+ return 0;
+
+ len = pb->buf_buf + pb->buf_size - pb->buf_get;
+ if (count > len) {
+ memcpy(buf, pb->buf_get, len);
+ memcpy(buf+len, pb->buf_buf, count - len);
+ pb->buf_get = pb->buf_buf + count - len;
+ } else {
+ memcpy(buf, pb->buf_get, count);
+ if (count < len)
+ pb->buf_get += count;
+ else /* count == len */
+ pb->buf_get = pb->buf_buf;
+ }
+
+ return count;
+
+}
+
static int __init pl2303_init (void)
{
int retval;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 03e8a7e1c8ee..e3b7c1dd2f4c 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -388,7 +388,7 @@ static struct usb_serial *get_free_serial (struct usb_serial *serial, int num_po
good_spot = 1;
for (j = 1; j <= num_ports-1; ++j)
- if ((serial_table[i+j]) || (i+j >= SERIAL_TTY_MINORS)) {
+ if ((i+j >= SERIAL_TTY_MINORS) || (serial_table[i+j])) {
good_spot = 0;
i += j;
break;
@@ -405,7 +405,7 @@ static struct usb_serial *get_free_serial (struct usb_serial *serial, int num_po
return NULL;
}
-static void return_serial (struct usb_serial *serial)
+static void return_serial(struct usb_serial *serial)
{
int i;
@@ -417,8 +417,6 @@ static void return_serial (struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
serial_table[serial->minor + i] = NULL;
}
-
- return;
}
static void destroy_serial(struct kref *kref)
@@ -455,15 +453,15 @@ static void destroy_serial(struct kref *kref)
if (!port)
continue;
if (port->read_urb) {
- usb_unlink_urb(port->read_urb);
+ usb_kill_urb(port->read_urb);
usb_free_urb(port->read_urb);
}
if (port->write_urb) {
- usb_unlink_urb(port->write_urb);
+ usb_kill_urb(port->write_urb);
usb_free_urb(port->write_urb);
}
if (port->interrupt_in_urb) {
- usb_unlink_urb(port->interrupt_in_urb);
+ usb_kill_urb(port->interrupt_in_urb);
usb_free_urb(port->interrupt_in_urb);
}
kfree(port->bulk_in_buffer);
@@ -621,15 +619,12 @@ static void serial_throttle (struct tty_struct * tty)
if (!port->open_count) {
dbg ("%s - port not open", __FUNCTION__);
- goto exit;
+ return;
}
/* pass on to the driver specific version of this function */
if (port->serial->type->throttle)
port->serial->type->throttle(port);
-
-exit:
- ;
}
static void serial_unthrottle (struct tty_struct * tty)
@@ -640,15 +635,12 @@ static void serial_unthrottle (struct tty_struct * tty)
if (!port->open_count) {
dbg("%s - port not open", __FUNCTION__);
- goto exit;
+ return;
}
/* pass on to the driver specific version of this function */
if (port->serial->type->unthrottle)
port->serial->type->unthrottle(port);
-
-exit:
- ;
}
static int serial_ioctl (struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg)
@@ -681,15 +673,12 @@ static void serial_set_termios (struct tty_struct *tty, struct termios * old)
if (!port->open_count) {
dbg("%s - port not open", __FUNCTION__);
- goto exit;
+ return;
}
/* pass on to the driver specific version of this function if it is available */
if (port->serial->type->set_termios)
port->serial->type->set_termios(port, old);
-
-exit:
- ;
}
static void serial_break (struct tty_struct *tty, int break_state)
@@ -700,15 +689,12 @@ static void serial_break (struct tty_struct *tty, int break_state)
if (!port->open_count) {
dbg("%s - port not open", __FUNCTION__);
- goto exit;
+ return;
}
/* pass on to the driver specific version of this function if it is available */
if (port->serial->type->break_ctl)
port->serial->type->break_ctl(port, break_state);
-
-exit:
- ;
}
static int serial_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data)
@@ -814,15 +800,15 @@ static void port_release(struct device *dev)
dbg ("%s - %s", __FUNCTION__, dev->bus_id);
if (port->read_urb) {
- usb_unlink_urb(port->read_urb);
+ usb_kill_urb(port->read_urb);
usb_free_urb(port->read_urb);
}
if (port->write_urb) {
- usb_unlink_urb(port->write_urb);
+ usb_kill_urb(port->write_urb);
usb_free_urb(port->write_urb);
}
if (port->interrupt_in_urb) {
- usb_unlink_urb(port->interrupt_in_urb);
+ usb_kill_urb(port->interrupt_in_urb);
usb_free_urb(port->interrupt_in_urb);
}
kfree(port->bulk_in_buffer);
@@ -853,6 +839,25 @@ static struct usb_serial * create_serial (struct usb_device *dev,
return serial;
}
+static struct usb_serial_device_type *search_serial_device(struct usb_interface *iface)
+{
+ struct list_head *p;
+ const struct usb_device_id *id;
+ struct usb_serial_device_type *t;
+
+	/* Loop through the known devices and see if the usb id matches */
+ list_for_each(p, &usb_serial_driver_list) {
+ t = list_entry(p, struct usb_serial_device_type, driver_list);
+ id = usb_match_id(iface, t->id_table);
+ if (id != NULL) {
+ dbg("descriptor matches");
+ return t;
+ }
+ }
+
+ return NULL;
+}
+
int usb_serial_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -865,9 +870,7 @@ int usb_serial_probe(struct usb_interface *interface,
struct usb_endpoint_descriptor *bulk_in_endpoint[MAX_NUM_PORTS];
struct usb_endpoint_descriptor *bulk_out_endpoint[MAX_NUM_PORTS];
struct usb_serial_device_type *type = NULL;
- struct list_head *tmp;
int retval;
- int found;
int minor;
int buffer_size;
int i;
@@ -876,22 +879,9 @@ int usb_serial_probe(struct usb_interface *interface,
int num_bulk_out = 0;
int num_ports = 0;
int max_endpoints;
- const struct usb_device_id *id_pattern = NULL;
-
- /* loop through our list of known serial converters, and see if this
- device matches. */
- found = 0;
- list_for_each (tmp, &usb_serial_driver_list) {
- type = list_entry(tmp, struct usb_serial_device_type, driver_list);
- id_pattern = usb_match_id(interface, type->id_table);
- if (id_pattern != NULL) {
- dbg("descriptor matches");
- found = 1;
- break;
- }
- }
- if (!found) {
- /* no match */
+
+ type = search_serial_device(interface);
+ if (!type) {
dbg("none matched");
return -ENODEV;
}
@@ -899,17 +889,21 @@ int usb_serial_probe(struct usb_interface *interface,
serial = create_serial (dev, interface, type);
if (!serial) {
dev_err(&interface->dev, "%s - out of memory\n", __FUNCTION__);
- return -ENODEV;
+ return -ENOMEM;
}
/* if this device type has a probe function, call it */
if (type->probe) {
+ const struct usb_device_id *id;
+
if (!try_module_get(type->owner)) {
dev_err(&interface->dev, "module get failed, exiting\n");
kfree (serial);
return -EIO;
}
- retval = type->probe (serial, id_pattern);
+
+ id = usb_match_id(interface, type->id_table);
+ retval = type->probe(serial, id);
module_put(type->owner);
if (retval) {
@@ -1053,6 +1047,7 @@ int usb_serial_probe(struct usb_interface *interface,
goto probe_error;
}
buffer_size = endpoint->wMaxPacketSize;
+ port->bulk_in_size = buffer_size;
port->bulk_in_endpointAddress = endpoint->bEndpointAddress;
port->bulk_in_buffer = kmalloc (buffer_size, GFP_KERNEL);
if (!port->bulk_in_buffer) {
@@ -1224,7 +1219,7 @@ struct tty_driver *usb_serial_tty_driver;
static int __init usb_serial_init(void)
{
int i;
- int result = 0;
+ int result;
usb_serial_tty_driver = alloc_tty_driver(SERIAL_TTY_MINORS);
if (!usb_serial_tty_driver)
@@ -1235,13 +1230,17 @@ static int __init usb_serial_init(void)
serial_table[i] = NULL;
}
- bus_register(&usb_serial_bus_type);
+ result = bus_register(&usb_serial_bus_type);
+ if (result) {
+ err("%s - registering bus driver failed", __FUNCTION__);
+ goto exit_bus;
+ }
/* register the generic driver, if we should */
result = usb_serial_generic_register(debug);
if (result < 0) {
err("%s - registering generic driver failed", __FUNCTION__);
- goto exit;
+ goto exit_generic;
}
usb_serial_tty_driver->owner = THIS_MODULE;
@@ -1259,7 +1258,7 @@ static int __init usb_serial_init(void)
result = tty_register_driver(usb_serial_tty_driver);
if (result) {
err("%s - tty_register_driver failed", __FUNCTION__);
- goto exit_generic;
+ goto exit_reg_driver;
}
/* register the USB driver */
@@ -1276,10 +1275,13 @@ static int __init usb_serial_init(void)
exit_tty:
tty_unregister_driver(usb_serial_tty_driver);
-exit_generic:
+exit_reg_driver:
usb_serial_generic_deregister();
-exit:
+exit_generic:
+ bus_unregister(&usb_serial_bus_type);
+
+exit_bus:
err ("%s - returning with error %d", __FUNCTION__, result);
put_tty_driver(usb_serial_tty_driver);
return result;
@@ -1332,17 +1334,13 @@ int usb_serial_register(struct usb_serial_device_type *new_device)
/* Add this device to our list of devices */
list_add(&new_device->driver_list, &usb_serial_driver_list);
- retval = usb_serial_bus_register (new_device);
-
- if (retval)
- goto error;
-
- info("USB Serial support registered for %s", new_device->name);
-
- return retval;
-error:
- err("problem %d when registering driver %s", retval, new_device->name);
- list_del(&new_device->driver_list);
+ retval = usb_serial_bus_register(new_device);
+ if (retval) {
+ err("problem %d when registering driver %s", retval, new_device->name);
+ list_del(&new_device->driver_list);
+ }
+ else
+ info("USB Serial support registered for %s", new_device->name);
return retval;
}
@@ -1369,6 +1367,7 @@ EXPORT_SYMBOL(usb_serial_port_softint);
/* Module information */
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
+MODULE_VERSION( DRIVER_VERSION );
MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
diff --git a/drivers/usb/serial/usb-serial.h b/drivers/usb/serial/usb-serial.h
index e2e59560d7fb..d3702957b609 100644
--- a/drivers/usb/serial/usb-serial.h
+++ b/drivers/usb/serial/usb-serial.h
@@ -100,6 +100,7 @@ struct usb_serial_port {
__u8 interrupt_in_endpointAddress;
unsigned char * bulk_in_buffer;
+ int bulk_in_size;
struct urb * read_urb;
__u8 bulk_in_endpointAddress;
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 8a0a75533e70..0e5d90552789 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -446,9 +446,9 @@ static void visor_close (struct usb_serial_port *port, struct file * filp)
dbg("%s - port %d", __FUNCTION__, port->number);
/* shutdown our urbs */
- usb_unlink_urb (port->read_urb);
+ usb_kill_urb(port->read_urb);
if (port->interrupt_in_urb)
- usb_unlink_urb (port->interrupt_in_urb);
+ usb_kill_urb(port->interrupt_in_urb);
/* Try to send shutdown message, if the device is gone, this will just fail. */
transfer_buffer = kmalloc (0x12, GFP_KERNEL);
@@ -655,7 +655,7 @@ exit:
static void visor_throttle (struct usb_serial_port *port)
{
dbg("%s - port %d", __FUNCTION__, port->number);
- usb_unlink_urb (port->read_urb);
+ usb_kill_urb(port->read_urb);
}
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 1b759018e8b8..179a671a2770 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -679,7 +679,7 @@ static void whiteheat_close(struct usb_serial_port *port, struct file * filp)
list_for_each_safe(tmp, tmp2, &info->rx_urbs_submitted) {
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
- usb_unlink_urb(urb);
+ usb_kill_urb(urb);
list_del(tmp);
list_add(tmp, &info->rx_urbs_free);
}
@@ -690,7 +690,7 @@ static void whiteheat_close(struct usb_serial_port *port, struct file * filp)
list_for_each_safe(tmp, tmp2, &info->tx_urbs_submitted) {
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
- usb_unlink_urb(urb);
+ usb_kill_urb(urb);
list_del(tmp);
list_add(tmp, &info->tx_urbs_free);
}
@@ -1343,7 +1343,7 @@ static void stop_command_port(struct usb_serial *serial)
spin_lock_irqsave(&command_info->lock, flags);
command_info->port_running--;
if (!command_info->port_running)
- usb_unlink_urb(command_port->read_urb);
+ usb_kill_urb(command_port->read_urb);
spin_unlock_irqrestore(&command_info->lock, flags);
}
@@ -1371,7 +1371,7 @@ static int start_port_read(struct usb_serial_port *port)
list_for_each_safe(tmp, tmp2, &info->rx_urbs_submitted) {
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
- usb_unlink_urb(urb);
+ usb_kill_urb(urb);
list_del(tmp);
list_add(tmp, &info->rx_urbs_free);
}
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 8aa136b4b380..d599362bebcc 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1053,12 +1053,6 @@ static int isd200_get_inquiry_data( struct us_data *us )
/* Standard IDE interface only supports disks */
info->InquiryData.DeviceType = DIRECT_ACCESS_DEVICE;
- /* Fix-up the return data from an INQUIRY command to show
- * ANSI SCSI rev 2 so we don't confuse the SCSI layers above us
- * in Linux.
- */
- info->InquiryData.Versions = 0x2;
-
/* The length must be at least 36 (5 + 31) */
info->InquiryData.AdditionalLength = 0x1F;
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 99ed4d9bc490..9d3d77452525 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -58,38 +58,6 @@
***********************************************************************/
/*
- * Fix-up the return data from an INQUIRY command to show
- * ANSI SCSI rev 2 so we don't confuse the SCSI layers above us
- */
-static void fix_inquiry_data(struct scsi_cmnd *srb)
-{
- unsigned char databuf[3];
- unsigned int index, offset;
-
- /* verify that it's an INQUIRY command */
- if (srb->cmnd[0] != INQUIRY)
- return;
-
- index = offset = 0;
- if (usb_stor_access_xfer_buf(databuf, sizeof(databuf), srb,
- &index, &offset, FROM_XFER_BUF) != sizeof(databuf))
- return;
-
- if ((databuf[2] & 7) == 2)
- return;
-
- US_DEBUGP("Fixing INQUIRY data to show SCSI rev 2 - was %d\n",
- databuf[2] & 7);
-
- /* Change the SCSI revision number */
- databuf[2] = (databuf[2] & ~7) | 2;
-
- index = offset = 0;
- usb_stor_access_xfer_buf(databuf, sizeof(databuf), srb,
- &index, &offset, TO_XFER_BUF);
-}
-
-/*
* Fix-up the return data from a READ CAPACITY command. My Feiya reader
* returns a value that is 1 too large.
*/
@@ -137,10 +105,6 @@ void usb_stor_qic157_command(struct scsi_cmnd *srb, struct us_data *us)
/* send the command to the transport layer */
usb_stor_invoke_transport(srb, us);
- if (srb->result == SAM_STAT_GOOD) {
- /* fix the INQUIRY data if necessary */
- fix_inquiry_data(srb);
- }
}
void usb_stor_ATAPI_command(struct scsi_cmnd *srb, struct us_data *us)
@@ -160,11 +124,6 @@ void usb_stor_ATAPI_command(struct scsi_cmnd *srb, struct us_data *us)
/* send the command to the transport layer */
usb_stor_invoke_transport(srb, us);
-
- if (srb->result == SAM_STAT_GOOD) {
- /* fix the INQUIRY data if necessary */
- fix_inquiry_data(srb);
- }
}
@@ -208,11 +167,6 @@ void usb_stor_ufi_command(struct scsi_cmnd *srb, struct us_data *us)
/* send the command to the transport layer */
usb_stor_invoke_transport(srb, us);
-
- if (srb->result == SAM_STAT_GOOD) {
- /* Fix the data for an INQUIRY, if necessary */
- fix_inquiry_data(srb);
- }
}
void usb_stor_transparent_scsi_command(struct scsi_cmnd *srb,
@@ -222,9 +176,6 @@ void usb_stor_transparent_scsi_command(struct scsi_cmnd *srb,
usb_stor_invoke_transport(srb, us);
if (srb->result == SAM_STAT_GOOD) {
- /* Fix the INQUIRY data if necessary */
- fix_inquiry_data(srb);
-
/* Fix the READ CAPACITY result if necessary */
if (us->flags & US_FL_FIX_CAPACITY)
fix_read_capacity(srb);
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index c18cd06ea31c..83d7003861ff 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -98,6 +98,23 @@ static int slave_configure(struct scsi_device *sdev)
* the end, scatter-gather buffers follow page boundaries. */
blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
+ /* Set the SCSI level to at least 2. We'll leave it at 3 if that's
+ * what is originally reported. We need this to avoid confusing
+ * the SCSI layer with devices that report 0 or 1, but need 10-byte
+ * commands (ala ATAPI devices behind certain bridges, or devices
+ * which simply have broken INQUIRY data).
+ *
+ * NOTE: This means /dev/sg programs (ala cdrecord) will get the
+ * actual information. This seems to be the preference for
+ * programs like that.
+ *
+ * NOTE: This also means that /proc/scsi/scsi and sysfs may report
+ * the actual value or the modified one, depending on where the
+ * data comes from.
+ */
+ if (sdev->scsi_level < SCSI_2)
+ sdev->scsi_level = SCSI_2;
+
/* According to the technical support people at Genesys Logic,
* devices using their chips have problems transferring more than
* 32 KB at a time. In practice people have found that 64 KB
@@ -266,7 +283,7 @@ static int device_reset(struct scsi_cmnd *srb)
static int bus_reset(struct scsi_cmnd *srb)
{
struct us_data *us = (struct us_data *)srb->device->host->hostdata[0];
- int result;
+ int result, rc;
US_DEBUGP("%s called\n", __FUNCTION__);
if (us->sm_state != US_STATE_IDLE) {
@@ -291,8 +308,16 @@ static int bus_reset(struct scsi_cmnd *srb)
result = -EBUSY;
US_DEBUGP("Refusing to reset a multi-interface device\n");
} else {
- result = usb_reset_device(us->pusb_dev);
- US_DEBUGP("usb_reset_device returns %d\n", result);
+ rc = usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf);
+ if (rc < 0) {
+ US_DEBUGP("unable to lock device for reset: %d\n", rc);
+ result = rc;
+ } else {
+ result = usb_reset_device(us->pusb_dev);
+ if (rc)
+ usb_unlock_device(us->pusb_dev);
+ US_DEBUGP("usb_reset_device returns %d\n", result);
+ }
}
up(&(us->dev_semaphore));
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 42f784321607..07b919d800a1 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -911,7 +911,6 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
int result;
/* issue the command */
- us->iobuf[0] = 0;
result = usb_stor_control_msg(us, us->recv_ctrl_pipe,
US_BULK_GET_MAX_LUN,
USB_DIR_IN | USB_TYPE_CLASS |
@@ -922,7 +921,7 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
result, us->iobuf[0]);
/* if we have a successful request, return the result */
- if (result >= 0)
+ if (result > 0)
return us->iobuf[0];
/*
@@ -934,13 +933,16 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
if (result == -EPIPE) {
usb_stor_clear_halt(us, us->recv_bulk_pipe);
usb_stor_clear_halt(us, us->send_bulk_pipe);
- /* return the default -- no LUNs */
- return 0;
}
- /* An answer or a STALL are the only valid responses. If we get
- * something else, return an indication of error */
- return -1;
+ /*
+ * Some devices don't like GetMaxLUN. They may STALL the control
+ * pipe, they may return a zero-length result, they may do nothing at
+ * all and timeout, or they may fail in even more bizarrely creative
+ * ways. In these cases the best approach is to use the default
+ * value: only one LUN.
+ */
+ return 0;
}
int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
@@ -1055,8 +1057,13 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
/* try to compute the actual residue, based on how much data
* was really transferred and what the device tells us */
- residue = min(residue, transfer_length);
- srb->resid = max(srb->resid, (int) residue);
+ if (residue) {
+ if (!(us->flags & US_FL_IGNORE_RESIDUE) ||
+ srb->sc_data_direction == DMA_TO_DEVICE) {
+ residue = min(residue, transfer_length);
+ srb->resid = max(srb->resid, (int) residue);
+ }
+ }
/* based on the status code, we report good or bad */
switch (bcs->Status) {
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 4fe25f6b0432..1e0eb58f0c09 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -36,13 +36,16 @@
/* If you edit this file, please try to keep it sorted first by VendorID,
* then by ProductID.
*
- * If you want to add an entry for this file, please send the following
- * to greg@kroah.com:
- * - patch that adds the entry for your device which includes your
- * email address right above the entry.
+ * If you want to add an entry for this file, be sure to include the
+ * following information:
+ * - a patch that adds the entry for your device, including your
+ * email address right above the entry (plus maybe a brief
+ * explanation of the reason for the entry),
* - a copy of /proc/bus/usb/devices with your device plugged in
* running with this patch.
- *
+ * Send your submission to either Phil Dibowitz <phil@ipom.com> or
+ * Alan Stern <stern@rowland.harvard.edu>, and don't forget to CC: the
+ * USB development list <linux-usb-devel@lists.sourceforge.net>.
*/
UNUSUAL_DEV( 0x03ee, 0x6901, 0x0000, 0x0100,
@@ -68,16 +71,6 @@ UNUSUAL_DEV( 0x03f0, 0x0307, 0x0001, 0x0001,
US_SC_8070, US_PR_SCM_ATAPI, init_8200e, 0),
#endif
-/* <torsten.scherer@uni-bielefeld.de>: I don't know the name of the bridge
- * manufacturer, but I've got an external USB drive by the Revoltec company
- * that needs this. otherwise the drive is recognized as /dev/sda, but any
- * access to it blocks indefinitely.
- */
-UNUSUAL_DEV( 0x0402, 0x5621, 0x0103, 0x0103,
- "Revoltec",
- "USB/IDE Bridge (ATA/ATAPI)",
- US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY),
-
/* Deduced by Jonathan Woithe <jwoithe@physics.adelaide.edu.au>
* Entry needed for flags: US_FL_FIX_INQUIRY because initial inquiry message
* always fails and confuses drive.
@@ -95,12 +88,6 @@ UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
US_SC_SCSI, US_PR_DPCM_USB, NULL, 0 ),
#endif
-/* Patch submitted by Alessandro Fracchetti <al.fracchetti@tin.it> */
-UNUSUAL_DEV( 0x0482, 0x0105, 0x0100, 0x0100,
- "Kyocera",
- "Finecam L3",
- US_SC_SCSI, US_PR_BULK, NULL, US_FL_FIX_INQUIRY),
-
/* Patch submitted by Philipp Friedrich <philipp@void.at> */
UNUSUAL_DEV( 0x0482, 0x0100, 0x0100, 0x0100,
"Kyocera",
@@ -121,6 +108,7 @@ UNUSUAL_DEV( 0x0482, 0x0103, 0x0100, 0x0100,
/* Patch for Kyocera Finecam L3
* Submitted by Michael Krauth <michael.krauth@web.de>
+ * and Alessandro Fracchetti <al.fracchetti@tin.it>
*/
UNUSUAL_DEV( 0x0482, 0x0105, 0x0100, 0x0100,
"Kyocera",
@@ -149,10 +137,13 @@ UNUSUAL_DEV( 0x04b8, 0x0602, 0x0110, 0x0110,
"785EPX Storage",
US_SC_SCSI, US_PR_BULK, NULL, US_FL_SINGLE_LUN),
+/* Not sure who reported this originally but
+ * Pavel Machek <pavel@ucw.cz> requested that the extra US_FL_SINGLE_LUN
+ * flag be added */
UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210,
"Fujifilm",
"FinePix 1400Zoom",
- US_SC_UFI, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY),
+ US_SC_UFI, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
/* Reported by Peter Wächtler <pwaechtler@loewe-komp.de>
* The device needs the flags only.
@@ -180,6 +171,16 @@ UNUSUAL_DEV( 0x04da, 0x0d05, 0x0000, 0x0000,
"CD-R/RW Drive",
US_SC_8070, US_PR_CB, NULL, 0),
+/* Reported by Adriaan Penning <a.penning@luon.net>
+ * Note that these cameras report "Medium not present" after
+ * ALLOW_MEDIUM_REMOVAL, so they also need to be marked
+ * NOT_LOCKABLE in the SCSI blacklist (and the vendor is MATSHITA). */
+UNUSUAL_DEV( 0x04da, 0x2372, 0x0000, 0x9999,
+ "Panasonic",
+ "DMC-LCx Camera",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
/* Most of the following entries were developed with the help of
* Shuttle/SCM directly.
*/
@@ -265,6 +266,21 @@ UNUSUAL_DEV( 0x0525, 0xa140, 0x0100, 0x0100,
US_SC_8070, US_PR_BULK, NULL,
US_FL_FIX_INQUIRY ),
+/* Reported by Iacopo Spalletti <avvisi@spalletti.it> */
+UNUSUAL_DEV( 0x052b, 0x1807, 0x0100, 0x0100,
+ "Tekom Technologies, Inc",
+ "300_CAMERA",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
+/* Yakumo Mega Image 37
+ * Submitted by Stephan Fuhrmann <atomenergie@t-online.de> */
+UNUSUAL_DEV( 0x052b, 0x1801, 0x0100, 0x0100,
+ "Tekom Technologies, Inc",
+ "300_CAMERA",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* This entry is needed because the device reports Sub=ff */
UNUSUAL_DEV( 0x054c, 0x0010, 0x0106, 0x0450,
"Sony",
@@ -383,10 +399,17 @@ UNUSUAL_DEV( 0x0595, 0x4343, 0x0000, 0x2210,
"Digital Camera EX-20 DSC",
US_SC_8070, US_PR_DEVICE, NULL, 0 ),
+/* The entry was here before I took over, and had US_SC_RBC. It turns
+ * out that isn't needed. Additionally, Torsten Eriksson
+ * <Torsten.Eriksson@bergianska.se> is able to use his device fine
+ * without this entry at all - but I don't suspect that will be true
+ * for all users (the protocol is likely needed), so is staying at
+ * this time. - Phil Dibowitz <phil@ipom.com>
+ */
UNUSUAL_DEV( 0x059f, 0xa601, 0x0200, 0x0200,
"LaCie",
"USB Hard Disk",
- US_SC_RBC, US_PR_CB, NULL, 0 ),
+ US_SC_DEVICE, US_PR_CB, NULL, 0 ),
/* Submitted by Joel Bourquard <numlock@freesurf.ch>
* Some versions of this device need the SubClass and Protocol overrides
@@ -439,36 +462,6 @@ UNUSUAL_DEV( 0x05dc, 0xb002, 0x0000, 0x0113,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
-/* Reported by Carlos Villegas <cav@uniscope.co.jp>
- * This device needs an INQUIRY of exactly 36-bytes to function.
- * That is the only reason this entry is needed.
- */
-UNUSUAL_DEV( 0x05e3, 0x0700, 0x0000, 0xffff,
- "Genesys Logic",
- "USB to IDE Card Reader",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_INQUIRY ),
-
-/* Submitted Alexander Oltu <alexander@all-2.com> */
-UNUSUAL_DEV( 0x05e3, 0x0701, 0x0000, 0xffff,
- "Genesys Logic",
- "USB to IDE Optical",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_MODE_XLATE ),
-
-/* Reported by Peter Marks <peter.marks@turner.com>
- * Like the SIIG unit above, this unit needs an INQUIRY to ask for exactly
- * 36 bytes of data. No more, no less. That is the only reason this entry
- * is needed.
- *
- * ST818 slim drives (rev 0.02) don't need special care.
-*/
-UNUSUAL_DEV( 0x05e3, 0x0702, 0x0000, 0xffff,
- "Genesys Logic",
- "USB to IDE Disk",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_INQUIRY ),
-
/* Reported by Hanno Boeck <hanno@gmx.de>
* Taken from the Lycoris Kernel */
UNUSUAL_DEV( 0x0636, 0x0003, 0x0000, 0x9999,
@@ -554,6 +547,13 @@ UNUSUAL_DEV( 0x07ab, 0xfc01, 0x0000, 0x9999,
US_SC_QIC, US_PR_FREECOM, freecom_init, 0),
#endif
+/* Reported by Eero Volotinen <eero@ping-viini.org> */
+UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0406, 0x0406,
+ "Freecom Technologies",
+ "FHD-Classic",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY),
+
UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133,
"Microtech",
"USB-SCSI-DB25",
@@ -724,12 +724,6 @@ UNUSUAL_DEV( 0x097a, 0x0001, 0x0000, 0x0001,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_MODE_XLATE ),
-UNUSUAL_DEV( 0x0a16, 0x8888, 0x0100, 0x0100,
- "IBM",
- "IBM USB Memory Key",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_INQUIRY ),
-
/* This Pentax still camera is not conformant
* to the USB storage specification: -
* - It does not like the INQUIRY command. So we must handle this command
@@ -802,13 +796,28 @@ UNUSUAL_DEV( 0x0dd8, 0x1060, 0x0000, 0xffff,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
+/* Patch by Stephan Walter <stephan.walter@epfl.ch>
+ * I don't know why, but it works... */
+UNUSUAL_DEV( 0x0dda, 0x0001, 0x0012, 0x0012,
+ "WINWARD",
+ "Music Disk",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Submitted by Antoine Mairesse <antoine.mairesse@free.fr> */
UNUSUAL_DEV( 0x0ed1, 0x6660, 0x0100, 0x0300,
"USB",
"Solid state disk",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
-
+
+/* Reported by Rastislav Stanik <rs_kernel@yahoo.com> */
+UNUSUAL_DEV( 0x0ea0, 0x6828, 0x0110, 0x0110,
+ "USB",
+ "Flash Disk",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Reported by Kevin Cernekee <kpc-usbdev@gelato.uiuc.edu>
* Tested on hardware version 1.10.
* Entry is needed only for the initializer function override.
@@ -830,6 +839,13 @@ UNUSUAL_DEV( 0x1065, 0x2136, 0x0000, 0x9999,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_MODE_XLATE ),
+/* Reported by Kotrla Vitezslav <kotrla@ceb.cz> */
+UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
+ "SWISSBIT",
+ "Black Silver",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
#ifdef CONFIG_USB_STORAGE_SDDR55
UNUSUAL_DEV( 0x55aa, 0xa103, 0x0000, 0x9999,
"Sandisk",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 9579bdf4969a..32e2f1482c57 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -97,6 +97,11 @@ MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
MODULE_DESCRIPTION("USB Mass Storage driver for Linux");
MODULE_LICENSE("GPL");
+static unsigned int delay_use = 5;
+module_param(delay_use, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
+
+
static int storage_probe(struct usb_interface *iface,
const struct usb_device_id *id);
@@ -882,6 +887,42 @@ static void dissociate_dev(struct us_data *us)
kfree(us);
}
+/* Thread to carry out delayed SCSI-device scanning */
+static int usb_stor_scan_thread(void * __us)
+{
+ struct us_data *us = (struct us_data *)__us;
+
+ /*
+ * This thread doesn't need any user-level access,
+ * so get rid of all our resources.
+ */
+ lock_kernel();
+ daemonize("usb-stor");
+ current->flags |= PF_NOFREEZE;
+ unlock_kernel();
+
+ printk(KERN_DEBUG
+ "usb-storage: device found at %d\n", us->pusb_dev->devnum);
+
+ /* Wait for the timeout to expire or for a disconnect */
+ if (delay_use > 0) {
+ printk(KERN_DEBUG "usb-storage: waiting for device "
+ "to settle before scanning\n");
+ wait_event_interruptible_timeout(us->scsi_scan_wait,
+ test_bit(US_FLIDX_DISCONNECTING, &us->flags),
+ delay_use * HZ);
+ }
+
+ /* If the device is still connected, perform the scanning */
+ if (!test_bit(US_FLIDX_DISCONNECTING, &us->flags)) {
+ scsi_scan_host(us->host);
+ printk(KERN_DEBUG "usb-storage: device scan complete\n");
+ }
+
+ complete_and_exit(&us->scsi_scan_done, 0);
+}
+
+
/* Probe to see if we can drive a newly-connected USB device */
static int storage_probe(struct usb_interface *intf,
const struct usb_device_id *id)
@@ -903,6 +944,8 @@ static int storage_probe(struct usb_interface *intf,
init_MUTEX_LOCKED(&(us->sema));
init_completion(&(us->notify));
init_waitqueue_head(&us->dev_reset_wait);
+ init_waitqueue_head(&us->scsi_scan_wait);
+ init_completion(&us->scsi_scan_done);
/* Associate the us_data structure with the USB device */
result = associate_dev(us, intf);
@@ -951,12 +994,10 @@ static int storage_probe(struct usb_interface *intf,
if (result)
goto BadDevice;
- /* Acquire all the other resources */
+ /* Acquire all the other resources and add the host */
result = usb_stor_acquire_resources(us);
if (result)
goto BadDevice;
-
- /* Finally, add the host (this does SCSI device scanning) */
result = scsi_add_host(us->host, &intf->dev);
if (result) {
printk(KERN_WARNING USB_STORAGE
@@ -964,10 +1005,15 @@ static int storage_probe(struct usb_interface *intf,
goto BadDevice;
}
- scsi_scan_host(us->host);
+ /* Start up the thread for delayed SCSI-device scanning */
+ result = kernel_thread(usb_stor_scan_thread, us, CLONE_VM);
+ if (result < 0) {
+ printk(KERN_WARNING USB_STORAGE
+ "Unable to start the device-scanning thread\n");
+ scsi_remove_host(us->host);
+ goto BadDevice;
+ }
- printk(KERN_DEBUG
- "USB Mass Storage device found at %d\n", us->pusb_dev->devnum);
return 0;
/* We come here if there are any problems */
@@ -991,6 +1037,11 @@ static void storage_disconnect(struct usb_interface *intf)
usb_stor_stop_transport(us);
wake_up(&us->dev_reset_wait);
+ /* Interrupt the SCSI-device-scanning thread's time delay, and
+ * wait for the thread to finish */
+ wake_up(&us->scsi_scan_wait);
+ wait_for_completion(&us->scsi_scan_done);
+
/* Wait for the current command to finish, then remove the host */
down(&us->dev_semaphore);
up(&us->dev_semaphore);
@@ -1012,12 +1063,9 @@ static int __init usb_stor_init(void)
/* register the driver, return usb_register return code if error */
retval = usb_register(&usb_storage_driver);
- if (retval)
- goto out;
+ if (retval == 0)
+ printk(KERN_INFO "USB Mass Storage support registered.\n");
- /* we're all set */
- printk(KERN_INFO "USB Mass Storage support registered.\n");
-out:
return retval;
}
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 785f30be68c5..199594967870 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -73,6 +73,7 @@ struct us_unusual_dev {
#define US_FL_SCM_MULT_TARG 0x00000020 /* supports multiple targets */
#define US_FL_FIX_INQUIRY 0x00000040 /* INQUIRY response needs faking */
#define US_FL_FIX_CAPACITY 0x00000080 /* READ CAPACITY response too big */
+#define US_FL_IGNORE_RESIDUE 0x00000100 /* reported residue is wrong */
/* Dynamic flag definitions: used in set_bit() etc. */
#define US_FLIDX_URB_ACTIVE 18 /* 0x00040000 current_urb is in use */
@@ -161,6 +162,8 @@ struct us_data {
struct semaphore sema; /* to sleep thread on */
struct completion notify; /* thread begin/end */
wait_queue_head_t dev_reset_wait; /* wait during reset */
+ wait_queue_head_t scsi_scan_wait; /* wait before scanning */
+ struct completion scsi_scan_done; /* scan thread end */
/* subdriver information */
void *extra; /* Any extra data */
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index ab23abe01649..8fff0829a2b6 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -49,6 +49,24 @@ config FB_MODE_HELPERS
your driver does not take advantage of this feature, choosing Y will
just increase the kernel size by about 5K.
+config FB_TILEBLITTING
+ bool "Enable Tile Blitting Support"
+ depends on FB
+ default n
+ ---help---
+ This enables tile blitting. Tile blitting is a drawing technique
+ where the screen is divided into rectangular sections (tiles), whereas
+ the standard blitting divides the screen into pixels. Because the
+ default drawing element is a tile, drawing functions will be passed
+ parameters in terms of number of tiles instead of number of pixels.
+ For example, to draw a single character, instead of using bitmaps,
+ an index to an array of bitmaps will be used. To clear or move a
+	  rectangular section of a screen, the rectangle will be described in
+ terms of number of tiles in the x- and y-axis.
+
+ This is particularly important to one driver, the matroxfb. If
+ unsure, say N.
+
config FB_CIRRUS
tristate "Cirrus Logic support"
depends on FB && (ZORRO || PCI)
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 2d753a150405..264b53179321 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -1974,7 +1974,7 @@ int __init atyfb_do_init(void)
info->fix = atyfb_fix;
info->par = default_par;
-
+ info->device = &pdev->dev;
#ifdef __sparc__
/*
* Map memory-mapped registers.
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 325b83b3017c..abecde5c4ed5 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -61,6 +61,7 @@
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/time.h>
#include <linux/fb.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -527,8 +528,7 @@ static int __devinit radeon_probe_pll_params(struct radeonfb_info *rinfo)
break;
}
- OUTREG8(CLOCK_CNTL_INDEX, 1);
- ppll_div_sel = INREG8(CLOCK_CNTL_DATA + 1) & 0x3;
+ ppll_div_sel = INREG(CLOCK_CNTL_INDEX + 1) & 0x3;
n = (INPLL(PPLL_DIV_0 + ppll_div_sel) & 0x7ff);
m = (INPLL(PPLL_REF_DIV) & 0x3ff);
@@ -595,53 +595,10 @@ static int __devinit radeon_probe_pll_params(struct radeonfb_info *rinfo)
*/
static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo)
{
-#ifdef CONFIG_PPC_OF
- /*
- * Retreive PLL infos from Open Firmware first
- */
- if (!force_measure_pll && radeon_read_xtal_OF(rinfo) == 0) {
- printk(KERN_INFO "radeonfb: Retreived PLL infos from Open Firmware\n");
- rinfo->pll.ref_div = INPLL(PPLL_REF_DIV) & 0x3ff;
- /* FIXME: Max clock may be higher on newer chips */
- rinfo->pll.ppll_min = 12000;
- rinfo->pll.ppll_max = 35000;
- goto found;
- }
-#endif /* CONFIG_PPC_OF */
-
- /*
- * Check out if we have an X86 which gave us some PLL informations
- * and if yes, retreive them
- */
- if (!force_measure_pll && rinfo->bios_seg) {
- u16 pll_info_block = BIOS_IN16(rinfo->fp_bios_start + 0x30);
-
- rinfo->pll.sclk = BIOS_IN16(pll_info_block + 0x08);
- rinfo->pll.mclk = BIOS_IN16(pll_info_block + 0x0a);
- rinfo->pll.ref_clk = BIOS_IN16(pll_info_block + 0x0e);
- rinfo->pll.ref_div = BIOS_IN16(pll_info_block + 0x10);
- rinfo->pll.ppll_min = BIOS_IN32(pll_info_block + 0x12);
- rinfo->pll.ppll_max = BIOS_IN32(pll_info_block + 0x16);
-
- printk(KERN_INFO "radeonfb: Retreived PLL infos from BIOS\n");
- goto found;
- }
-
- /*
- * We didn't get PLL parameters from either OF or BIOS, we try to
- * probe them
- */
- if (radeon_probe_pll_params(rinfo) == 0) {
- printk(KERN_INFO "radeonfb: Retreived PLL infos from registers\n");
- /* FIXME: Max clock may be higher on newer chips */
- rinfo->pll.ppll_min = 12000;
- rinfo->pll.ppll_max = 35000;
- goto found;
- }
-
/*
- * Neither of the above worked, we have a few default values, though
- * that's mostly incomplete
+ * In the case nothing works, these are defaults; they are mostly
+ * incomplete, however. It does provide ppll_max and _min values
+ * even for most other methods, however.
*/
switch (rinfo->chipset) {
case PCI_DEVICE_ID_ATI_RADEON_QW:
@@ -697,6 +654,47 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo)
}
rinfo->pll.ref_div = INPLL(PPLL_REF_DIV) & 0x3ff;
+
+#ifdef CONFIG_PPC_OF
+ /*
+	 * Retrieve PLL infos from Open Firmware first
+ */
+ if (!force_measure_pll && radeon_read_xtal_OF(rinfo) == 0) {
+ printk(KERN_INFO "radeonfb: Retreived PLL infos from Open Firmware\n");
+ goto found;
+ }
+#endif /* CONFIG_PPC_OF */
+
+ /*
+	 * Check out if we have an X86 which gave us some PLL information
+	 * and if yes, retrieve it
+ */
+ if (!force_measure_pll && rinfo->bios_seg) {
+ u16 pll_info_block = BIOS_IN16(rinfo->fp_bios_start + 0x30);
+
+ rinfo->pll.sclk = BIOS_IN16(pll_info_block + 0x08);
+ rinfo->pll.mclk = BIOS_IN16(pll_info_block + 0x0a);
+ rinfo->pll.ref_clk = BIOS_IN16(pll_info_block + 0x0e);
+ rinfo->pll.ref_div = BIOS_IN16(pll_info_block + 0x10);
+ rinfo->pll.ppll_min = BIOS_IN32(pll_info_block + 0x12);
+ rinfo->pll.ppll_max = BIOS_IN32(pll_info_block + 0x16);
+
+ printk(KERN_INFO "radeonfb: Retreived PLL infos from BIOS\n");
+ goto found;
+ }
+
+ /*
+ * We didn't get PLL parameters from either OF or BIOS, we try to
+ * probe them
+ */
+ if (radeon_probe_pll_params(rinfo) == 0) {
+ printk(KERN_INFO "radeonfb: Retreived PLL infos from registers\n");
+ goto found;
+ }
+
+ /*
+ * Fall back to already-set defaults...
+ */
printk(KERN_INFO "radeonfb: Used default PLL infos\n");
found:
@@ -715,6 +713,7 @@ found:
rinfo->pll.ref_div,
rinfo->pll.mclk / 100, rinfo->pll.mclk % 100,
rinfo->pll.sclk / 100, rinfo->pll.sclk % 100);
+ printk("radeonfb: PLL min %d max %d\n", rinfo->pll.ppll_min, rinfo->pll.ppll_max);
}
static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info)
@@ -934,43 +933,94 @@ static int radeonfb_ioctl (struct inode *inode, struct file *file, unsigned int
}
-static int radeon_screen_blank (struct radeonfb_info *rinfo, int blank)
+static int radeon_screen_blank (struct radeonfb_info *rinfo, int blank, int mode_switch)
{
- u32 val = INREG(CRTC_EXT_CNTL);
- u32 val2 = 0;
+ u32 val;
+ u32 tmp_pix_clks;
- if (rinfo->mon1_type == MT_LCD)
- val2 = INREG(LVDS_GEN_CNTL) & ~LVDS_DISPLAY_DIS;
-
- /* reset it */
+ if (rinfo->lock_blank)
+ return 0;
+
+ radeon_engine_idle();
+
+ val = INREG(CRTC_EXT_CNTL);
val &= ~(CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS |
CRTC_VSYNC_DIS);
-
switch (blank) {
- case VESA_NO_BLANKING:
- break;
- case VESA_VSYNC_SUSPEND:
- val |= (CRTC_DISPLAY_DIS | CRTC_VSYNC_DIS);
- break;
- case VESA_HSYNC_SUSPEND:
- val |= (CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS);
- break;
- case VESA_POWERDOWN:
- val |= (CRTC_DISPLAY_DIS | CRTC_VSYNC_DIS |
- CRTC_HSYNC_DIS);
- val2 |= (LVDS_DISPLAY_DIS);
- break;
+ case VESA_NO_BLANKING:
+ break;
+ case VESA_VSYNC_SUSPEND:
+ val |= (CRTC_DISPLAY_DIS | CRTC_VSYNC_DIS);
+ break;
+ case VESA_HSYNC_SUSPEND:
+ val |= (CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS);
+ break;
+ case VESA_POWERDOWN:
+ val |= (CRTC_DISPLAY_DIS | CRTC_VSYNC_DIS |
+ CRTC_HSYNC_DIS);
+ break;
}
+ OUTREG(CRTC_EXT_CNTL, val);
+
- radeon_fifo_wait(1);
switch (rinfo->mon1_type) {
- case MT_LCD:
- OUTREG(LVDS_GEN_CNTL, val2);
- break;
- case MT_CRT:
- default:
- OUTREG(CRTC_EXT_CNTL, val);
+ case MT_DFP:
+ if (mode_switch)
break;
+ if (blank == VESA_NO_BLANKING)
+ OUTREGP(FP_GEN_CNTL, (FP_FPON | FP_TMDS_EN),
+ ~(FP_FPON | FP_TMDS_EN));
+ else
+ OUTREGP(FP_GEN_CNTL, 0, ~(FP_FPON | FP_TMDS_EN));
+ break;
+ case MT_LCD:
+ val = INREG(LVDS_GEN_CNTL);
+ if (blank == VESA_NO_BLANKING) {
+ u32 target_val = (val & ~LVDS_DISPLAY_DIS) | LVDS_BLON | LVDS_ON
+ | LVDS_ON | (rinfo->init_state.lvds_gen_cntl & LVDS_DIGON);
+ if ((val ^ target_val) == LVDS_DISPLAY_DIS)
+ OUTREG(LVDS_GEN_CNTL, target_val);
+ else if ((val ^ target_val) != 0) {
+ del_timer_sync(&rinfo->lvds_timer);
+ OUTREG(LVDS_GEN_CNTL, target_val & ~LVDS_ON);
+ rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
+ rinfo->init_state.lvds_gen_cntl |= target_val & LVDS_STATE_MASK;
+ if (mode_switch) {
+ msleep(rinfo->panel_info.pwr_delay);
+ OUTREG(LVDS_GEN_CNTL, target_val);
+ }
+ else {
+ rinfo->pending_lvds_gen_cntl = target_val;
+ mod_timer(&rinfo->lvds_timer,
+ jiffies + msecs_to_jiffies(rinfo->panel_info.pwr_delay));
+ }
+ }
+ } else {
+ val |= LVDS_DISPLAY_DIS;
+ OUTREG(LVDS_GEN_CNTL, val);
+
+ /* We don't do a full switch-off on a simple mode switch */
+ if (mode_switch)
+ break;
+
+ /* Asic bug, when turning off LVDS_ON, we have to make sure
+ * RADEON_PIXCLK_LVDS_ALWAYS_ON bit is off
+ */
+ tmp_pix_clks = INPLL(PIXCLKS_CNTL);
+ if (rinfo->is_mobility || rinfo->is_IGP)
+ OUTPLLP(PIXCLKS_CNTL, 0, ~PIXCLK_LVDS_ALWAYS_ONb);
+ val &= ~(LVDS_BLON | LVDS_ON);
+ OUTREG(LVDS_GEN_CNTL, val);
+ rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
+ rinfo->init_state.lvds_gen_cntl |= val & LVDS_STATE_MASK;
+ if (rinfo->is_mobility || rinfo->is_IGP)
+ OUTPLL(PIXCLKS_CNTL, tmp_pix_clks);
+ }
+ break;
+ case MT_CRT:
+ // todo: powerdown DAC
+ default:
+ break;
}
return 0;
@@ -983,17 +1033,7 @@ int radeonfb_blank (int blank, struct fb_info *info)
if (rinfo->asleep)
return 0;
-#ifdef CONFIG_PMAC_BACKLIGHT
- if (rinfo->mon1_type == MT_LCD && _machine == _MACH_Pmac && blank)
- set_backlight_enable(0);
-#endif
-
- radeon_screen_blank(rinfo, blank);
-
-#ifdef CONFIG_PMAC_BACKLIGHT
- if (rinfo->mon1_type == MT_LCD && _machine == _MACH_Pmac && !blank)
- set_backlight_enable(1);
-#endif
+ radeon_screen_blank(rinfo, blank, 0);
return 0;
}
@@ -1225,7 +1265,8 @@ static void radeon_write_mode (struct radeonfb_info *rinfo,
del_timer_sync(&rinfo->lvds_timer);
- radeon_screen_blank(rinfo, VESA_POWERDOWN);
+ radeon_screen_blank(rinfo, VESA_POWERDOWN, 1);
+ msleep(100);
radeon_fifo_wait(31);
for (i=0; i<10; i++)
@@ -1265,35 +1306,9 @@ static void radeon_write_mode (struct radeonfb_info *rinfo,
OUTREG(FP_GEN_CNTL, mode->fp_gen_cntl);
OUTREG(TMDS_CRC, mode->tmds_crc);
OUTREG(TMDS_TRANSMITTER_CNTL, mode->tmds_transmitter_cntl);
-
- if (primary_mon == MT_LCD) {
- unsigned int tmp = INREG(LVDS_GEN_CNTL);
-
- /* HACK: The backlight control code may have modified init_state.lvds_gen_cntl,
- * so we update ourselves
- */
- mode->lvds_gen_cntl &= ~LVDS_STATE_MASK;
- mode->lvds_gen_cntl |= (rinfo->init_state.lvds_gen_cntl & LVDS_STATE_MASK);
-
- if ((tmp & (LVDS_ON | LVDS_BLON)) ==
- (mode->lvds_gen_cntl & (LVDS_ON | LVDS_BLON))) {
- OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl);
- } else {
- rinfo->pending_pixclks_cntl = INPLL(PIXCLKS_CNTL);
- if (rinfo->is_mobility || rinfo->is_IGP)
- OUTPLLP(PIXCLKS_CNTL, 0, ~PIXCLK_LVDS_ALWAYS_ONb);
- if (!(tmp & (LVDS_ON | LVDS_BLON)))
- OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl | LVDS_BLON);
- rinfo->pending_lvds_gen_cntl = mode->lvds_gen_cntl;
- mod_timer(&rinfo->lvds_timer,
- jiffies + MS_TO_HZ(rinfo->panel_info.pwr_delay));
- }
- }
}
- RTRACE("lvds_gen_cntl: %08x\n", INREG(LVDS_GEN_CNTL));
-
- radeon_screen_blank(rinfo, VESA_NO_BLANKING);
+ radeon_screen_blank(rinfo, VESA_NO_BLANKING, 1);
radeon_fifo_wait(2);
OUTPLL(VCLK_ECP_CNTL, mode->vclk_ecp_cntl);
@@ -1329,7 +1344,7 @@ static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs
* not sure which model starts having FP2_GEN_CNTL, I assume anything more
* recent than an r(v)100...
*/
-#if 0
+#if 1
/* XXX I had reports of flicker happening with the cinema display
+	 * on TMDS1 that seem to be fixed if I also forbid odd dividers in
* this case. This could just be a bandwidth calculation issue, I
@@ -1379,6 +1394,8 @@ static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs
freq = rinfo->pll.ppll_max;
if (freq*12 < rinfo->pll.ppll_min)
freq = rinfo->pll.ppll_min / 12;
+ RTRACE("freq = %lu, PLL min = %u, PLL max = %u\n",
+ freq, rinfo->pll.ppll_min, rinfo->pll.ppll_max);
for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
pll_output_freq = post_div->divider * freq;
@@ -1392,6 +1409,16 @@ static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs
break;
}
+ /* If we fall through the bottom, try the "default value"
+ given by the terminal post_div->bitvalue */
+ if ( !post_div->divider ) {
+ post_div = &post_divs[post_div->bitvalue];
+ pll_output_freq = post_div->divider * freq;
+ }
+ RTRACE("ref_div = %d, ref_clk = %d, output_freq = %d\n",
+ rinfo->pll.ref_div, rinfo->pll.ref_clk,
+ pll_output_freq);
+
fb_div = round_div(rinfo->pll.ref_div*pll_output_freq,
rinfo->pll.ref_clk);
regs->ppll_ref_div = rinfo->pll.ref_div;
@@ -1780,8 +1807,7 @@ static int backlight_conv_m7[] = {
static int radeon_set_backlight_enable(int on, int level, void *data)
{
struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
- unsigned int lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
- unsigned long tmpPixclksCntl = INPLL(PIXCLKS_CNTL);
+ u32 lvds_gen_cntl, tmpPixclksCntl;
int* conv_table;
if (rinfo->mon1_type != MT_LCD)
@@ -1803,42 +1829,47 @@ static int radeon_set_backlight_enable(int on, int level, void *data)
conv_table = backlight_conv_m6;
del_timer_sync(&rinfo->lvds_timer);
+ radeon_engine_idle();
- lvds_gen_cntl |= (LVDS_BL_MOD_EN | LVDS_BLON);
- radeon_fifo_wait(3);
+ lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
if (on && (level > BACKLIGHT_OFF)) {
- lvds_gen_cntl |= LVDS_DIGON;
- if (!(lvds_gen_cntl & LVDS_ON)) {
- lvds_gen_cntl &= ~LVDS_BLON;
+ lvds_gen_cntl &= ~LVDS_DISPLAY_DIS;
+ if (!(lvds_gen_cntl & LVDS_BLON) || !(lvds_gen_cntl & LVDS_ON)) {
+ lvds_gen_cntl |= LVDS_BLON /* | LVDS_EN | LVDS_DIGON */;
OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
- (void)INREG(LVDS_GEN_CNTL);
- mdelay(rinfo->panel_info.pwr_delay);/* OUCH !!! FIXME */
- lvds_gen_cntl |= LVDS_BLON;
+ lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
+ lvds_gen_cntl |= (conv_table[level] <<
+ LVDS_BL_MOD_LEVEL_SHIFT);
+ lvds_gen_cntl |= LVDS_ON;
+ rinfo->pending_lvds_gen_cntl = lvds_gen_cntl;
+ mod_timer(&rinfo->lvds_timer,
+ jiffies + msecs_to_jiffies(rinfo->panel_info.pwr_delay));
+ } else {
+ lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
+ lvds_gen_cntl |= (conv_table[level] <<
+ LVDS_BL_MOD_LEVEL_SHIFT);
OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
}
- lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
- lvds_gen_cntl |= (conv_table[level] <<
- LVDS_BL_MOD_LEVEL_SHIFT);
- lvds_gen_cntl |= (LVDS_ON | LVDS_EN);
- lvds_gen_cntl &= ~LVDS_DISPLAY_DIS;
+ rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
+ rinfo->init_state.lvds_gen_cntl |= rinfo->pending_lvds_gen_cntl
+ & LVDS_STATE_MASK;
} else {
/* Asic bug, when turning off LVDS_ON, we have to make sure
RADEON_PIXCLK_LVDS_ALWAYS_ON bit is off
*/
+ tmpPixclksCntl = INPLL(PIXCLKS_CNTL);
if (rinfo->is_mobility || rinfo->is_IGP)
OUTPLLP(PIXCLKS_CNTL, 0, ~PIXCLK_LVDS_ALWAYS_ONb);
lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
lvds_gen_cntl |= (conv_table[0] <<
LVDS_BL_MOD_LEVEL_SHIFT);
- lvds_gen_cntl |= LVDS_DISPLAY_DIS | LVDS_BLON;
+ lvds_gen_cntl |= LVDS_DISPLAY_DIS;
+ OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
+ lvds_gen_cntl &= ~(LVDS_ON | LVDS_BLON /* | LVDS_EN | LVDS_DIGON */);
OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
- mdelay(rinfo->panel_info.pwr_delay);/* OUCH !!! FIXME */
- lvds_gen_cntl &= ~(LVDS_ON | LVDS_EN | LVDS_BLON | LVDS_DIGON);
+ if (rinfo->is_mobility || rinfo->is_IGP)
+ OUTPLL(PIXCLKS_CNTL, tmpPixclksCntl);
}
-
- OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
- if (rinfo->is_mobility || rinfo->is_IGP)
- OUTPLL(PIXCLKS_CNTL, tmpPixclksCntl);
rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
rinfo->init_state.lvds_gen_cntl |= (lvds_gen_cntl & LVDS_STATE_MASK);
diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/aty/radeon_monitor.c
index e6c01642cfdc..22cf8b0c9c57 100644
--- a/drivers/video/aty/radeon_monitor.c
+++ b/drivers/video/aty/radeon_monitor.c
@@ -69,11 +69,12 @@ static int __devinit radeon_parse_montype_prop(struct device_node *dp, u8 **out_
mt = MT_DFP;
else if (!strcmp(pmt, "CRT"))
mt = MT_CRT;
- else if (strcmp(pmt, "NONE")) {
- printk(KERN_WARNING "radeonfb: Unknown OF display-type: %s\n", pmt);
- return MT_NONE;
- } else
+ else {
+ if (strcmp(pmt, "NONE") != 0)
+ printk(KERN_WARNING "radeonfb: Unknown OF display-type: %s\n",
+ pmt);
return MT_NONE;
+ }
for (i = 0; propnames[i] != NULL; ++i) {
pedid = (u8 *)get_property(dp, propnames[i], NULL);
@@ -632,43 +633,25 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
*/
static void radeon_fixup_panel_info(struct radeonfb_info *rinfo)
{
+ #ifdef CONFIG_PPC_OF
/*
- * A few iBook laptop panels seem to need a fixed PLL setting
- *
- * We should probably do this differently based on the panel
- * type/model or eventually some other device-tree informations,
- * but these tweaks below work enough for now. --BenH
+	 * LCD Flat panels should use fixed dividers, we enforce that on
+ * PowerMac only for now...
*/
-#ifdef CONFIG_PPC_OF
- /* iBook2's */
- if (machine_is_compatible("PowerBook4,3")) {
- rinfo->panel_info.ref_divider = rinfo->pll.ref_div;
- rinfo->panel_info.post_divider = 0x6;
- rinfo->panel_info.fbk_divider = 0xad;
- rinfo->panel_info.use_bios_dividers = 1;
- }
- /* Aluminium PowerBook 15" */
- if (machine_is_compatible("PowerBook5,4")) {
+ if (!rinfo->panel_info.use_bios_dividers && rinfo->mon1_type == MT_LCD
+ && rinfo->is_mobility) {
+ int ppll_div_sel = INREG8(CLOCK_CNTL_INDEX + 1) & 0x3;
+ u32 ppll_divn = INPLL(PPLL_DIV_0 + ppll_div_sel);
rinfo->panel_info.ref_divider = rinfo->pll.ref_div;
- rinfo->panel_info.post_divider = 0x2;
- rinfo->panel_info.fbk_divider = 0x8e;
- rinfo->panel_info.use_bios_dividers = 1;
- }
- /* Aluminium PowerBook 17" */
- if (machine_is_compatible("PowerBook5,3") ||
- machine_is_compatible("PowerBook5,5")) {
- rinfo->panel_info.ref_divider = rinfo->pll.ref_div;
- rinfo->panel_info.post_divider = 0x4;
- rinfo->panel_info.fbk_divider = 0x80;
- rinfo->panel_info.use_bios_dividers = 1;
- }
- /* iBook G4 */
- if (machine_is_compatible("PowerBook6,3") ||
- machine_is_compatible("PowerBook6,5")) {
- rinfo->panel_info.ref_divider = rinfo->pll.ref_div;
- rinfo->panel_info.post_divider = 0x6;
- rinfo->panel_info.fbk_divider = 0xad;
+ rinfo->panel_info.fbk_divider = ppll_divn & 0x7ff;
+ rinfo->panel_info.post_divider = (ppll_divn >> 16) & 0x7;
rinfo->panel_info.use_bios_dividers = 1;
+
+ printk(KERN_DEBUG "radeonfb: Using Firmware dividers 0x%08x "
+ "from PPLL %d\n",
+ rinfo->panel_info.fbk_divider |
+ (rinfo->panel_info.post_divider << 16),
+ ppll_div_sel);
}
#endif /* CONFIG_PPC_OF */
}
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index c26f43c04155..cad299f160c1 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -465,7 +465,9 @@ static void radeon_pm_setup_for_suspend(struct radeonfb_info *rinfo)
OUTPLL( pllPIXCLKS_CNTL, pixclks_cntl);
-
+ /* Switch off LVDS interface */
+ OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) &
+ ~(LVDS_BLON | LVDS_EN | LVDS_ON | LVDS_DIGON));
/* Enable System power management */
pll_pwrmgt_cntl = INPLL( pllPLL_PWRMGT_CNTL);
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 447eb58411d7..7377cb7b14b9 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -425,8 +425,6 @@ static inline u32 _INPLL(struct radeonfb_info *rinfo, u32 addr)
spin_unlock_irqrestore(&rinfo->reg_lock, flags); \
} while (0)
-#define MS_TO_HZ(ms) ((ms * HZ + 999) / 1000)
-
#define BIOS_IN8(v) (readb(rinfo->bios_seg + (v)))
#define BIOS_IN16(v) (readb(rinfo->bios_seg + (v)) | \
(readb(rinfo->bios_seg + (v) + 1) << 8))
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 65b65addff6b..3b2c5a5e6c12 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -386,7 +386,7 @@ int __init bw2_init(void)
struct sbus_bus *sbus;
struct sbus_dev *sdev;
- if (fb_get_options("bw2fb", &option))
+ if (fb_get_options("bw2fb", NULL))
return -ENODEV;
#ifdef CONFIG_SUN4
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index 4ee5a25db298..a51f4d2b69d8 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -416,7 +416,7 @@ chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
release_mem_region(addr, size);
return -ENOMEM;
}
-
+ p->device = &dp->dev;
init_chips(p, addr);
#ifdef CONFIG_PMAC_PBOOK
diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
index bfdd40665b6c..e5fa93b68043 100644
--- a/drivers/video/console/Makefile
+++ b/drivers/video/console/Makefile
@@ -24,7 +24,8 @@ obj-$(CONFIG_PROM_CONSOLE) += promcon.o promcon_tbl.o
obj-$(CONFIG_STI_CONSOLE) += sticon.o sticore.o
obj-$(CONFIG_VGA_CONSOLE) += vgacon.o
obj-$(CONFIG_MDA_CONSOLE) += mdacon.o
-obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += fbcon.o font.o
+obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += fbcon.o bitblit.o font.o
+obj-$(CONFIG_FB_TILEBLITTING) += tileblit.o
obj-$(CONFIG_FB_STI) += sticore.o
diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
new file mode 100644
index 000000000000..43044f99c37f
--- /dev/null
+++ b/drivers/video/console/bitblit.c
@@ -0,0 +1,370 @@
+/*
+ * linux/drivers/video/console/bitblit.c -- BitBlitting Operation
+ *
+ * Originally from the 'accel_*' routines in drivers/video/console/fbcon.c
+ *
+ * Copyright (C) 2004 Antonino Daplas <adaplas @pol.net>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fb.h>
+#include <linux/vt_kern.h>
+#include <linux/console.h>
+#include <asm/types.h>
+#include "fbcon.h"
+
+/*
+ * Accelerated handlers.
+ */
+#define FBCON_ATTRIBUTE_UNDERLINE 1
+#define FBCON_ATTRIBUTE_REVERSE 2
+#define FBCON_ATTRIBUTE_BOLD 4
+
+static inline int real_y(struct display *p, int ypos)
+{
+ int rows = p->vrows;
+
+ ypos += p->yscroll;
+ return ypos < rows ? ypos : ypos - rows;
+}
+
+
+static inline int get_attribute(struct fb_info *info, u16 c)
+{
+ int attribute = 0;
+
+ if (fb_get_color_depth(info) == 1) {
+ if (attr_underline(c))
+ attribute |= FBCON_ATTRIBUTE_UNDERLINE;
+ if (attr_reverse(c))
+ attribute |= FBCON_ATTRIBUTE_REVERSE;
+ if (attr_bold(c))
+ attribute |= FBCON_ATTRIBUTE_BOLD;
+ }
+
+ return attribute;
+}
+
+static inline void update_attr(u8 *dst, u8 *src, int attribute,
+ struct vc_data *vc)
+{
+ int i, offset = (vc->vc_font.height < 10) ? 1 : 2;
+ int width = (vc->vc_font.width + 7) >> 3;
+ unsigned int cellsize = vc->vc_font.height * width;
+ u8 c;
+
+ offset = cellsize - (offset * width);
+ for (i = 0; i < cellsize; i++) {
+ c = src[i];
+ if (attribute & FBCON_ATTRIBUTE_UNDERLINE && i >= offset)
+ c = 0xff;
+ if (attribute & FBCON_ATTRIBUTE_BOLD)
+ c |= c >> 1;
+ if (attribute & FBCON_ATTRIBUTE_REVERSE)
+ c = ~c;
+ dst[i] = c;
+ }
+}
+
+static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fb_copyarea area;
+
+ area.sx = sx * vc->vc_font.width;
+ area.sy = sy * vc->vc_font.height;
+ area.dx = dx * vc->vc_font.width;
+ area.dy = dy * vc->vc_font.height;
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
+static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int height, int width)
+{
+ int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ struct fb_fillrect region;
+
+ region.color = attr_bgcol_ec(bgshift, vc);
+ region.dx = sx * vc->vc_font.width;
+ region.dy = sy * vc->vc_font.height;
+ region.width = width * vc->vc_font.width;
+ region.height = height * vc->vc_font.height;
+ region.rop = ROP_COPY;
+
+ info->fbops->fb_fillrect(info, &region);
+}
+
+static void bit_putcs(struct vc_data *vc, struct fb_info *info,
+ const unsigned short *s, int count, int yy, int xx,
+ int fg, int bg)
+{
+ void (*move_unaligned)(struct fb_info *info, struct fb_pixmap *buf,
+ u8 *dst, u32 d_pitch, u8 *src, u32 idx,
+ u32 height, u32 shift_high, u32 shift_low,
+ u32 mod);
+ void (*move_aligned)(struct fb_info *info, struct fb_pixmap *buf,
+ u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch,
+ u32 height);
+ unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
+ unsigned int width = (vc->vc_font.width + 7) >> 3;
+ unsigned int cellsize = vc->vc_font.height * width;
+ unsigned int maxcnt = info->pixmap.size/cellsize;
+ unsigned int scan_align = info->pixmap.scan_align - 1;
+ unsigned int buf_align = info->pixmap.buf_align - 1;
+ unsigned int shift_low = 0, mod = vc->vc_font.width % 8;
+ unsigned int shift_high = 8, pitch, cnt, size, k;
+ unsigned int idx = vc->vc_font.width >> 3;
+ unsigned int attribute = get_attribute(info, scr_readw(s));
+ struct fb_image image;
+ u8 *src, *dst, *buf = NULL;
+
+ if (attribute) {
+ buf = kmalloc(cellsize, GFP_KERNEL);
+ if (!buf)
+ return;
+ }
+
+ image.fg_color = fg;
+ image.bg_color = bg;
+
+ image.dx = xx * vc->vc_font.width;
+ image.dy = yy * vc->vc_font.height;
+ image.height = vc->vc_font.height;
+ image.depth = 1;
+
+ if (info->pixmap.outbuf && info->pixmap.inbuf) {
+ move_aligned = fb_iomove_buf_aligned;
+ move_unaligned = fb_iomove_buf_unaligned;
+ } else {
+ move_aligned = fb_sysmove_buf_aligned;
+ move_unaligned = fb_sysmove_buf_unaligned;
+ }
+ while (count) {
+ if (count > maxcnt)
+ cnt = k = maxcnt;
+ else
+ cnt = k = count;
+
+ image.width = vc->vc_font.width * cnt;
+ pitch = ((image.width + 7) >> 3) + scan_align;
+ pitch &= ~scan_align;
+ size = pitch * image.height + buf_align;
+ size &= ~buf_align;
+ dst = fb_get_buffer_offset(info, &info->pixmap, size);
+ image.data = dst;
+ if (mod) {
+ while (k--) {
+ src = vc->vc_font.data + (scr_readw(s++)&
+ charmask)*cellsize;
+
+ if (attribute) {
+ update_attr(buf, src, attribute, vc);
+ src = buf;
+ }
+
+ move_unaligned(info, &info->pixmap, dst, pitch,
+ src, idx, image.height,
+ shift_high, shift_low, mod);
+ shift_low += mod;
+ dst += (shift_low >= 8) ? width : width - 1;
+ shift_low &= 7;
+ shift_high = 8 - shift_low;
+ }
+ } else {
+ while (k--) {
+ src = vc->vc_font.data + (scr_readw(s++)&
+ charmask)*cellsize;
+
+ if (attribute) {
+ update_attr(buf, src, attribute, vc);
+ src = buf;
+ }
+
+ move_aligned(info, &info->pixmap, dst, pitch,
+ src, idx, image.height);
+ dst += width;
+ }
+ }
+ info->fbops->fb_imageblit(info, &image);
+ image.dx += cnt * vc->vc_font.width;
+ count -= cnt;
+ }
+
+ if (buf)
+ kfree(buf);
+}
+
+static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
+ int bottom_only)
+{
+ int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ unsigned int cw = vc->vc_font.width;
+ unsigned int ch = vc->vc_font.height;
+ unsigned int rw = info->var.xres - (vc->vc_cols*cw);
+ unsigned int bh = info->var.yres - (vc->vc_rows*ch);
+ unsigned int rs = info->var.xres - rw;
+ unsigned int bs = info->var.yres - bh;
+ struct fb_fillrect region;
+
+ region.color = attr_bgcol_ec(bgshift, vc);
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+ region.dx = info->var.xoffset + rs;
+ region.dy = 0;
+ region.width = rw;
+ region.height = info->var.yres_virtual;
+ info->fbops->fb_fillrect(info, &region);
+ }
+
+ if (bh) {
+ region.dx = info->var.xoffset;
+ region.dy = info->var.yoffset + bs;
+ region.width = rs;
+ region.height = bh;
+ info->fbops->fb_fillrect(info, &region);
+ }
+}
+
+static void bit_cursor(struct vc_data *vc, struct fb_info *info,
+ struct display *p, int mode, int fg, int bg)
+{
+ struct fb_cursor cursor;
+ unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
+ int w = (vc->vc_font.width + 7) >> 3, c;
+ int y = real_y(p, vc->vc_y);
+ int attribute;
+ char *src;
+
+ c = scr_readw((u16 *) vc->vc_pos);
+ attribute = get_attribute(info, c);
+ src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
+ if (attribute) {
+ u8 *dst;
+
+ dst = kmalloc(w * vc->vc_font.height, GFP_ATOMIC);
+ if (!dst)
+ return;
+ if (info->cursor.data)
+ kfree(info->cursor.data);
+ info->cursor.data = dst;
+ update_attr(dst, src, attribute, vc);
+ src = dst;
+ }
+
+ cursor.image.data = src;
+ cursor.set = FB_CUR_SETCUR;
+ cursor.image.depth = 1;
+
+ switch (mode) {
+ case CM_ERASE:
+ if (info->cursor.rop == ROP_XOR) {
+ info->cursor.enable = 0;
+ info->cursor.rop = ROP_COPY;
+ info->fbops->fb_cursor(info, &cursor);
+ }
+ break;
+ case CM_MOVE:
+ case CM_DRAW:
+ info->cursor.enable = 1;
+ info->cursor.rop = ROP_XOR;
+
+ if (info->cursor.image.fg_color != fg ||
+ info->cursor.image.bg_color != bg) {
+ cursor.image.fg_color = fg;
+ cursor.image.bg_color = bg;
+ cursor.set |= FB_CUR_SETCMAP;
+ }
+
+ if ((info->cursor.image.dx != (vc->vc_font.width * vc->vc_x)) ||
+ (info->cursor.image.dy != (vc->vc_font.height * y))) {
+ cursor.image.dx = vc->vc_font.width * vc->vc_x;
+ cursor.image.dy = vc->vc_font.height * y;
+ cursor.set |= FB_CUR_SETPOS;
+ }
+
+ if (info->cursor.image.height != vc->vc_font.height ||
+ info->cursor.image.width != vc->vc_font.width) {
+ cursor.image.height = vc->vc_font.height;
+ cursor.image.width = vc->vc_font.width;
+ cursor.set |= FB_CUR_SETSIZE;
+ }
+
+ if (info->cursor.hot.x || info->cursor.hot.y) {
+ cursor.hot.x = cursor.hot.y = 0;
+ cursor.set |= FB_CUR_SETHOT;
+ }
+
+ if ((cursor.set & FB_CUR_SETSIZE) ||
+ ((vc->vc_cursor_type & 0x0f) != p->cursor_shape)
+ || info->cursor.mask == NULL) {
+ char *mask = kmalloc(w*vc->vc_font.height, GFP_ATOMIC);
+ int cur_height, size, i = 0;
+ u8 msk = 0xff;
+
+ if (!mask)
+ return;
+
+ if (info->cursor.mask)
+ kfree(info->cursor.mask);
+ info->cursor.mask = mask;
+ p->cursor_shape = vc->vc_cursor_type & 0x0f;
+ cursor.set |= FB_CUR_SETSHAPE;
+
+ switch (vc->vc_cursor_type & 0x0f) {
+ case CUR_NONE:
+ cur_height = 0;
+ break;
+ case CUR_UNDERLINE:
+ cur_height = (vc->vc_font.height < 10) ? 1 : 2;
+ break;
+ case CUR_LOWER_THIRD:
+ cur_height = vc->vc_font.height/3;
+ break;
+ case CUR_LOWER_HALF:
+ cur_height = vc->vc_font.height >> 1;
+ break;
+ case CUR_TWO_THIRDS:
+ cur_height = (vc->vc_font.height << 1)/3;
+ break;
+ case CUR_BLOCK:
+ default:
+ cur_height = vc->vc_font.height;
+ break;
+ }
+ size = (vc->vc_font.height - cur_height) * w;
+ while (size--)
+ mask[i++] = ~msk;
+ size = cur_height * w;
+ while (size--)
+ mask[i++] = msk;
+ }
+ info->fbops->fb_cursor(info, &cursor);
+ break;
+ }
+}
+
+void fbcon_set_bitops(struct fbcon_ops *ops)
+{
+ ops->bmove = bit_bmove;
+ ops->clear = bit_clear;
+ ops->putcs = bit_putcs;
+ ops->clear_margins = bit_clear_margins;
+ ops->cursor = bit_cursor;
+}
+
+EXPORT_SYMBOL(fbcon_set_bitops);
+
+MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
+MODULE_DESCRIPTION("Bit Blitting Operation");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 2fba5cfcbcee..aa4019d5a4e7 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -102,6 +102,7 @@
struct display fb_display[MAX_NR_CONSOLES];
signed char con2fb_map[MAX_NR_CONSOLES];
+signed char con2fb_map_boot[MAX_NR_CONSOLES];
static int logo_height;
static int logo_lines;
static int logo_shown = -1;
@@ -119,13 +120,7 @@ static int fbcon_is_default = 1;
static char fontname[40];
/* current fb_info */
-static int info_idx = -1;
-
-#define REFCOUNT(fd) (((int *)(fd))[-1])
-#define FNTSIZE(fd) (((int *)(fd))[-2])
-#define FNTCHARCNT(fd) (((int *)(fd))[-3])
-#define FNTSUM(fd) (((int *)(fd))[-4])
-#define FONT_EXTRA_WORDS 4
+static int info_idx = -1;
#define CM_SOFTBACK (8)
@@ -158,6 +153,7 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos);
static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
int count, int ypos, int xpos);
+static void fbcon_clear_margins(struct vc_data *vc, int bottom_only);
static void fbcon_cursor(struct vc_data *vc, int mode);
static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
int count);
@@ -167,9 +163,6 @@ static int fbcon_switch(struct vc_data *vc);
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
static int fbcon_set_palette(struct vc_data *vc, unsigned char *table);
static int fbcon_scrolldelta(struct vc_data *vc, int lines);
-void accel_clear_margins(struct vc_data *vc, struct fb_info *info,
- int bottom_only);
-
/*
* Internal routines
@@ -182,6 +175,7 @@ static __inline__ void ypan_down(struct vc_data *vc, int count);
static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx,
int dy, int dx, int height, int width, u_int y_break);
static void fbcon_set_disp(struct fb_info *info, struct vc_data *vc);
+static void fbcon_preset_disp(struct fb_info *info, int unit);
static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
int line, int count, int dy);
@@ -198,21 +192,69 @@ static irqreturn_t fb_vbl_detect(int irq, void *dummy, struct pt_regs *fp)
}
#endif
+static inline int get_color(struct vc_data *vc, struct fb_info *info,
+ u16 c, int is_fg)
+{
+ int depth = fb_get_color_depth(info);
+ int color = 0;
+
+ if (depth != 1)
+ color = (is_fg) ? attr_fgcol((vc->vc_hi_font_mask) ? 9 : 8, c)
+ : attr_bgcol((vc->vc_hi_font_mask) ? 13 : 12, c);
+
+ switch (depth) {
+ case 1:
+ {
+ /* 0 or 1 */
+ int fg = (info->fix.visual != FB_VISUAL_MONO01) ? 1 : 0;
+ int bg = (info->fix.visual != FB_VISUAL_MONO01) ? 0 : 1;
+
+ color = (is_fg) ? fg : bg;
+ break;
+ }
+ case 2:
+ /*
+ * Scale down 16-colors to 4 colors. Default 4-color palette
+ * is grayscale.
+ */
+ color /= 4;
+ break;
+ case 3:
+ /*
+ * Last 8 entries of default 16-color palette is a more intense
+ * version of the first 8 (i.e., same chrominance, different
+ * luminance).
+ */
+ color &= 7;
+ break;
+ }
+
+ return color;
+}
+
static void fb_flashcursor(void *private)
{
struct fb_info *info = (struct fb_info *) private;
+ struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
+ struct display *p;
struct vc_data *vc = NULL;
+ int c;
+ int mode;
if (info->currcon != -1)
vc = vc_cons[info->currcon].d;
if (info->state != FBINFO_STATE_RUNNING ||
- info->cursor.rop == ROP_COPY || !vc || !CON_IS_VISIBLE(vc)
- || registered_fb[(int) con2fb_map[vc->vc_num]] != info)
+ !vc || !CON_IS_VISIBLE(vc) || !info->cursor.flash ||
+ vt_cons[vc->vc_num]->vc_mode != KD_TEXT ||
+ registered_fb[(int) con2fb_map[vc->vc_num]] != info)
return;
+ p = &fb_display[vc->vc_num];
+ c = scr_readw((u16 *) vc->vc_pos);
acquire_console_sem();
- info->cursor.enable ^= 1;
- info->fbops->fb_cursor(info, &info->cursor);
+ mode = (info->cursor.enable) ? CM_ERASE : CM_DRAW;
+ ops->cursor(vc, info, p, mode, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
release_console_sem();
}
@@ -271,7 +313,8 @@ int __init fb_console_setup(char *this_opt)
for (i = 0, j = 0; i < MAX_NR_CONSOLES; i++) {
if (!options[j])
j = 0;
- con2fb_map[i] = (options[j++]-'0') % FB_MAX;
+ con2fb_map_boot[i] =
+ (options[j++]-'0') % FB_MAX;
}
return 0;
}
@@ -314,13 +357,16 @@ static int search_for_mapped_con(void)
return 0;
}
-static int fbcon_takeover(void)
+static int fbcon_takeover(int show_logo)
{
int err, i;
if (!num_registered_fb)
return -ENODEV;
+ if (!show_logo)
+ logo_shown = -3;
+
for (i = first_fb_vc; i <= last_fb_vc; i++)
con2fb_map[i] = info_idx;
@@ -336,15 +382,112 @@ static int fbcon_takeover(void)
return err;
}
+static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
+ int cols, int rows, int new_cols, int new_rows)
+{
+ /* Need to make room for the logo */
+ int cnt, erase = vc->vc_video_erase_char, step;
+ unsigned short *save = NULL, *r, *q;
+
+ /*
+ * remove underline attribute from erase character
+ * if black and white framebuffer.
+ */
+ if (fb_get_color_depth(info) == 1)
+ erase &= ~0x400;
+ logo_height = fb_prepare_logo(info);
+ logo_lines = (logo_height + vc->vc_font.height - 1) /
+ vc->vc_font.height;
+ q = (unsigned short *) (vc->vc_origin +
+ vc->vc_size_row * rows);
+ step = logo_lines * cols;
+ for (r = q - logo_lines * cols; r < q; r++)
+ if (scr_readw(r) != vc->vc_video_erase_char)
+ break;
+ if (r != q && new_rows >= rows + logo_lines) {
+ save = kmalloc(logo_lines * new_cols * 2, GFP_KERNEL);
+ if (save) {
+ int i = cols < new_cols ? cols : new_cols;
+ scr_memsetw(save, erase, logo_lines * new_cols * 2);
+ r = q - step;
+ for (cnt = 0; cnt < logo_lines; cnt++, r += i)
+ scr_memcpyw(save + cnt * new_cols, r, 2 * i);
+ r = q;
+ }
+ }
+ if (r == q) {
+ /* We can scroll screen down */
+ r = q - step - cols;
+ for (cnt = rows - logo_lines; cnt > 0; cnt--) {
+ scr_memcpyw(r + step, r, vc->vc_size_row);
+ r -= cols;
+ }
+ if (!save) {
+ vc->vc_y += logo_lines;
+ vc->vc_pos += logo_lines * vc->vc_size_row;
+ }
+ }
+ scr_memsetw((unsigned short *) vc->vc_origin,
+ erase,
+ vc->vc_size_row * logo_lines);
+
+ if (CON_IS_VISIBLE(vc) && vt_cons[vc->vc_num]->vc_mode == KD_TEXT) {
+ fbcon_clear_margins(vc, 0);
+ update_screen(vc->vc_num);
+ }
+
+ if (save) {
+ q = (unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ rows);
+ scr_memcpyw(q, save, logo_lines * new_cols * 2);
+ vc->vc_y += logo_lines;
+ vc->vc_pos += logo_lines * vc->vc_size_row;
+ kfree(save);
+ }
+
+ if (logo_lines > vc->vc_bottom) {
+ logo_shown = -1;
+ printk(KERN_INFO
+ "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n");
+ } else if (logo_shown != -3) {
+ logo_shown = -2;
+ vc->vc_top = logo_lines;
+ }
+}
+
+#ifdef CONFIG_FB_TILEBLITTING
+static void set_blitting_type(struct vc_data *vc, struct fb_info *info,
+ struct display *p)
+{
+ struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
+
+ if ((info->flags & FBINFO_MISC_TILEBLITTING))
+ fbcon_set_tileops(vc, info, p, ops);
+ else
+ fbcon_set_bitops(ops);
+}
+#else
+static void set_blitting_type(struct vc_data *vc, struct fb_info *info,
+ struct display *p)
+{
+ struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
+
+ info->flags &= ~FBINFO_MISC_TILEBLITTING;
+ fbcon_set_bitops(ops);
+}
+#endif /* CONFIG_FB_TILEBLITTING */
+
/**
* set_con2fb_map - map console to frame buffer device
* @unit: virtual console number to map
* @newidx: frame buffer index to map virtual console to
+ * @user: user request
*
* Maps a virtual console @unit to a frame buffer device
* @newidx.
*/
-static int set_con2fb_map(int unit, int newidx)
+static int set_con2fb_map(int unit, int newidx, int user)
{
struct vc_data *vc = vc_cons[unit].d;
int oldidx = con2fb_map[unit];
@@ -355,12 +498,12 @@ static int set_con2fb_map(int unit, int newidx)
if (oldidx == newidx)
return 0;
- if (!vc)
- return -ENODEV;
+ if (!info)
+ return -EINVAL;
if (!search_for_mapped_con()) {
info_idx = newidx;
- return fbcon_takeover();
+ return fbcon_takeover(0);
}
if (oldidx != -1)
@@ -370,17 +513,35 @@ static int set_con2fb_map(int unit, int newidx)
acquire_console_sem();
con2fb_map[unit] = newidx;
+
if (!found) {
+ struct fbcon_ops *ops = NULL;
+ int err = 0;
if (!try_module_get(info->fbops->owner)) {
- con2fb_map[unit] = oldidx;
- release_console_sem();
- return -ENODEV;
+ err = -ENODEV;
}
- if (info->fbops->fb_open && info->fbops->fb_open(info, 0)) {
- module_put(info->fbops->owner);
+
+ if (!err && info->fbops->fb_open &&
+ info->fbops->fb_open(info, 0)) {
+ err = -ENODEV;
+ }
+
+ if (!err) {
+ ops = kmalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
+ if (!ops)
+ err = -ENOMEM;
+ }
+
+ if (!err) {
+ info->fbcon_par = ops;
+ set_blitting_type(vc, info, NULL);
+ }
+
+ if (err) {
con2fb_map[unit] = oldidx;
+ module_put(info->fbops->owner);
release_console_sem();
- return -ENODEV;
+ return err;
}
}
@@ -399,11 +560,14 @@ static int set_con2fb_map(int unit, int newidx)
release_console_sem();
return -ENODEV;
}
+
if (oldinfo->queue.func == fb_flashcursor)
del_timer_sync(&oldinfo->cursor_timer);
+
+ kfree(oldinfo->fbcon_par);
module_put(oldinfo->fbops->owner);
}
- info->currcon = -1;
+
if (!found) {
if (!info->queue.func || info->queue.func == fb_flashcursor) {
if (!info->queue.func)
@@ -416,259 +580,31 @@ static int set_con2fb_map(int unit, int newidx)
add_timer(&info->cursor_timer);
}
}
- if (info->fbops->fb_set_par)
- info->fbops->fb_set_par(info);
- fbcon_set_disp(info, vc);
- release_console_sem();
- return 0;
-}
-
-/*
- * Accelerated handlers.
- */
-static inline int get_color(struct vc_data *vc, struct fb_info *info,
- u16 c, int is_fg)
-{
- int depth = fb_get_color_depth(info);
- int color = 0;
-
- if (depth != 1)
- color = (is_fg) ? attr_fgcol((vc->vc_hi_font_mask) ? 9 : 8, c)
- : attr_bgcol((vc->vc_hi_font_mask) ? 13 : 12, c);
-
- switch (depth) {
- case 1:
- {
- /* 0 or 1 */
- int fg = (info->fix.visual != FB_VISUAL_MONO01) ? 1 : 0;
- int bg = (info->fix.visual != FB_VISUAL_MONO01) ? 0 : 1;
-
- color = (is_fg) ? fg : bg;
- break;
- }
- case 2:
- /*
- * Scale down 16-colors to 4 colors. Default 4-color palette
- * is grayscale.
- */
- color /= 4;
- break;
- case 3:
- /*
- * Last 8 entries of default 16-color palette is a more intense
- * version of the first 8 (i.e., same chrominance, different
- * luminance).
- */
- color &= 7;
- break;
- }
- return color;
-}
+ info->currcon = fg_console;
+ con2fb_map_boot[unit] = newidx;
-#define FBCON_ATTRIBUTE_UNDERLINE 1
-#define FBCON_ATTRIBUTE_REVERSE 2
-#define FBCON_ATTRIBUTE_BOLD 4
-
-static inline int get_attribute(struct fb_info *info, u16 c)
-{
- int attribute = 0;
-
- if (fb_get_color_depth(info) == 1) {
- if (attr_underline(c))
- attribute |= FBCON_ATTRIBUTE_UNDERLINE;
- if (attr_reverse(c))
- attribute |= FBCON_ATTRIBUTE_REVERSE;
- if (attr_bold(c))
- attribute |= FBCON_ATTRIBUTE_BOLD;
- }
-
- return attribute;
-}
-
-static inline void update_attr(u8 *dst, u8 *src, int attribute,
- struct vc_data *vc)
-{
- int i, offset = (vc->vc_font.height < 10) ? 1 : 2;
- int width = (vc->vc_font.width + 7) >> 3;
- unsigned int cellsize = vc->vc_font.height * width;
- u8 c;
-
- offset = cellsize - (offset * width);
- for (i = 0; i < cellsize; i++) {
- c = src[i];
- if (attribute & FBCON_ATTRIBUTE_UNDERLINE && i >= offset)
- c = 0xff;
- if (attribute & FBCON_ATTRIBUTE_BOLD)
- c |= c >> 1;
- if (attribute & FBCON_ATTRIBUTE_REVERSE)
- c = ~c;
- dst[i] = c;
- }
-}
-
-void accel_bmove(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int dy, int dx, int height, int width)
-{
- struct fb_copyarea area;
-
- area.sx = sx * vc->vc_font.width;
- area.sy = sy * vc->vc_font.height;
- area.dx = dx * vc->vc_font.width;
- area.dy = dy * vc->vc_font.height;
- area.height = height * vc->vc_font.height;
- area.width = width * vc->vc_font.width;
-
- info->fbops->fb_copyarea(info, &area);
-}
-
-void accel_clear(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width)
-{
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- struct fb_fillrect region;
-
- region.color = attr_bgcol_ec(bgshift, vc);
- region.dx = sx * vc->vc_font.width;
- region.dy = sy * vc->vc_font.height;
- region.width = width * vc->vc_font.width;
- region.height = height * vc->vc_font.height;
- region.rop = ROP_COPY;
-
- info->fbops->fb_fillrect(info, &region);
-}
-
-void accel_putcs(struct vc_data *vc, struct fb_info *info,
- const unsigned short *s, int count, int yy, int xx)
-{
- void (*move_unaligned)(struct fb_info *info, struct fb_pixmap *buf,
- u8 *dst, u32 d_pitch, u8 *src, u32 idx,
- u32 height, u32 shift_high, u32 shift_low,
- u32 mod);
- void (*move_aligned)(struct fb_info *info, struct fb_pixmap *buf,
- u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch,
- u32 height);
- unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
- unsigned int width = (vc->vc_font.width + 7) >> 3;
- unsigned int cellsize = vc->vc_font.height * width;
- unsigned int maxcnt = info->pixmap.size/cellsize;
- unsigned int scan_align = info->pixmap.scan_align - 1;
- unsigned int buf_align = info->pixmap.buf_align - 1;
- unsigned int shift_low = 0, mod = vc->vc_font.width % 8;
- unsigned int shift_high = 8, pitch, cnt, size, k;
- unsigned int idx = vc->vc_font.width >> 3;
- unsigned int attribute = get_attribute(info, scr_readw(s));
- struct fb_image image;
- u8 *src, *dst, *buf = NULL;
-
- if (attribute) {
- buf = kmalloc(cellsize, GFP_KERNEL);
- if (!buf)
- return;
- }
-
- image.fg_color = get_color(vc, info, scr_readw(s), 1);
- image.bg_color = get_color(vc, info, scr_readw(s), 0);
-
- image.dx = xx * vc->vc_font.width;
- image.dy = yy * vc->vc_font.height;
- image.height = vc->vc_font.height;
- image.depth = 1;
-
- if (info->pixmap.outbuf && info->pixmap.inbuf) {
- move_aligned = fb_iomove_buf_aligned;
- move_unaligned = fb_iomove_buf_unaligned;
- } else {
- move_aligned = fb_sysmove_buf_aligned;
- move_unaligned = fb_sysmove_buf_unaligned;
- }
- while (count) {
- if (count > maxcnt)
- cnt = k = maxcnt;
- else
- cnt = k = count;
-
- image.width = vc->vc_font.width * cnt;
- pitch = ((image.width + 7) >> 3) + scan_align;
- pitch &= ~scan_align;
- size = pitch * image.height + buf_align;
- size &= ~buf_align;
- dst = fb_get_buffer_offset(info, &info->pixmap, size);
- image.data = dst;
- if (mod) {
- while (k--) {
- src = vc->vc_font.data + (scr_readw(s++)&
- charmask)*cellsize;
-
- if (attribute) {
- update_attr(buf, src, attribute, vc);
- src = buf;
- }
+ if (info->fbops->fb_set_par)
+ info->fbops->fb_set_par(info);
- move_unaligned(info, &info->pixmap, dst, pitch,
- src, idx, image.height,
- shift_high, shift_low, mod);
- shift_low += mod;
- dst += (shift_low >= 8) ? width : width - 1;
- shift_low &= 7;
- shift_high = 8 - shift_low;
- }
- } else {
- while (k--) {
- src = vc->vc_font.data + (scr_readw(s++)&
- charmask)*cellsize;
+ if (vc)
+ fbcon_set_disp(info, vc);
+ else
+ fbcon_preset_disp(info, unit);
- if (attribute) {
- update_attr(buf, src, attribute, vc);
- src = buf;
- }
+ if (fg_console == 0 && !user && logo_shown != -3) {
+ struct vc_data *vc = vc_cons[fg_console].d;
+ struct fb_info *fg_info = registered_fb[(int) con2fb_map[fg_console]];
- move_aligned(info, &info->pixmap, dst, pitch,
- src, idx, image.height);
- dst += width;
- }
- }
- info->fbops->fb_imageblit(info, &image);
- image.dx += cnt * vc->vc_font.width;
- count -= cnt;
+ fbcon_prepare_logo(vc, fg_info, vc->vc_cols, vc->vc_rows,
+ vc->vc_cols, vc->vc_rows);
}
- if (buf)
- kfree(buf);
+ switch_screen(fg_console);
+ release_console_sem();
+ return 0;
}
-void accel_clear_margins(struct vc_data *vc, struct fb_info *info,
- int bottom_only)
-{
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- unsigned int cw = vc->vc_font.width;
- unsigned int ch = vc->vc_font.height;
- unsigned int rw = info->var.xres - (vc->vc_cols*cw);
- unsigned int bh = info->var.yres - (vc->vc_rows*ch);
- unsigned int rs = info->var.xres - rw;
- unsigned int bs = info->var.yres - bh;
- struct fb_fillrect region;
-
- region.color = attr_bgcol_ec(bgshift, vc);
- region.rop = ROP_COPY;
-
- if (rw && !bottom_only) {
- region.dx = info->var.xoffset + rs;
- region.dy = 0;
- region.width = rw;
- region.height = info->var.yres_virtual;
- info->fbops->fb_fillrect(info, &region);
- }
-
- if (bh) {
- region.dx = info->var.xoffset;
- region.dy = info->var.yoffset + bs;
- region.width = rs;
- region.height = bh;
- info->fbops->fb_fillrect(info, &region);
- }
-}
-
/*
* Low Level Operations
*/
@@ -722,6 +658,7 @@ static const char *fbcon_startup(void)
struct font_desc *font = NULL;
struct module *owner;
struct fb_info *info = NULL;
+ struct fbcon_ops *ops;
int rows, cols;
int irqres;
@@ -748,6 +685,16 @@ static const char *fbcon_startup(void)
module_put(owner);
return NULL;
}
+
+ ops = kmalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
+ if (!ops) {
+ module_put(owner);
+ return NULL;
+ }
+
+ info->fbcon_par = ops;
+ set_blitting_type(vc, info, NULL);
+
if (info->fix.type != FB_TYPE_TEXT) {
if (fbcon_softback_size) {
if (!softback_buf) {
@@ -777,7 +724,7 @@ static const char *fbcon_startup(void)
if (!p->fontdata) {
if (!fontname[0] || !(font = find_font(fontname)))
font = get_default_font(info->var.xres,
- info->var.yres);
+ info->var.yres);
vc->vc_font.width = font->width;
vc->vc_font.height = font->height;
vc->vc_font.data = p->fontdata = font->data;
@@ -793,7 +740,6 @@ static const char *fbcon_startup(void)
DPRINTK("res: %dx%d-%d\n", info->var.xres,
info->var.yres,
info->var.bits_per_pixel);
- con_set_default_unimap(vc->vc_num);
#ifdef CONFIG_ATARI
if (MACH_IS_ATARI) {
@@ -873,15 +819,15 @@ static void fbcon_init(struct vc_data *vc, int init)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
struct vc_data **default_mode = vc->vc_display_fg;
+ struct vc_data *svc = *default_mode;
struct display *t, *p = &fb_display[vc->vc_num];
int display_fg = (*default_mode)->vc_num;
int logo = 1, new_rows, new_cols, rows, cols, charcnt = 256;
- unsigned short *save = NULL, *r, *q;
int cap = info->flags;
if (info_idx == -1 || info == NULL)
return;
- if (vc->vc_num != display_fg || (info->flags & FBINFO_MODULE) ||
+ if (vc->vc_num != display_fg || logo_shown == -3 ||
(info->fix.type == FB_TYPE_TEXT))
logo = 0;
@@ -900,7 +846,6 @@ static void fbcon_init(struct vc_data *vc, int init)
p->userfont = t->userfont;
if (p->userfont)
REFCOUNT(p->fontdata)++;
- con_copy_unimap(vc->vc_num, display_fg);
}
if (p->userfont)
charcnt = FNTCHARCNT(p->fontdata);
@@ -913,6 +858,12 @@ static void fbcon_init(struct vc_data *vc, int init)
if (vc->vc_can_do_color)
vc->vc_complement_mask <<= 1;
}
+
+ if (!*svc->vc_uni_pagedir_loc)
+ con_set_default_unimap(display_fg);
+ if (!*vc->vc_uni_pagedir_loc)
+ con_copy_unimap(vc->vc_num, display_fg);
+
cols = vc->vc_cols;
rows = vc->vc_rows;
new_cols = info->var.xres / vc->vc_font.width;
@@ -945,75 +896,8 @@ static void fbcon_init(struct vc_data *vc, int init)
vc->vc_rows = new_rows;
}
- if (logo) {
- /* Need to make room for the logo */
- int cnt, erase = vc->vc_video_erase_char;
- int step;
-
- /*
- * remove underline attribute from erase character
- * if black and white framebuffer.
- */
- if (fb_get_color_depth(info) == 1)
- erase &= ~0x400;
- logo_height = fb_prepare_logo(info);
- logo_lines = (logo_height + vc->vc_font.height - 1) /
- vc->vc_font.height;
- q = (unsigned short *) (vc->vc_origin +
- vc->vc_size_row * rows);
- step = logo_lines * cols;
- for (r = q - logo_lines * cols; r < q; r++)
- if (scr_readw(r) != vc->vc_video_erase_char)
- break;
- if (r != q && new_rows >= rows + logo_lines) {
- save = kmalloc(logo_lines * new_cols * 2, GFP_KERNEL);
- if (save) {
- int i = cols < new_cols ? cols : new_cols;
- scr_memsetw(save, erase, logo_lines * new_cols * 2);
- r = q - step;
- for (cnt = 0; cnt < logo_lines; cnt++, r += i)
- scr_memcpyw(save + cnt * new_cols, r, 2 * i);
- r = q;
- }
- }
- if (r == q) {
- /* We can scroll screen down */
- r = q - step - cols;
- for (cnt = rows - logo_lines; cnt > 0; cnt--) {
- scr_memcpyw(r + step, r, vc->vc_size_row);
- r -= cols;
- }
- if (!save) {
- vc->vc_y += logo_lines;
- vc->vc_pos += logo_lines * vc->vc_size_row;
- }
- }
- scr_memsetw((unsigned short *) vc->vc_origin,
- erase,
- vc->vc_size_row * logo_lines);
-
- if (CON_IS_VISIBLE(vc) && vt_cons[vc->vc_num]->vc_mode == KD_TEXT) {
- accel_clear_margins(vc, info, 0);
- update_screen(vc->vc_num);
- }
- if (save) {
- q = (unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- rows);
- scr_memcpyw(q, save, logo_lines * new_cols * 2);
- vc->vc_y += logo_lines;
- vc->vc_pos += logo_lines * vc->vc_size_row;
- kfree(save);
- }
- if (logo_lines > vc->vc_bottom) {
- logo_shown = -1;
- printk(KERN_INFO
- "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n");
- } else {
- logo_shown = -2;
- vc->vc_top = logo_lines;
- }
- }
+ if (logo)
+ fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows);
if (vc->vc_num == display_fg && softback_buf) {
int l = fbcon_softback_size / vc->vc_size_row;
@@ -1043,7 +927,7 @@ static void fbcon_deinit(struct vc_data *vc)
* This system is now divided into two levels because of complications
* caused by hardware scrolling. Top level functions:
*
- * fbcon_bmove(), fbcon_clear(), fbcon_putc()
+ * fbcon_bmove(), fbcon_clear(), fbcon_putc(), fbcon_clear_margins()
*
* handles y values in range [0, scr_height-1] that correspond to real
* screen positions. y_wrap shift means that first line of bitmap may be
@@ -1074,7 +958,8 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
int width)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
-
+ struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
+
struct display *p = &fb_display[vc->vc_num];
u_int y_break;
@@ -1091,11 +976,11 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
y_break = p->vrows - p->yscroll;
if (sy < y_break && sy + height - 1 >= y_break) {
u_int b = y_break - sy;
- accel_clear(vc, info, real_y(p, sy), sx, b, width);
- accel_clear(vc, info, real_y(p, sy + b), sx, height - b,
+ ops->clear(vc, info, real_y(p, sy), sx, b, width);
+ ops->clear(vc, info, real_y(p, sy + b), sx, height - b,
width);
} else
- accel_clear(vc, info, real_y(p, sy), sx, height, width);
+ ops->clear(vc, info, real_y(p, sy), sx, height, width);
}
static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
@@ -1103,6 +988,7 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
if (!info->fbops->fb_blank && console_blanked)
return;
@@ -1112,7 +998,9 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
if (vt_cons[vc->vc_num]->vc_mode != KD_TEXT)
return;
- accel_putcs(vc, info, s, count, real_y(p, ypos), xpos);
+ ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+ get_color(vc, info, scr_readw(s), 1),
+ get_color(vc, info, scr_readw(s), 0));
}
static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
@@ -1120,137 +1008,39 @@ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
fbcon_putcs(vc, (const unsigned short *) &c, 1, ypos, xpos);
}
+static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
+{
+ struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
+
+ ops->clear_margins(vc, info, bottom_only);
+}
+
static void fbcon_cursor(struct vc_data *vc, int mode)
{
- struct fb_cursor cursor;
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
- unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
+ struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
struct display *p = &fb_display[vc->vc_num];
- int w = (vc->vc_font.width + 7) >> 3, c;
- int y = real_y(p, vc->vc_y), fg, bg;
- int attribute;
- u8 *src;
+ int y = real_y(p, vc->vc_y);
+ int c = scr_readw((u16 *) vc->vc_pos);
+ info->cursor.flash = 1;
if (mode & CM_SOFTBACK) {
mode &= ~CM_SOFTBACK;
if (softback_lines) {
- if (y + softback_lines >= vc->vc_rows)
+ if (y + softback_lines >= vc->vc_rows) {
mode = CM_ERASE;
+ info->cursor.flash = 0;
+ }
else
y += softback_lines;
}
} else if (softback_lines)
fbcon_set_origin(vc);
- c = scr_readw((u16 *) vc->vc_pos);
- attribute = get_attribute(info, c);
- src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
- if (attribute) {
- u8 *dst;
-
- dst = kmalloc(w * vc->vc_font.height, GFP_ATOMIC);
- if (!dst)
- return;
- if (info->cursor.data)
- kfree(info->cursor.data);
- info->cursor.data = dst;
- update_attr(dst, src, attribute, vc);
- src = dst;
- }
-
- cursor.image.data = src;
- cursor.set = FB_CUR_SETCUR;
- cursor.image.depth = 1;
-
- switch (mode) {
- case CM_ERASE:
- if (info->cursor.rop == ROP_XOR) {
- info->cursor.enable = 0;
- info->cursor.rop = ROP_COPY;
- info->fbops->fb_cursor(info, &cursor);
- }
- break;
- case CM_MOVE:
- case CM_DRAW:
- info->cursor.enable = 1;
- fg = get_color(vc, info, c, 1);
- bg = get_color(vc, info, c, 0);
-
- if (info->cursor.image.fg_color != fg ||
- info->cursor.image.bg_color != bg) {
- cursor.image.fg_color = fg;
- cursor.image.bg_color = bg;
- cursor.set |= FB_CUR_SETCMAP;
- }
-
- if ((info->cursor.image.dx != (vc->vc_font.width * vc->vc_x)) ||
- (info->cursor.image.dy != (vc->vc_font.height * y))) {
- cursor.image.dx = vc->vc_font.width * vc->vc_x;
- cursor.image.dy = vc->vc_font.height * y;
- cursor.set |= FB_CUR_SETPOS;
- }
-
- if (info->cursor.image.height != vc->vc_font.height ||
- info->cursor.image.width != vc->vc_font.width) {
- cursor.image.height = vc->vc_font.height;
- cursor.image.width = vc->vc_font.width;
- cursor.set |= FB_CUR_SETSIZE;
- }
-
- if (info->cursor.hot.x || info->cursor.hot.y) {
- cursor.hot.x = cursor.hot.y = 0;
- cursor.set |= FB_CUR_SETHOT;
- }
-
- if ((cursor.set & FB_CUR_SETSIZE) ||
- ((vc->vc_cursor_type & 0x0f) != p->cursor_shape)
- || info->cursor.mask == NULL) {
- char *mask = kmalloc(w*vc->vc_font.height, GFP_ATOMIC);
- int cur_height, size, i = 0;
- u8 msk = 0xff;
-
- if (!mask)
- return;
-
- if (info->cursor.mask)
- kfree(info->cursor.mask);
- info->cursor.mask = mask;
- p->cursor_shape = vc->vc_cursor_type & 0x0f;
- cursor.set |= FB_CUR_SETSHAPE;
-
- switch (vc->vc_cursor_type & 0x0f) {
- case CUR_NONE:
- cur_height = 0;
- break;
- case CUR_UNDERLINE:
- cur_height = (vc->vc_font.height < 10) ? 1 : 2;
- break;
- case CUR_LOWER_THIRD:
- cur_height = vc->vc_font.height/3;
- break;
- case CUR_LOWER_HALF:
- cur_height = vc->vc_font.height >> 1;
- break;
- case CUR_TWO_THIRDS:
- cur_height = (vc->vc_font.height << 1)/3;
- break;
- case CUR_BLOCK:
- default:
- cur_height = vc->vc_font.height;
- break;
- }
- size = (vc->vc_font.height - cur_height) * w;
- while (size--)
- mask[i++] = ~msk;
- size = cur_height * w;
- while (size--)
- mask[i++] = msk;
- }
- info->cursor.rop = ROP_XOR;
- info->fbops->fb_cursor(info, &cursor);
- vbl_cursor_cnt = CURSOR_DRAW_DELAY;
- break;
- }
+ ops->cursor(vc, info, p, mode, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ vbl_cursor_cnt = CURSOR_DRAW_DELAY;
}
static int scrollback_phys_max = 0;
@@ -1264,10 +1054,29 @@ int update_var(int con, struct fb_info *info)
return 0;
}
+/*
+ * If no vc is existent yet, just set struct display
+ */
+static void fbcon_preset_disp(struct fb_info *info, int unit)
+{
+ struct display *p = &fb_display[unit];
+ struct display *t = &fb_display[fg_console];
+
+ info->var.xoffset = info->var.yoffset = p->yscroll = 0;
+ if (var_to_display(p, &info->var, info))
+ return;
+
+ p->fontdata = t->fontdata;
+ p->userfont = t->userfont;
+ if (p->userfont)
+ REFCOUNT(p->fontdata)++;
+}
+
static void fbcon_set_disp(struct fb_info *info, struct vc_data *vc)
{
struct display *p = &fb_display[vc->vc_num], *t;
struct vc_data **default_mode = vc->vc_display_fg;
+ struct vc_data *svc = *default_mode;
int display_fg = (*default_mode)->vc_num;
int rows, cols, charcnt = 256;
@@ -1282,7 +1091,6 @@ static void fbcon_set_disp(struct fb_info *info, struct vc_data *vc)
p->userfont = t->userfont;
if (p->userfont)
REFCOUNT(p->fontdata)++;
- con_copy_unimap(vc->vc_num, display_fg);
}
if (p->userfont)
charcnt = FNTCHARCNT(p->fontdata);
@@ -1296,6 +1104,12 @@ static void fbcon_set_disp(struct fb_info *info, struct vc_data *vc)
if (vc->vc_can_do_color)
vc->vc_complement_mask <<= 1;
}
+
+ if (!*svc->vc_uni_pagedir_loc)
+ con_set_default_unimap(display_fg);
+ if (!*vc->vc_uni_pagedir_loc)
+ con_copy_unimap(vc->vc_num, display_fg);
+
cols = info->var.xres / vc->vc_font.width;
rows = info->var.yres / vc->vc_font.height;
vc_resize(vc->vc_num, cols, rows);
@@ -1314,7 +1128,6 @@ static void fbcon_set_disp(struct fb_info *info, struct vc_data *vc)
}
}
}
- switch_screen(fg_console);
}
static __inline__ void ywrap_up(struct vc_data *vc, int count)
@@ -1357,18 +1170,19 @@ static __inline__ void ypan_up(struct vc_data *vc, int count)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
-
+ struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
+
p->yscroll += count;
if (p->yscroll > p->vrows - vc->vc_rows) {
- accel_bmove(vc, info, p->vrows - vc->vc_rows,
- 0, 0, 0, vc->vc_rows, vc->vc_cols);
+ ops->bmove(vc, info, p->vrows - vc->vc_rows,
+ 0, 0, 0, vc->vc_rows, vc->vc_cols);
p->yscroll -= p->vrows - vc->vc_rows;
}
info->var.xoffset = 0;
info->var.yoffset = p->yscroll * vc->vc_font.height;
info->var.vmode &= ~FB_VMODE_YWRAP;
update_var(vc->vc_num, info);
- accel_clear_margins(vc, info, 1);
+ fbcon_clear_margins(vc, 1);
scrollback_max += count;
if (scrollback_max > scrollback_phys_max)
scrollback_max = scrollback_phys_max;
@@ -1393,7 +1207,7 @@ static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
if (redraw)
fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
update_var(vc->vc_num, info);
- accel_clear_margins(vc, info, 1);
+ fbcon_clear_margins(vc, 1);
scrollback_max += count;
if (scrollback_max > scrollback_phys_max)
scrollback_max = scrollback_phys_max;
@@ -1404,18 +1218,19 @@ static __inline__ void ypan_down(struct vc_data *vc, int count)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
p->yscroll -= count;
if (p->yscroll < 0) {
- accel_bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
- 0, vc->vc_rows, vc->vc_cols);
+ ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
+ 0, vc->vc_rows, vc->vc_cols);
p->yscroll += p->vrows - vc->vc_rows;
}
info->var.xoffset = 0;
info->var.yoffset = p->yscroll * vc->vc_font.height;
info->var.vmode &= ~FB_VMODE_YWRAP;
update_var(vc->vc_num, info);
- accel_clear_margins(vc, info, 1);
+ fbcon_clear_margins(vc, 1);
scrollback_max -= count;
if (scrollback_max < 0)
scrollback_max = 0;
@@ -1439,7 +1254,7 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
if (redraw)
fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
update_var(vc->vc_num, info);
- accel_clear_margins(vc, info, 1);
+ fbcon_clear_margins(vc, 1);
scrollback_max -= count;
if (scrollback_max < 0)
scrollback_max = 0;
@@ -1449,7 +1264,6 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
static void fbcon_redraw_softback(struct vc_data *vc, struct display *p,
long delta)
{
- struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
int count = vc->vc_rows;
unsigned short *d, *s;
unsigned long n;
@@ -1506,16 +1320,16 @@ static void fbcon_redraw_softback(struct vc_data *vc, struct display *p,
if (attr != (c & 0xff00)) {
attr = c & 0xff00;
if (s > start) {
- accel_putcs(vc, info, start, s - start,
- real_y(p, line), x);
+ fbcon_putcs(vc, start, s - start,
+ line, x);
x += s - start;
start = s;
}
}
if (c == scr_readw(d)) {
if (s > start) {
- accel_putcs(vc, info, start, s - start,
- real_y(p, line), x);
+ fbcon_putcs(vc, start, s - start,
+ line, x);
x += s - start + 1;
start = s + 1;
} else {
@@ -1527,8 +1341,7 @@ static void fbcon_redraw_softback(struct vc_data *vc, struct display *p,
d++;
} while (s < le);
if (s > start)
- accel_putcs(vc, info, start, s - start,
- real_y(p, line), x);
+ fbcon_putcs(vc, start, s - start, line, x);
line++;
if (d == (u16 *) softback_end)
d = (u16 *) softback_buf;
@@ -1544,7 +1357,6 @@ static void fbcon_redraw_softback(struct vc_data *vc, struct display *p,
static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
int line, int count, int dy)
{
- struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
unsigned short *s = (unsigned short *)
(vc->vc_origin + vc->vc_size_row * line);
@@ -1560,8 +1372,8 @@ static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
if (attr != (c & 0xff00)) {
attr = c & 0xff00;
if (s > start) {
- accel_putcs(vc, info, start, s - start,
- real_y(p, dy), x);
+ fbcon_putcs(vc, start, s - start,
+ dy, x);
x += s - start;
start = s;
}
@@ -1570,8 +1382,7 @@ static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
s++;
} while (s < le);
if (s > start)
- accel_putcs(vc, info, start, s - start,
- real_y(p, dy), x);
+ fbcon_putcs(vc, start, s - start, dy, x);
console_conditional_schedule();
dy++;
}
@@ -1582,7 +1393,6 @@ static void fbcon_redraw(struct vc_data *vc, struct display *p,
{
unsigned short *d = (unsigned short *)
(vc->vc_origin + vc->vc_size_row * line);
- struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
unsigned short *s = d + offset;
while (count--) {
@@ -1597,16 +1407,16 @@ static void fbcon_redraw(struct vc_data *vc, struct display *p,
if (attr != (c & 0xff00)) {
attr = c & 0xff00;
if (s > start) {
- accel_putcs(vc, info, start, s - start,
- real_y(p, line), x);
+ fbcon_putcs(vc, start, s - start,
+ line, x);
x += s - start;
start = s;
}
}
if (c == scr_readw(d)) {
if (s > start) {
- accel_putcs(vc, info, start, s - start,
- real_y(p, line), x);
+ fbcon_putcs(vc, start, s - start,
+ line, x);
x += s - start + 1;
start = s + 1;
} else {
@@ -1620,8 +1430,7 @@ static void fbcon_redraw(struct vc_data *vc, struct display *p,
d++;
} while (s < le);
if (s > start)
- accel_putcs(vc, info, start, s - start,
- real_y(p, line), x);
+ fbcon_putcs(vc, start, s - start, line, x);
console_conditional_schedule();
if (offset > 0)
line++;
@@ -1664,6 +1473,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
if (!info->fbops->fb_blank && console_blanked)
@@ -1690,10 +1500,10 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
goto redraw_up;
switch (p->scrollmode) {
case SCROLL_MOVE:
- accel_bmove(vc, info, t + count, 0, t, 0,
- b - t - count, vc->vc_cols);
- accel_clear(vc, info, b - count, 0, count,
- vc->vc_cols);
+ ops->bmove(vc, info, t + count, 0, t, 0,
+ b - t - count, vc->vc_cols);
+ ops->clear(vc, info, b - count, 0, count,
+ vc->vc_cols);
break;
case SCROLL_WRAP_MOVE:
@@ -1759,8 +1569,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
redraw_up:
fbcon_redraw(vc, p, t, b - t - count,
count * vc->vc_cols);
- accel_clear(vc, info, real_y(p, b - count), 0,
- count, vc->vc_cols);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
scr_memsetw((unsigned short *) (vc->vc_origin +
vc->vc_size_row *
(b - count)),
@@ -1775,9 +1584,9 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
count = vc->vc_rows;
switch (p->scrollmode) {
case SCROLL_MOVE:
- accel_bmove(vc, info, t, 0, t + count, 0,
- b - t - count, vc->vc_cols);
- accel_clear(vc, info, t, 0, count, vc->vc_cols);
+ ops->bmove(vc, info, t, 0, t + count, 0,
+ b - t - count, vc->vc_cols);
+ ops->clear(vc, info, t, 0, count, vc->vc_cols);
break;
case SCROLL_WRAP_MOVE:
@@ -1841,8 +1650,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
redraw_down:
fbcon_redraw(vc, p, b - 1, b - t - count,
-count * vc->vc_cols);
- accel_clear(vc, info, real_y(p, t), 0, count,
- vc->vc_cols);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
scr_memsetw((unsigned short *) (vc->vc_origin +
vc->vc_size_row *
t),
@@ -1882,6 +1690,7 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
int dy, int dx, int height, int width, u_int y_break)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
u_int b;
if (sy < y_break && sy + height > y_break) {
@@ -1915,8 +1724,8 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
}
return;
}
- accel_bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
- height, width);
+ ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
}
static __inline__ void updatescrollmode(struct display *p, struct fb_info *info,
@@ -2010,11 +1819,12 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
static int fbcon_switch(struct vc_data *vc)
{
- struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
+ struct fb_info *info;
struct display *p = &fb_display[vc->vc_num];
struct fb_var_screeninfo var;
- int i;
+ int i, prev_console, do_set_par = 0;
+ info = registered_fb[(int) con2fb_map[vc->vc_num]];
if (softback_top) {
int l = fbcon_softback_size / vc->vc_size_row;
if (softback_lines)
@@ -2039,6 +1849,8 @@ static int fbcon_switch(struct vc_data *vc)
logo_shown = -1;
}
+ prev_console = info->currcon;
+
/*
* FIXME: If we have multiple fbdev's loaded, we need to
* update all info->currcon. Perhaps, we can place this
@@ -2053,7 +1865,6 @@ static int fbcon_switch(struct vc_data *vc)
}
memset(&var, 0, sizeof(struct fb_var_screeninfo));
- fb_videomode_to_var(&var, p->mode);
display_to_var(&var, p);
var.activate = FB_ACTIVATE_NOW;
@@ -2065,12 +1876,18 @@ static int fbcon_switch(struct vc_data *vc)
info->var.yoffset = info->var.xoffset = p->yscroll = 0;
fb_set_var(info, &var);
- if (info->flags & FBINFO_MISC_MODESWITCH) {
+ if (prev_console != -1 &&
+ registered_fb[(int) con2fb_map[prev_console]] != info)
+ do_set_par = 1;
+
+ if (do_set_par || info->flags & FBINFO_MISC_MODESWITCH) {
if (info->fbops->fb_set_par)
info->fbops->fb_set_par(info);
info->flags &= ~FBINFO_MISC_MODESWITCH;
}
+ set_blitting_type(vc, info, p);
+
vc->vc_can_do_color = (fb_get_color_depth(info) != 1);
vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
updatescrollmode(p, info, vc);
@@ -2096,8 +1913,9 @@ static int fbcon_switch(struct vc_data *vc)
fbcon_set_palette(vc, color_table);
if (vt_cons[vc->vc_num]->vc_mode == KD_TEXT)
- accel_clear_margins(vc, info, 0);
+ fbcon_clear_margins(vc, 0);
if (logo_shown == -2) {
+
logo_shown = fg_console;
/* This is protected above by initmem_freed */
fb_show_logo(info);
@@ -2143,6 +1961,7 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
}
fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ info->cursor.flash = (!blank);
if (!info->fbops->fb_blank) {
if (blank) {
@@ -2155,14 +1974,11 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
height = vc->vc_rows;
y_break = p->vrows - p->yscroll;
if (height > y_break) {
- accel_clear(vc, info, real_y(p, 0),
- 0, y_break, vc->vc_cols);
- accel_clear(vc, info, real_y(p, y_break),
- 0, height - y_break,
+ fbcon_clear(vc, 0, 0, y_break, vc->vc_cols);
+ fbcon_clear(vc, y_break, 0, height - y_break,
vc->vc_cols);
} else
- accel_clear(vc, info, real_y(p, 0),
- 0, height, vc->vc_cols);
+ fbcon_clear(vc, 0, 0, height, vc->vc_cols);
vc->vc_video_erase_char = oldc;
} else
update_screen(vc->vc_num);
@@ -2329,7 +2145,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
}
} else if (CON_IS_VISIBLE(vc)
&& vt_cons[vc->vc_num]->vc_mode == KD_TEXT) {
- accel_clear_margins(vc, info, 0);
+ fbcon_clear_margins(vc, 0);
update_screen(vc->vc_num);
}
@@ -2672,9 +2488,14 @@ static int fbcon_set_origin(struct vc_data *vc)
static void fbcon_suspended(struct fb_info *info)
{
+ struct vc_data *vc = NULL;
+
+ if (info->currcon < 0)
+ return;
+ vc = vc_cons[info->currcon].d;
+
/* Clear cursor, restore saved data */
- info->cursor.enable = 0;
- info->fbops->fb_cursor(info, &info->cursor);
+ fbcon_cursor(vc, CM_ERASE);
}
static void fbcon_resumed(struct fb_info *info)
@@ -2753,11 +2574,22 @@ static int fbcon_mode_deleted(struct fb_info *info,
static int fbcon_fb_registered(int idx)
{
- int ret = 0;
+ int ret = 0, i;
if (info_idx == -1) {
- info_idx = idx;
- ret = fbcon_takeover();
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ if (con2fb_map_boot[i] == idx) {
+ info_idx = idx;
+ break;
+ }
+ }
+ if (info_idx != -1)
+ ret = fbcon_takeover(1);
+ } else {
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ if (con2fb_map_boot[i] == idx)
+ set_con2fb_map(i, idx, 0);
+ }
}
return ret;
@@ -2791,7 +2623,8 @@ static int fbcon_event_notify(struct notifier_block *self,
break;
case FB_EVENT_SET_CONSOLE_MAP:
con2fb = (struct fb_con2fbmap *) event->data;
- ret = set_con2fb_map(con2fb->console - 1, con2fb->framebuffer);
+ ret = set_con2fb_map(con2fb->console - 1,
+ con2fb->framebuffer, 1);
break;
case FB_EVENT_GET_CONSOLE_MAP:
con2fb = (struct fb_con2fbmap *) event->data;
@@ -2854,7 +2687,7 @@ int __init fb_console_init(void)
break;
}
}
- fbcon_takeover();
+ fbcon_takeover(0);
}
return 0;
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index a89001a40856..2b8d413772d9 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -48,6 +48,19 @@ struct display {
struct fb_videomode *mode;
};
+struct fbcon_ops {
+ void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width);
+ void (*clear)(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int height, int width);
+ void (*putcs)(struct vc_data *vc, struct fb_info *info,
+ const unsigned short *s, int count, int yy, int xx,
+ int fg, int bg);
+ void (*clear_margins)(struct vc_data *vc, struct fb_info *info,
+ int bottom_only);
+ void (*cursor)(struct vc_data *vc, struct fb_info *info,
+ struct display *p, int mode, int fg, int bg);
+};
/*
* Attribute Decoding
*/
@@ -72,6 +85,13 @@ struct display {
#define attr_blink(s) \
((s) & 0x8000)
+/* Font */
+#define REFCOUNT(fd) (((int *)(fd))[-1])
+#define FNTSIZE(fd) (((int *)(fd))[-2])
+#define FNTCHARCNT(fd) (((int *)(fd))[-3])
+#define FNTSUM(fd) (((int *)(fd))[-4])
+#define FONT_EXTRA_WORDS 4
+
/*
* Scroll Method
*/
@@ -129,5 +149,9 @@ struct display {
#define SCROLL_PAN_REDRAW 0x005
extern int fb_console_init(void);
-
+#ifdef CONFIG_FB_TILEBLITTING
+extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info,
+ struct display *p, struct fbcon_ops *ops);
+#endif
+extern void fbcon_set_bitops(struct fbcon_ops *ops);
#endif /* _VIDEO_FBCON_H */
diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c
new file mode 100644
index 000000000000..93927180b298
--- /dev/null
+++ b/drivers/video/console/tileblit.c
@@ -0,0 +1,146 @@
+/*
+ * linux/drivers/video/console/tileblit.c -- Tile Blitting Operation
+ *
+ * Copyright (C) 2004 Antonino Daplas <adaplas@pol.net>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fb.h>
+#include <linux/vt_kern.h>
+#include <linux/console.h>
+#include <asm/types.h>
+#include "fbcon.h"
+
+static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fb_tilearea area;
+
+ area.sx = sx;
+ area.sy = sy;
+ area.dx = dx;
+ area.dy = dy;
+ area.height = height;
+ area.width = width;
+
+ info->tileops->fb_tilecopy(info, &area);
+}
+
+static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int height, int width)
+{
+ struct fb_tilerect rect;
+ int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ int fgshift = (vc->vc_hi_font_mask) ? 9 : 8;
+
+ rect.index = vc->vc_video_erase_char &
+ ((vc->vc_hi_font_mask) ? 0x1ff : 0xff);
+ rect.fg = attr_fgcol_ec(fgshift, vc);
+ rect.bg = attr_bgcol_ec(bgshift, vc);
+ rect.sx = sx;
+ rect.sy = sy;
+ rect.width = width;
+ rect.height = height;
+ rect.rop = ROP_COPY;
+
+ info->tileops->fb_tilefill(info, &rect);
+}
+
+static void tile_putcs(struct vc_data *vc, struct fb_info *info,
+ const unsigned short *s, int count, int yy, int xx,
+ int fg, int bg)
+{
+ struct fb_tileblit blit;
+ unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
+ int size = sizeof(u32) * count, i;
+
+ blit.sx = xx;
+ blit.sy = yy;
+ blit.width = count;
+ blit.height = 1;
+ blit.fg = fg;
+ blit.bg = bg;
+ blit.length = count;
+ blit.indices = (u32 *) fb_get_buffer_offset(info, &info->pixmap, size);
+ for (i = 0; i < count; i++)
+ blit.indices[i] = (u32)(scr_readw(s++) & charmask);
+
+ info->tileops->fb_tileblit(info, &blit);
+}
+
+static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
+ int bottom_only)
+{
+ return;
+}
+
+static void tile_cursor(struct vc_data *vc, struct fb_info *info,
+ struct display *p, int mode, int fg, int bg)
+{
+ struct fb_tilecursor cursor;
+
+ cursor.sx = vc->vc_x;
+ cursor.sy = vc->vc_y;
+ cursor.mode = (mode == CM_ERASE) ? 0 : 1;
+ cursor.fg = fg;
+ cursor.bg = bg;
+
+ switch (vc->vc_cursor_type & 0x0f) {
+ case CUR_NONE:
+ cursor.shape = FB_TILE_CURSOR_NONE;
+ break;
+ case CUR_UNDERLINE:
+ cursor.shape = FB_TILE_CURSOR_UNDERLINE;
+ break;
+ case CUR_LOWER_THIRD:
+ cursor.shape = FB_TILE_CURSOR_LOWER_THIRD;
+ break;
+ case CUR_LOWER_HALF:
+ cursor.shape = FB_TILE_CURSOR_LOWER_HALF;
+ break;
+ case CUR_TWO_THIRDS:
+ cursor.shape = FB_TILE_CURSOR_TWO_THIRDS;
+ break;
+ case CUR_BLOCK:
+ default:
+ cursor.shape = FB_TILE_CURSOR_BLOCK;
+ break;
+ }
+
+ info->tileops->fb_tilecursor(info, &cursor);
+}
+
+void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info,
+ struct display *p, struct fbcon_ops *ops)
+{
+ struct fb_tilemap map;
+
+ ops->bmove = tile_bmove;
+ ops->clear = tile_clear;
+ ops->putcs = tile_putcs;
+ ops->clear_margins = tile_clear_margins;
+ ops->cursor = tile_cursor;
+
+ if (p) {
+ map.width = vc->vc_font.width;
+ map.height = vc->vc_font.height;
+ map.depth = 1;
+ map.length = (p->userfont) ?
+ FNTCHARCNT(p->fontdata) : 256;
+ map.data = p->fontdata;
+ info->tileops->fb_settile(info, &map);
+ }
+}
+
+EXPORT_SYMBOL(fbcon_set_tileops);
+
+MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
+MODULE_DESCRIPTION("Tile Blitting Operation");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 0e91d6b577e4..61181b2d7a19 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -901,8 +901,10 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
for (i = 0; i < MAX_NR_CONSOLES; i++) {
struct vc_data *c = vc_cons[i].d;
- if (c && c->vc_sw == &vga_con)
+ if (c && c->vc_sw == &vga_con) {
+ c->vc_font.height = fontheight;
vc_resize(c->vc_num, 0, rows); /* Adjust console size */
+ }
}
return 0;
}
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 4dca34fdf767..0e3ab898b1e8 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1399,6 +1399,8 @@ static int __devinit cyberpro_common_probe(struct cfb_info *cfb)
cfb->fb.var.xres, cfb->fb.var.yres,
h_sync / 1000, h_sync % 1000, v_sync);
+ if (cfb->dev)
+ cfb->fb.device = &cfb->dev->dev;
err = register_framebuffer(&cfb->fb);
failed:
@@ -1722,7 +1724,7 @@ int __init cyber2000fb_init(void)
#ifndef MODULE
char *option = NULL;
- if (fb_get_options("cyber2000fb", NULL))
+ if (fb_get_options("cyber2000fb", &option))
return -ENODEV;
cyber2000fb_setup(option);
#endif
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index ef5cea5a0fb4..c21b42e4b09a 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -368,6 +368,9 @@ int fb_prepare_logo(struct fb_info *info)
memset(&fb_logo, 0, sizeof(struct logo_data));
+ if (info->flags & FBINFO_MISC_TILEBLITTING)
+ return 0;
+
if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
depth = info->var.blue.length;
if (info->var.red.length < depth)
@@ -504,7 +507,8 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
struct inode *inode = file->f_dentry->d_inode;
int fbidx = iminor(inode);
struct fb_info *info = registered_fb[fbidx];
- u32 *buffer, *dst, *src;
+ u32 *buffer, *dst;
+ u32 __iomem *src;
int c, i, cnt = 0, err = 0;
unsigned long total_size;
@@ -534,7 +538,7 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (!buffer)
return -ENOMEM;
- src = (u32 *) (info->screen_base + p);
+ src = (u32 __iomem *) (info->screen_base + p);
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
@@ -546,12 +550,12 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
*dst++ = fb_readl(src++);
if (c & 3) {
u8 *dst8 = (u8 *) dst;
- u8 *src8 = (u8 *) src;
+ u8 __iomem *src8 = (u8 __iomem *) src;
for (i = c & 3; i--;)
*dst8++ = fb_readb(src8++);
- src = (u32 *) src8;
+ src = (u32 __iomem *) src8;
}
if (copy_to_user(buf, buffer, c)) {
@@ -575,7 +579,8 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
struct inode *inode = file->f_dentry->d_inode;
int fbidx = iminor(inode);
struct fb_info *info = registered_fb[fbidx];
- u32 *buffer, *dst, *src;
+ u32 *buffer, *src;
+ u32 __iomem *dst;
int c, i, cnt = 0, err;
unsigned long total_size;
@@ -607,7 +612,7 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
if (!buffer)
return -ENOMEM;
- dst = (u32 *) (info->screen_base + p);
+ dst = (u32 __iomem *) (info->screen_base + p);
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
@@ -623,12 +628,12 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
fb_writel(*src++, dst++);
if (c & 3) {
u8 *src8 = (u8 *) src;
- u8 *dst8 = (u8 *) dst;
+ u8 __iomem *dst8 = (u8 __iomem *) dst;
for (i = c & 3; i--; )
fb_writeb(*src8++, dst8++);
- dst = (u32 *) dst8;
+ dst = (u32 __iomem *) dst8;
}
*ppos += c;
buf += c;
@@ -654,7 +659,8 @@ fb_load_cursor_image(struct fb_info *info)
u8 *data = (u8 *) info->cursor.image.data;
if (info->sprite.outbuf)
- info->sprite.outbuf(info, info->sprite.addr, data, width);
+ info->sprite.outbuf(info, info->sprite.addr, data,
+ width);
else
memcpy(info->sprite.addr, data, width);
}
@@ -679,6 +685,7 @@ fb_cursor(struct fb_info *info, struct fb_cursor_user __user *sprite)
cursor.image.cmap.blue = info->cursor.image.cmap.blue;
cursor.image.cmap.transp = info->cursor.image.cmap.transp;
cursor.data = NULL;
+ cursor.flash = 0;
if (cursor.set & FB_CUR_SETCUR)
info->cursor.enable = 1;
@@ -847,20 +854,28 @@ int
fb_blank(struct fb_info *info, int blank)
{
/* ??? Variable sized stack allocation. */
- u16 black[info->cmap.len];
struct fb_cmap cmap;
+ u16 *black = NULL;
+ int err = 0;
if (info->fbops->fb_blank && !info->fbops->fb_blank(blank, info))
return 0;
if (blank) {
- memset(black, 0, info->cmap.len * sizeof(u16));
- cmap.red = cmap.green = cmap.blue = black;
- cmap.transp = info->cmap.transp ? black : NULL;
- cmap.start = info->cmap.start;
- cmap.len = info->cmap.len;
+ black = kmalloc(sizeof(u16) * info->cmap.len, GFP_KERNEL);
+ black = kmalloc(sizeof(u16) * info->cmap.len, GFP_KERNEL);
+ memset(black, 0, info->cmap.len * sizeof(u16));
+ cmap.red = cmap.green = cmap.blue = black;
+ cmap.transp = info->cmap.transp ? black : NULL;
+ cmap.start = info->cmap.start;
+ cmap.len = info->cmap.len;
+ }
} else
cmap = info->cmap;
- return fb_set_cmap(&cmap, info);
+
+ err = fb_set_cmap(&cmap, info);
+ kfree(black);
+
+ return err;
}
static int
@@ -948,14 +963,11 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
#endif /* CONFIG_KMOD */
if (!registered_fb[con2fb.framebuffer])
return -EINVAL;
- if (con2fb.console > 0 && con2fb.console < MAX_NR_CONSOLES) {
- event.info = info;
- event.data = &con2fb;
- return notifier_call_chain(&fb_notifier_list,
- FB_EVENT_SET_CONSOLE_MAP,
- &event);
- }
- return -EINVAL;
+ event.info = info;
+ event.data = &con2fb;
+ return notifier_call_chain(&fb_notifier_list,
+ FB_EVENT_SET_CONSOLE_MAP,
+ &event);
case FBIOBLANK:
acquire_console_sem();
i = fb_blank(info, arg);
@@ -1144,7 +1156,8 @@ register_framebuffer(struct fb_info *fb_info)
break;
fb_info->node = i;
- c = class_simple_device_add(fb_class, MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
+ c = class_simple_device_add(fb_class, MKDEV(FB_MAJOR, i),
+ fb_info->device, "fb%d", i);
if (IS_ERR(c)) {
/* Not fatal */
printk(KERN_WARNING "Unable to create class_device for framebuffer %d; errno = %ld\n", i, PTR_ERR(c));
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 8b3bbe0c641b..4fec33dc8e31 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -51,6 +51,8 @@ struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
if (size)
info->par = p + fb_info_size;
+ info->device = dev;
+
return info;
#undef PADDING
#undef BYTES_PER_LONG
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index 3adb65df1fac..cda492a55f61 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -292,18 +292,7 @@ static int __devinit fm2fb_probe(struct zorro_dev *z,
return 0;
}
-int __init fm2fb_setup(char *options);
-
-int __init fm2fb_init(void)
-{
- char *option = NULL;
-
- if (fb_get_options("fm2fb", &option))
- return -ENODEV;
- fm2fb_setup(option);
- return zorro_register_driver(&fm2fb_driver);
-}
-
+int __init fm2fb_setup(char *options)
{
char *this_opt;
@@ -319,5 +308,15 @@ int __init fm2fb_init(void)
return 0;
}
+int __init fm2fb_init(void)
+{
+ char *option = NULL;
+
+ if (fb_get_options("fm2fb", &option))
+ return -ENODEV;
+ fm2fb_setup(option);
+ return zorro_register_driver(&fm2fb_driver);
+}
+
module_init(fm2fb_init);
MODULE_LICENSE("GPL");
diff --git a/drivers/video/i810/i810.h b/drivers/video/i810/i810.h
index 68b64233a1aa..4df3b7766ff5 100644
--- a/drivers/video/i810/i810.h
+++ b/drivers/video/i810/i810.h
@@ -222,7 +222,7 @@ struct mode_registers {
struct heap_data {
unsigned long physical;
- __u8 *virtual;
+ __u8 __iomem *virtual;
u32 offset;
u32 size;
};
@@ -256,7 +256,7 @@ struct i810fb_par {
u32 pseudo_palette[17];
u32 pci_state[16];
unsigned long mmio_start_phys;
- u8 *mmio_start_virtual;
+ u8 __iomem *mmio_start_virtual;
u32 pitch;
u32 pixconf;
u32 watermark;
diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
index 71f649f55592..9921db8a48c7 100644
--- a/drivers/video/i810/i810_accel.c
+++ b/drivers/video/i810/i810_accel.c
@@ -32,7 +32,7 @@ extern void flush_cache(void);
/************************************************************/
/* BLT Engine Routines */
-static inline void i810_report_error(u8 *mmio)
+static inline void i810_report_error(u8 __iomem *mmio)
{
printk("IIR : 0x%04x\n"
"EIR : 0x%04x\n"
@@ -59,7 +59,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
{
struct i810fb_par *par = (struct i810fb_par *) info->par;
u32 head, count = WAIT_COUNT, tail;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
tail = par->cur_tail;
while (count--) {
@@ -89,7 +89,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
static inline int wait_for_engine_idle(struct fb_info *info)
{
struct i810fb_par *par = (struct i810fb_par *) info->par;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
int count = WAIT_COUNT;
if (wait_for_space(info, par->iring.size)) /* flush */
@@ -133,7 +133,7 @@ static inline u32 begin_iring(struct fb_info *info, u32 space)
*/
static inline void end_iring(struct i810fb_par *par)
{
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(IRING, mmio, par->cur_tail);
}
@@ -326,7 +326,7 @@ static inline void load_front(int offset, struct fb_info *info)
static inline void i810fb_iring_enable(struct i810fb_par *par, u32 mode)
{
u32 tmp;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
tmp = i810_readl(IRING + 12, mmio);
if (mode == OFF)
@@ -451,7 +451,7 @@ int i810fb_sync(struct fb_info *info)
void i810fb_load_front(u32 offset, struct fb_info *info)
{
struct i810fb_par *par = (struct i810fb_par *) info->par;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
if (!info->var.accel_flags || par->dev_flags & LOCKUP)
i810_writel(DPLYBASE, mmio, par->fb.physical + offset);
@@ -472,7 +472,7 @@ void i810fb_init_ringbuffer(struct fb_info *info)
{
struct i810fb_par *par = (struct i810fb_par *) info->par;
u32 tmp1, tmp2;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
wait_for_engine_idle(info);
i810fb_iring_enable(par, OFF);
diff --git a/drivers/video/i810/i810_gtf.c b/drivers/video/i810/i810_gtf.c
index 7d0c02ca0a42..814698b90fb6 100644
--- a/drivers/video/i810/i810_gtf.c
+++ b/drivers/video/i810/i810_gtf.c
@@ -124,7 +124,8 @@ void i810fb_encode_registers(const struct fb_var_screeninfo *var,
struct i810fb_par *par, u32 xres, u32 yres)
{
int n, blank_s, blank_e;
- u8 *mmio = par->mmio_start_virtual, msr = 0;
+ u8 __iomem *mmio = par->mmio_start_virtual;
+ u8 msr = 0;
/* Horizontal */
/* htotal */
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 6acde2569975..144c82785f59 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -121,7 +121,7 @@ static int dcolor __initdata = 0;
* DESCRIPTION:
* Blanks/unblanks the display
*/
-static void i810_screen_off(u8 *mmio, u8 mode)
+static void i810_screen_off(u8 __iomem *mmio, u8 mode)
{
u32 count = WAIT_COUNT;
u8 val;
@@ -145,7 +145,7 @@ static void i810_screen_off(u8 *mmio, u8 mode)
* Turns off DRAM refresh. Must be off for only 2 vsyncs
* before data becomes corrupt
*/
-static void i810_dram_off(u8 *mmio, u8 mode)
+static void i810_dram_off(u8 __iomem *mmio, u8 mode)
{
u8 val;
@@ -164,7 +164,7 @@ static void i810_dram_off(u8 *mmio, u8 mode)
* The IBM VGA standard allows protection of certain VGA registers.
* This will protect or unprotect them.
*/
-static void i810_protect_regs(u8 *mmio, int mode)
+static void i810_protect_regs(u8 __iomem *mmio, int mode)
{
u8 reg;
@@ -187,7 +187,7 @@ static void i810_protect_regs(u8 *mmio, int mode)
static void i810_load_pll(struct i810fb_par *par)
{
u32 tmp1, tmp2;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
tmp1 = par->regs.M | par->regs.N << 16;
tmp2 = i810_readl(DCLK_2D, mmio);
@@ -212,7 +212,7 @@ static void i810_load_pll(struct i810fb_par *par)
*/
static void i810_load_vga(struct i810fb_par *par)
{
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
/* interlace */
i810_writeb(CR_INDEX_CGA, mmio, CR70);
@@ -255,7 +255,7 @@ static void i810_load_vga(struct i810fb_par *par)
*/
static void i810_load_vgax(struct i810fb_par *par)
{
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_writeb(CR_INDEX_CGA, mmio, CR30);
i810_writeb(CR_DATA_CGA, mmio, par->regs.cr30);
@@ -281,7 +281,8 @@ static void i810_load_vgax(struct i810fb_par *par)
static void i810_load_2d(struct i810fb_par *par)
{
u32 tmp;
- u8 tmp8, *mmio = par->mmio_start_virtual;
+ u8 tmp8;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(FW_BLC, mmio, par->watermark);
tmp = i810_readl(PIXCONF, mmio);
@@ -301,7 +302,7 @@ static void i810_load_2d(struct i810fb_par *par)
* i810_hires - enables high resolution mode
* @mmio: address of register space
*/
-static void i810_hires(u8 *mmio)
+static void i810_hires(u8 __iomem *mmio)
{
u8 val;
@@ -321,7 +322,8 @@ static void i810_hires(u8 *mmio)
static void i810_load_pitch(struct i810fb_par *par)
{
u32 tmp, pitch;
- u8 val, *mmio = par->mmio_start_virtual;
+ u8 val;
+ u8 __iomem *mmio = par->mmio_start_virtual;
pitch = par->pitch >> 3;
i810_writeb(SR_INDEX, mmio, SR01);
@@ -351,9 +353,10 @@ static void i810_load_pitch(struct i810fb_par *par)
*/
static void i810_load_color(struct i810fb_par *par)
{
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
u32 reg1;
u16 reg2;
+
reg1 = i810_readl(PIXCONF, mmio) & ~(0xF0000 | 1 << 27);
reg2 = i810_readw(BLTCNTL, mmio) & ~0x30;
@@ -372,7 +375,7 @@ static void i810_load_color(struct i810fb_par *par)
*/
static void i810_load_regs(struct i810fb_par *par)
{
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_screen_off(mmio, OFF);
i810_protect_regs(mmio, OFF);
@@ -390,7 +393,7 @@ static void i810_load_regs(struct i810fb_par *par)
}
static void i810_write_dac(u8 regno, u8 red, u8 green, u8 blue,
- u8 *mmio)
+ u8 __iomem *mmio)
{
i810_writeb(CLUT_INDEX_WRITE, mmio, regno);
i810_writeb(CLUT_DATA, mmio, red);
@@ -399,7 +402,7 @@ static void i810_write_dac(u8 regno, u8 red, u8 green, u8 blue,
}
static void i810_read_dac(u8 regno, u8 *red, u8 *green, u8 *blue,
- u8 *mmio)
+ u8 __iomem *mmio)
{
i810_writeb(CLUT_INDEX_READ, mmio, regno);
*red = i810_readb(CLUT_DATA, mmio);
@@ -413,7 +416,7 @@ static void i810_read_dac(u8 regno, u8 *red, u8 *green, u8 *blue,
static void i810_restore_pll(struct i810fb_par *par)
{
u32 tmp1, tmp2;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
tmp1 = par->hw_state.dclk_2d;
tmp2 = i810_readl(DCLK_2D, mmio);
@@ -433,7 +436,7 @@ static void i810_restore_pll(struct i810fb_par *par)
static void i810_restore_dac(struct i810fb_par *par)
{
u32 tmp1, tmp2;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
tmp1 = par->hw_state.pixconf;
tmp2 = i810_readl(PIXCONF, mmio);
@@ -444,7 +447,8 @@ static void i810_restore_dac(struct i810fb_par *par)
static void i810_restore_vgax(struct i810fb_par *par)
{
- u8 i, j, *mmio = par->mmio_start_virtual;
+ u8 i, j;
+ u8 __iomem *mmio = par->mmio_start_virtual;
for (i = 0; i < 4; i++) {
i810_writeb(CR_INDEX_CGA, mmio, CR30+i);
@@ -477,7 +481,8 @@ static void i810_restore_vgax(struct i810fb_par *par)
static void i810_restore_vga(struct i810fb_par *par)
{
- u8 i, *mmio = par->mmio_start_virtual;
+ u8 i;
+ u8 __iomem *mmio = par->mmio_start_virtual;
for (i = 0; i < 10; i++) {
i810_writeb(CR_INDEX_CGA, mmio, CR00 + i);
@@ -491,7 +496,8 @@ static void i810_restore_vga(struct i810fb_par *par)
static void i810_restore_addr_map(struct i810fb_par *par)
{
- u8 tmp, *mmio = par->mmio_start_virtual;
+ u8 tmp;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_writeb(GR_INDEX, mmio, GR10);
tmp = i810_readb(GR_DATA, mmio);
@@ -505,7 +511,7 @@ static void i810_restore_2d(struct i810fb_par *par)
{
u32 tmp_long;
u16 tmp_word;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
tmp_word = i810_readw(BLTCNTL, mmio);
tmp_word &= ~(3 << 4);
@@ -534,7 +540,7 @@ static void i810_restore_2d(struct i810fb_par *par)
static void i810_restore_vga_state(struct i810fb_par *par)
{
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_screen_off(mmio, OFF);
i810_protect_regs(mmio, OFF);
@@ -556,7 +562,8 @@ static void i810_restore_vga_state(struct i810fb_par *par)
static void i810_save_vgax(struct i810fb_par *par)
{
- u8 i, *mmio = par->mmio_start_virtual;
+ u8 i;
+ u8 __iomem *mmio = par->mmio_start_virtual;
for (i = 0; i < 4; i++) {
i810_writeb(CR_INDEX_CGA, mmio, CR30 + i);
@@ -579,7 +586,8 @@ static void i810_save_vgax(struct i810fb_par *par)
static void i810_save_vga(struct i810fb_par *par)
{
- u8 i, *mmio = par->mmio_start_virtual;
+ u8 i;
+ u8 __iomem *mmio = par->mmio_start_virtual;
for (i = 0; i < 10; i++) {
i810_writeb(CR_INDEX_CGA, mmio, CR00 + i);
@@ -593,7 +601,7 @@ static void i810_save_vga(struct i810fb_par *par)
static void i810_save_2d(struct i810fb_par *par)
{
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
par->hw_state.dclk_2d = i810_readl(DCLK_2D, mmio);
par->hw_state.dclk_1d = i810_readl(DCLK_1D, mmio);
@@ -716,7 +724,7 @@ static void i810_calc_dclk(u32 freq, u32 *m, u32 *n, u32 *p)
* Description:
* Shows or hides the hardware cursor
*/
-void i810_enable_cursor(u8 *mmio, int mode)
+void i810_enable_cursor(u8 __iomem *mmio, int mode)
{
u32 temp;
@@ -729,7 +737,7 @@ void i810_enable_cursor(u8 *mmio, int mode)
static void i810_reset_cursor_image(struct i810fb_par *par)
{
- u8 *addr = par->cursor_heap.virtual;
+ u8 __iomem *addr = par->cursor_heap.virtual;
int i, j;
for (i = 64; i--; ) {
@@ -744,7 +752,7 @@ static void i810_reset_cursor_image(struct i810fb_par *par)
static void i810_load_cursor_image(int width, int height, u8 *data,
struct i810fb_par *par)
{
- u8 *addr = par->cursor_heap.virtual;
+ u8 __iomem *addr = par->cursor_heap.virtual;
int i, j, w = width/8;
int mod = width % 8, t_mask, d_mask;
@@ -766,8 +774,8 @@ static void i810_load_cursor_image(int width, int height, u8 *data,
static void i810_load_cursor_colors(int fg, int bg, struct fb_info *info)
{
struct i810fb_par *par = (struct i810fb_par *) info->par;
- u8 *mmio = par->mmio_start_virtual, temp;
- u8 red, green, blue, trans;
+ u8 __iomem *mmio = par->mmio_start_virtual;
+ u8 red, green, blue, trans, temp;
i810fb_getcolreg(bg, &red, &green, &blue, &trans, info);
@@ -796,7 +804,7 @@ static void i810_load_cursor_colors(int fg, int bg, struct fb_info *info)
*/
static void i810_init_cursor(struct i810fb_par *par)
{
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
i810_enable_cursor(mmio, OFF);
i810_writel(CURBASE, mmio, par->cursor_heap.physical);
@@ -1124,7 +1132,8 @@ static int i810fb_getcolreg(u8 regno, u8 *red, u8 *green, u8 *blue,
u8 *transp, struct fb_info *info)
{
struct i810fb_par *par = (struct i810fb_par *) info->par;
- u8 *mmio = par->mmio_start_virtual, temp;
+ u8 __iomem *mmio = par->mmio_start_virtual;
+ u8 temp;
if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
if ((info->var.green.length == 5 && regno > 31) ||
@@ -1167,7 +1176,7 @@ static int i810fb_open(struct fb_info *info, int user)
if (count == 0) {
memset(&par->state, 0, sizeof(struct vgastate));
par->state.flags = VGA_SAVE_CMAP;
- par->state.vgabase = (caddr_t) par->mmio_start_virtual;
+ par->state.vgabase = par->mmio_start_virtual;
save_vga(&par->state);
i810_save_vga_state(par);
@@ -1203,7 +1212,8 @@ static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
struct fb_info *info)
{
struct i810fb_par *par = (struct i810fb_par *) info->par;
- u8 *mmio = par->mmio_start_virtual, temp;
+ u8 __iomem *mmio = par->mmio_start_virtual;
+ u8 temp;
int i;
if (regno > 255) return 1;
@@ -1308,7 +1318,7 @@ static int i810fb_pan_display(struct fb_var_screeninfo *var,
static int i810fb_blank (int blank_mode, struct fb_info *info)
{
struct i810fb_par *par = (struct i810fb_par *) info->par;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
int mode = 0, pwr, scr_off = 0;
pwr = i810_readl(PWR_CLKC, mmio);
@@ -1391,7 +1401,7 @@ static int i810fb_check_var(struct fb_var_screeninfo *var,
static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct i810fb_par *par = (struct i810fb_par *)info->par;
- u8 *mmio = par->mmio_start_virtual;
+ u8 __iomem *mmio = par->mmio_start_virtual;
if (!info->var.accel_flags || par->dev_flags & LOCKUP)
return soft_cursor(info, cursor);
@@ -1724,7 +1734,8 @@ static void __devinit i810_init_defaults(struct i810fb_par *par,
*/
static void __devinit i810_init_device(struct i810fb_par *par)
{
- u8 reg, *mmio = par->mmio_start_virtual;
+ u8 reg;
+ u8 __iomem *mmio = par->mmio_start_virtual;
if (mtrr) set_mtrr(par);
@@ -1855,20 +1866,13 @@ static int __devinit i810fb_init_pci (struct pci_dev *dev,
int i, err = -1, vfreq, hfreq, pixclock;
i = 0;
- if (!(info = kmalloc(sizeof(struct fb_info), GFP_KERNEL))) {
- i810fb_release_resource(info, par);
- return -ENOMEM;
- }
- memset(info, 0, sizeof(struct fb_info));
- if(!(par = kmalloc(sizeof(struct i810fb_par), GFP_KERNEL))) {
- i810fb_release_resource(info, par);
+ info = framebuffer_alloc(sizeof(struct i810fb_par), &dev->dev);
+ if (!info)
return -ENOMEM;
- }
- memset(par, 0, sizeof(struct i810fb_par));
+ par = (struct i810fb_par *) info->par;
par->dev = dev;
- info->par = par;
if (!(info->pixmap.addr = kmalloc(64*1024, GFP_KERNEL))) {
i810fb_release_resource(info, par);
@@ -1941,38 +1945,36 @@ static int __devinit i810fb_init_pci (struct pci_dev *dev,
static void i810fb_release_resource(struct fb_info *info,
struct i810fb_par *par)
{
- if (par) {
- unset_mtrr(par);
- if (par->drm_agp) {
- drm_agp_t *agp = par->drm_agp;
- struct gtt_data *gtt = &par->i810_gtt;
-
- if (par->i810_gtt.i810_cursor_memory)
- agp->free_memory(gtt->i810_cursor_memory);
- if (par->i810_gtt.i810_fb_memory)
- agp->free_memory(gtt->i810_fb_memory);
-
- inter_module_put("drm_agp");
- par->drm_agp = NULL;
- }
+ unset_mtrr(par);
+ if (par->drm_agp) {
+ drm_agp_t *agp = par->drm_agp;
+ struct gtt_data *gtt = &par->i810_gtt;
+
+ if (par->i810_gtt.i810_cursor_memory)
+ agp->free_memory(gtt->i810_cursor_memory);
+ if (par->i810_gtt.i810_fb_memory)
+ agp->free_memory(gtt->i810_fb_memory);
+
+ inter_module_put("drm_agp");
+ par->drm_agp = NULL;
+ }
- if (par->mmio_start_virtual)
- iounmap(par->mmio_start_virtual);
- if (par->aperture.virtual)
- iounmap(par->aperture.virtual);
+ if (par->mmio_start_virtual)
+ iounmap(par->mmio_start_virtual);
+ if (par->aperture.virtual)
+ iounmap(par->aperture.virtual);
- if (par->res_flags & FRAMEBUFFER_REQ)
- release_mem_region(par->aperture.physical,
- par->aperture.size);
- if (par->res_flags & MMIO_REQ)
- release_mem_region(par->mmio_start_phys, MMIO_SIZE);
+ if (par->res_flags & FRAMEBUFFER_REQ)
+ release_mem_region(par->aperture.physical,
+ par->aperture.size);
+ if (par->res_flags & MMIO_REQ)
+ release_mem_region(par->mmio_start_phys, MMIO_SIZE);
- if (par->res_flags & PCI_DEVICE_ENABLED)
- pci_disable_device(par->dev);
+ if (par->res_flags & PCI_DEVICE_ENABLED)
+ pci_disable_device(par->dev);
+
+ framebuffer_release(info);
- kfree(par);
- }
- kfree(info);
}
static void __exit i810fb_remove_pci(struct pci_dev *dev)
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index dd6b16c845c2..f9d77b0f51ab 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -531,6 +531,7 @@ int __init igafb_init(void)
info->var = default_var;
info->fix = igafb_fix;
info->pseudo_palette = (void *)(par + 1);
+ info->device = &pdev->dev;
if (!iga_init(info, par)) {
iounmap((void *)par->io_base);
diff --git a/drivers/video/imsttfb.c b/drivers/video/imsttfb.c
index c4a07f27c18c..d51e8f080fe1 100644
--- a/drivers/video/imsttfb.c
+++ b/drivers/video/imsttfb.c
@@ -1524,6 +1524,7 @@ imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
info->par = par;
info->pseudo_palette = (void *) (par + 1);
+ info->device = &pdev->dev;
init_imstt(info);
pci_set_drvdata(pdev, info);
diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/kyro/fbdev.c
index 2352a9142a77..51a6de200f73 100644
--- a/drivers/video/kyro/fbdev.c
+++ b/drivers/video/kyro/fbdev.c
@@ -735,6 +735,7 @@ static int __devinit kyrofb_probe(struct pci_dev *pdev,
fb_memset(info->screen_base, 0, size);
+ info->device = &pdev->dev;
if (register_framebuffer(info) < 0)
goto out_unmap;
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index fd38e0536e94..8f14c9b300c2 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -411,12 +411,7 @@ static void matroxfb_1bpp_imageblit(WPMINFO u_int32_t fgx, u_int32_t bgx,
CRITBEGIN
-#ifdef __BIG_ENDIAN
- WaitTillIdle();
- mga_outl(M_OPMODE, M_OPMODE_8BPP);
-#else
mga_fifo(3);
-#endif
if (easy)
mga_outl(M_DWGCTL, M_DWG_ILOAD | M_DWG_SGNZERO | M_DWG_SHIFTZERO | M_DWG_BMONOWF | M_DWG_LINEAR | M_DWG_REPLACE);
else
@@ -432,32 +427,24 @@ static void matroxfb_1bpp_imageblit(WPMINFO u_int32_t fgx, u_int32_t bgx,
mga_writel(mmio, M_AR3, 0);
if (easy) {
mga_writel(mmio, M_YDSTLEN | M_EXEC, ydstlen);
- mga_memcpy_toio(mmio, 0, chardata, xlen);
+ mga_memcpy_toio(mmio, chardata, xlen);
} else {
mga_writel(mmio, M_AR5, 0);
mga_writel(mmio, M_YDSTLEN | M_EXEC, ydstlen);
if ((step & 3) == 0) {
/* Great. Source has 32bit aligned lines, so we can feed them
directly to the accelerator. */
- mga_memcpy_toio(mmio, 0, chardata, charcell);
+ mga_memcpy_toio(mmio, chardata, charcell);
} else if (step == 1) {
/* Special case for 1..8bit widths */
while (height--) {
-#ifdef __LITTLE_ENDIAN
mga_writel(mmio, 0, *chardata);
-#else
- mga_writel(mmio, 0, (*chardata) << 24);
-#endif
chardata++;
}
} else if (step == 2) {
/* Special case for 9..15bit widths */
while (height--) {
-#ifdef __LITTLE_ENDIAN
mga_writel(mmio, 0, *(u_int16_t*)chardata);
-#else
- mga_writel(mmio, 0, (*(u_int16_t*)chardata) << 16);
-#endif
chardata += 2;
}
} else {
@@ -474,9 +461,6 @@ static void matroxfb_1bpp_imageblit(WPMINFO u_int32_t fgx, u_int32_t bgx,
}
}
WaitTillIdle();
-#ifdef __BIG_ENDIAN
- mga_outl(M_OPMODE, ACCESS_FBINFO(accel.m_opmode));
-#endif
CRITEND
}
@@ -486,7 +470,7 @@ static void matroxfb_imageblit(struct fb_info* info, const struct fb_image* imag
DBG_HEAVY(__FUNCTION__);
- if (image->depth == 0) {
+ if (image->depth == 1) {
u_int32_t fgx, bgx;
fgx = ((u_int32_t*)info->pseudo_palette)[image->fg_color];
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index e2a2290b628a..47439c4d276a 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -1143,6 +1143,7 @@ static int matroxfb_ioctl(struct inode *inode, struct file *file,
return -EFAULT;
return err;
}
+ case VIDIOC_S_CTRL_OLD:
case VIDIOC_S_CTRL:
{
struct v4l2_control ctrl;
@@ -1750,6 +1751,12 @@ static int initMatrox2(WPMINFO struct board* b){
ACCESS_FBINFO(fbcon.pseudo_palette) = ACCESS_FBINFO(cmap);
/* after __init time we are like module... no logo */
ACCESS_FBINFO(fbcon.flags) = hotplug ? FBINFO_FLAG_MODULE : FBINFO_FLAG_DEFAULT;
+ ACCESS_FBINFO(fbcon.flags) |= FBINFO_PARTIAL_PAN_OK | /* Prefer panning for scroll under MC viewer/edit */
+ FBINFO_HWACCEL_COPYAREA | /* We have hw-assisted bmove */
+ FBINFO_HWACCEL_FILLRECT | /* And fillrect */
+ FBINFO_HWACCEL_IMAGEBLIT | /* And imageblit */
+ FBINFO_HWACCEL_XPAN | /* And we support both horizontal */
+ FBINFO_HWACCEL_YPAN; /* And vertical panning */
ACCESS_FBINFO(video.len_usable) &= PAGE_MASK;
fb_alloc_cmap(&ACCESS_FBINFO(fbcon.cmap), 256, 1);
@@ -1864,6 +1871,7 @@ static int initMatrox2(WPMINFO struct board* b){
/* We do not have to set currcon to 0... register_framebuffer do it for us on first console
* and we do not want currcon == 0 for subsequent framebuffers */
+ ACCESS_FBINFO(fbcon).device = &ACCESS_FBINFO(pcidev)->dev;
if (register_framebuffer(&ACCESS_FBINFO(fbcon)) < 0) {
goto failVideoIO;
}
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index 79a6873f7a59..e62fcc84054b 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -93,29 +93,6 @@
#endif /* MATROXFB_DEBUG */
-#if !defined(__i386__) && !defined(__x86_64__)
-#ifndef ioremap_nocache
-#define ioremap_nocache(X,Y) ioremap(X,Y)
-#endif
-#endif
-
-#if defined(__alpha__) || defined(__mc68000__) || defined(__i386__) || defined(__x86_64__)
-#define READx_WORKS
-#define MEMCPYTOIO_WORKS
-#else
-/* ppc/ppc64 must use __raw_{read,write}[bwl] as we drive adapter
- in big-endian mode for compatibility with XFree mga driver, and
- so we do not want little-endian {read,write}[bwl] */
-#define READx_FAILS
-#define MEMCPYTOIO_WRITEL
-#endif
-
-#if defined(__mc68000__)
-#define MAP_BUSTOVIRT
-#else
-#define MAP_IOREMAP
-#endif
-
#ifdef DEBUG
#define dprintk(X...) printk(X)
#else
@@ -155,22 +132,13 @@
#endif
typedef struct {
- u_int8_t __iomem* vaddr;
+ void __iomem* vaddr;
} vaddr_t;
-#ifdef READx_WORKS
static inline unsigned int mga_readb(vaddr_t va, unsigned int offs) {
return readb(va.vaddr + offs);
}
-static inline unsigned int mga_readw(vaddr_t va, unsigned int offs) {
- return readw(va.vaddr + offs);
-}
-
-static inline u_int32_t mga_readl(vaddr_t va, unsigned int offs) {
- return readl(va.vaddr + offs);
-}
-
static inline void mga_writeb(vaddr_t va, unsigned int offs, u_int8_t value) {
writeb(value, va.vaddr + offs);
}
@@ -179,62 +147,42 @@ static inline void mga_writew(vaddr_t va, unsigned int offs, u_int16_t value) {
writew(value, va.vaddr + offs);
}
-static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) {
- writel(value, va.vaddr + offs);
-}
-#else
-static inline unsigned int mga_readb(vaddr_t va, unsigned int offs) {
- return __raw_readb(va.vaddr + offs);
-}
-
-static inline unsigned int mga_readw(vaddr_t va, unsigned int offs) {
- return __raw_readw(va.vaddr + offs);
-}
-
static inline u_int32_t mga_readl(vaddr_t va, unsigned int offs) {
- return __raw_readl(va.vaddr + offs);
-}
-
-static inline void mga_writeb(vaddr_t va, unsigned int offs, u_int8_t value) {
- __raw_writeb(value, va.vaddr + offs);
-}
-
-static inline void mga_writew(vaddr_t va, unsigned int offs, u_int16_t value) {
- __raw_writew(value, va.vaddr + offs);
+ return readl(va.vaddr + offs);
}
static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) {
- __raw_writel(value, va.vaddr + offs);
+ writel(value, va.vaddr + offs);
}
-#endif
-static inline void mga_memcpy_toio(vaddr_t va, unsigned int offs, const void* src, int len) {
-#ifdef MEMCPYTOIO_WORKS
- memcpy_toio(va.vaddr + offs, src, len);
-#elif defined(MEMCPYTOIO_WRITEL)
- if (offs & 3) {
+static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) {
+#if defined(__alpha__) || defined(__i386__) || defined(__x86_64__)
+ /*
+ * memcpy_toio works for us if:
+ * (1) Copies data as 32bit quantities, not byte after byte,
+ * (2) Performs LE ordered stores, and
+ * (3) It copes with unaligned source (destination is guaranteed to be page
+ * aligned and length is guaranteed to be multiple of 4).
+ */
+ memcpy_toio(va.vaddr, src, len);
+#else
+ u_int32_t __iomem* addr = va.vaddr;
+
+ if ((unsigned long)src & 3) {
while (len >= 4) {
- mga_writel(va, offs, get_unaligned((u32 *)src));
- offs += 4;
+ writel(get_unaligned((u32 *)src), addr);
+ addr++;
len -= 4;
src += 4;
}
} else {
while (len >= 4) {
- mga_writel(va, offs, *(u32 *)src);
- offs += 4;
+ writel(*(u32 *)src, addr);
+ addr++;
len -= 4;
src += 4;
}
}
- if (len) {
- u_int32_t tmp;
-
- memcpy(&tmp, src, len);
- mga_writel(va, offs, tmp);
- }
-#else
-#error "Sorry, do not know how to write block of data to device"
#endif
}
@@ -252,25 +200,15 @@ static inline void __iomem* vaddr_va(vaddr_t va) {
#define MGA_IOREMAP_FB MGA_IOREMAP_NOCACHE
#define MGA_IOREMAP_MMIO MGA_IOREMAP_NOCACHE
static inline int mga_ioremap(unsigned long phys, unsigned long size, int flags, vaddr_t* virt) {
-#ifdef MAP_IOREMAP
if (flags & MGA_IOREMAP_NOCACHE)
virt->vaddr = ioremap_nocache(phys, size);
else
virt->vaddr = ioremap(phys, size);
-#else
-#ifdef MAP_BUSTOVIRT
- virt->vaddr = bus_to_virt(phys);
-#else
-#error "Your architecture does not have neither ioremap nor bus_to_virt... Giving up"
-#endif
-#endif
return (virt->vaddr == 0); /* 0, !0... 0, error_code in future */
}
static inline void mga_iounmap(vaddr_t va) {
-#ifdef MAP_IOREMAP
iounmap(va.vaddr);
-#endif
}
struct my_timming {
@@ -774,11 +712,11 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv);
#define DAC_XGENIOCTRL 0x2A
#define DAC_XGENIODATA 0x2B
-#define M_C2CTL 0x3E10
+#define M_C2CTL 0x3C10
-#ifdef __LITTLE_ENDIAN
-#define MX_OPTION_BSWAP 0x00000000
+#define MX_OPTION_BSWAP 0x00000000
+#ifdef __LITTLE_ENDIAN
#define M_OPMODE_4BPP (M_OPMODE_DMA_LE | M_OPMODE_DIR_LE | M_OPMODE_DMA_BLIT)
#define M_OPMODE_8BPP (M_OPMODE_DMA_LE | M_OPMODE_DIR_LE | M_OPMODE_DMA_BLIT)
#define M_OPMODE_16BPP (M_OPMODE_DMA_LE | M_OPMODE_DIR_LE | M_OPMODE_DMA_BLIT)
@@ -786,29 +724,23 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv);
#define M_OPMODE_32BPP (M_OPMODE_DMA_LE | M_OPMODE_DIR_LE | M_OPMODE_DMA_BLIT)
#else
#ifdef __BIG_ENDIAN
-#define MX_OPTION_BSWAP 0x80000000
-
-#define M_OPMODE_4BPP (M_OPMODE_DMA_LE | M_OPMODE_DIR_LE | M_OPMODE_DMA_BLIT) /* TODO */
-#define M_OPMODE_8BPP (M_OPMODE_DMA_BE_8BPP | M_OPMODE_DIR_BE_8BPP | M_OPMODE_DMA_BLIT)
-#define M_OPMODE_16BPP (M_OPMODE_DMA_BE_16BPP | M_OPMODE_DIR_BE_16BPP | M_OPMODE_DMA_BLIT)
-#define M_OPMODE_24BPP (M_OPMODE_DMA_BE_8BPP | M_OPMODE_DIR_BE_8BPP | M_OPMODE_DMA_BLIT) /* TODO, ?32 */
-#define M_OPMODE_32BPP (M_OPMODE_DMA_BE_32BPP | M_OPMODE_DIR_BE_32BPP | M_OPMODE_DMA_BLIT)
+#define M_OPMODE_4BPP (M_OPMODE_DMA_LE | M_OPMODE_DIR_LE | M_OPMODE_DMA_BLIT) /* TODO */
+#define M_OPMODE_8BPP (M_OPMODE_DMA_LE | M_OPMODE_DIR_BE_8BPP | M_OPMODE_DMA_BLIT)
+#define M_OPMODE_16BPP (M_OPMODE_DMA_LE | M_OPMODE_DIR_BE_16BPP | M_OPMODE_DMA_BLIT)
+#define M_OPMODE_24BPP (M_OPMODE_DMA_LE | M_OPMODE_DIR_BE_8BPP | M_OPMODE_DMA_BLIT) /* TODO, ?32 */
+#define M_OPMODE_32BPP (M_OPMODE_DMA_LE | M_OPMODE_DIR_BE_32BPP | M_OPMODE_DMA_BLIT)
#else
#error "Byte ordering have to be defined. Cannot continue."
#endif
#endif
-#define mga_inb(addr) mga_readb(ACCESS_FBINFO(mmio.vbase), (addr))
-#define mga_inl(addr) mga_readl(ACCESS_FBINFO(mmio.vbase), (addr))
-#define mga_outb(addr,val) mga_writeb(ACCESS_FBINFO(mmio.vbase), (addr), (val))
-#define mga_outw(addr,val) mga_writew(ACCESS_FBINFO(mmio.vbase), (addr), (val))
-#define mga_outl(addr,val) mga_writel(ACCESS_FBINFO(mmio.vbase), (addr), (val))
-#define mga_readr(port,idx) (mga_outb((port),(idx)), mga_inb((port)+1))
-#ifdef __LITTLE_ENDIAN
-#define mga_setr(addr,port,val) mga_outw(addr, ((val)<<8) | (port))
-#else
-#define mga_setr(addr,port,val) do { mga_outb(addr, port); mga_outb((addr)+1, val); } while (0)
-#endif
+#define mga_inb(addr) mga_readb(ACCESS_FBINFO(mmio.vbase), (addr))
+#define mga_inl(addr) mga_readl(ACCESS_FBINFO(mmio.vbase), (addr))
+#define mga_outb(addr,val) mga_writeb(ACCESS_FBINFO(mmio.vbase), (addr), (val))
+#define mga_outw(addr,val) mga_writew(ACCESS_FBINFO(mmio.vbase), (addr), (val))
+#define mga_outl(addr,val) mga_writel(ACCESS_FBINFO(mmio.vbase), (addr), (val))
+#define mga_readr(port,idx) (mga_outb((port),(idx)), mga_inb((port)+1))
+#define mga_setr(addr,port,val) mga_outw(addr, ((val)<<8) | (port))
#define mga_fifo(n) do {} while ((mga_inl(M_FIFOSTATUS) & 0xFF) < (n))
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 75d19696ffe4..c5230bbe1da4 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -603,6 +603,8 @@ static int matroxfb_dh_regit(CPMINFO struct matroxfb_dh_fb_info* m2info) {
m2info->fbcon.fbops = &matroxfb_dh_ops;
m2info->fbcon.flags = FBINFO_FLAG_DEFAULT;
+ m2info->fbcon.flags |= FBINFO_HWACCEL_XPAN |
+ FBINFO_HWACCEL_YPAN;
m2info->fbcon.currcon = -1;
m2info->fbcon.pseudo_palette = m2info->cmap;
fb_alloc_cmap(&m2info->fbcon.cmap, 256, 1);
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index a6ecf9674f40..f4b20704db6c 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -939,6 +939,7 @@ static int __devinit pvr2fb_pci_probe(struct pci_dev *pdev,
pvr2_fix.mmio_start = pci_resource_start(pdev, 1);
pvr2_fix.mmio_len = pci_resource_len(pdev, 1);
+ fbinfo->device = &pdev->dev;
return pvr2fb_common_init();
}
diff --git a/drivers/video/radeonfb.c b/drivers/video/radeonfb.c
index eedb2b8e93a5..82f8e1529498 100644
--- a/drivers/video/radeonfb.c
+++ b/drivers/video/radeonfb.c
@@ -3040,7 +3040,7 @@ static int radeonfb_pci_register (struct pci_dev *pdev,
pci_set_drvdata(pdev, rinfo);
rinfo->next = board_list;
board_list = rinfo;
-
+ ((struct fb_info *) rinfo)->device = &pdev->dev;
if (register_framebuffer ((struct fb_info *) rinfo) < 0) {
printk ("radeonfb: could not register framebuffer\n");
iounmap(rinfo->fb_base);
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 825b39dbe91a..f8772da6a59a 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -1858,21 +1858,17 @@ static int __devinit rivafb_probe(struct pci_dev *pd,
NVTRACE_ENTER();
assert(pd != NULL);
- info = kmalloc(sizeof(struct fb_info), GFP_KERNEL);
+ info = framebuffer_alloc(sizeof(struct riva_par), &pd->dev);
+
if (!info)
goto err_out;
- default_par = kmalloc(sizeof(struct riva_par), GFP_KERNEL);
- if (!default_par)
- goto err_out_kfree;
-
- memset(info, 0, sizeof(struct fb_info));
- memset(default_par, 0, sizeof(struct riva_par));
+ default_par = (struct riva_par *) info->par;
default_par->pdev = pd;
info->pixmap.addr = kmalloc(64 * 1024, GFP_KERNEL);
if (info->pixmap.addr == NULL)
- goto err_out_kfree1;
+ goto err_out_kfree;
memset(info->pixmap.addr, 0, 64 * 1024);
if (pci_enable_device(pd)) {
@@ -1896,7 +1892,7 @@ static int __devinit rivafb_probe(struct pci_dev *pd,
if(default_par->riva.Architecture == 0) {
printk(KERN_ERR PFX "unknown NV_ARCH\n");
- goto err_out_kfree1;
+ goto err_out_free_base0;
}
if(default_par->riva.Architecture == NV_ARCH_10 ||
default_par->riva.Architecture == NV_ARCH_20 ||
@@ -2001,7 +1997,6 @@ static int __devinit rivafb_probe(struct pci_dev *pd,
fb_destroy_modedb(info->monspecs.modedb);
info->monspecs.modedb_len = 0;
info->monspecs.modedb = NULL;
-
if (register_framebuffer(info) < 0) {
printk(KERN_ERR PFX
"error registering riva framebuffer\n");
@@ -2040,10 +2035,8 @@ err_out_request:
pci_disable_device(pd);
err_out_enable:
kfree(info->pixmap.addr);
-err_out_kfree1:
- kfree(default_par);
err_out_kfree:
- kfree(info);
+ framebuffer_release(info);
err_out:
return -ENODEV;
}
@@ -2077,8 +2070,7 @@ static void __exit rivafb_remove(struct pci_dev *pd)
pci_release_regions(pd);
pci_disable_device(pd);
kfree(info->pixmap.addr);
- kfree(par);
- kfree(info);
+ framebuffer_release(info);
pci_set_drvdata(pd, NULL);
NVTRACE_LEAVE();
}
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index e3ea32b6cafb..46dfcce9fcaf 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -1507,6 +1507,7 @@ static int __devinit sstfb_probe(struct pci_dev *pdev,
fb_alloc_cmap(&info->cmap, 256, 0);
/* register fb */
+ info->device = &pdev->dev;
if (register_framebuffer(info) < 0) {
eprintk("can't register framebuffer.\n");
goto fail;
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 503ecc3569e8..263a40288118 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -202,7 +202,6 @@ static unsigned long do_lfb_size(struct tdfx_par *par, unsigned short);
*/
static int nopan = 0;
static int nowrap = 1; // not implemented (yet)
-static int inverse = 0;
static char *mode_option __initdata = NULL;
/* -------------------------------------------------------------------------
@@ -921,7 +920,6 @@ static void tdfxfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect
tdfx_outl(par, COMMAND_2D, COMMAND_2D_FILLRECT | (tdfx_rop << 24));
tdfx_outl(par, DSTSIZE, rect->width | (rect->height << 16));
tdfx_outl(par, LAUNCH_2D, rect->dx | (rect->dy << 16));
- banshee_wait_idle(info);
}
/*
@@ -957,7 +955,6 @@ static void tdfxfb_copyarea(struct fb_info *info, const struct fb_copyarea *area
tdfx_outl(par, DSTSIZE, area->width | (area->height << 16));
tdfx_outl(par, DSTXY, dx | (dy << 16));
tdfx_outl(par, LAUNCH_2D, sx | (sy << 16));
- banshee_wait_idle(info);
}
static void tdfxfb_imageblit(struct fb_info *info, const struct fb_image *image)
@@ -1025,7 +1022,6 @@ static void tdfxfb_imageblit(struct fb_info *info, const struct fb_image *image)
case 2: tdfx_outl(par, LAUNCH_2D,*(u16*)chardata); break;
case 3: tdfx_outl(par, LAUNCH_2D,*(u16*)chardata | ((chardata[3]) << 24)); break;
}
- banshee_wait_idle(info);
}
#endif /* CONFIG_FB_3DFX_ACCEL */
@@ -1397,10 +1393,7 @@ void tdfxfb_setup(char *options)
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt)
continue;
- if (!strcmp(this_opt, "inverse")) {
- inverse = 1;
- fb_invert_cmaps();
- } else if(!strcmp(this_opt, "nopan")) {
+ if(!strcmp(this_opt, "nopan")) {
nopan = 1;
} else if(!strcmp(this_opt, "nowrap")) {
nowrap = 1;
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index 07ee3202e92c..ffe811038a9a 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -1454,6 +1454,7 @@ tgafb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
tgafb_set_par(&all->info);
tgafb_init_fix(&all->info);
+ all->info.device = &pdev->dev;
if (register_framebuffer(&all->info) < 0) {
printk(KERN_ERR "tgafb: Could not register framebuffer\n");
ret = -EINVAL;
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index aea0e05c882f..83184c78606a 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -1164,6 +1164,7 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, const struct pci_de
default_var.accel_flags &= ~FB_ACCELF_TEXT;
default_var.activate |= FB_ACTIVATE_NOW;
fb_info.var = default_var;
+ fb_info.device = &dev->dev;
if (register_framebuffer(&fb_info) < 0) {
output("Could not register Trident framebuffer\n");
return -EINVAL;
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index c5c649d9b1ae..9429e6c63708 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -49,7 +49,8 @@ static struct fb_fix_screeninfo vesafb_fix __initdata = {
static int inverse = 0;
static int mtrr = 1;
-static int vram __initdata = 0; /* Set amount of memory to be used */
+static int vram_remap __initdata = 0; /* Set amount of memory to be used */
+static int vram_total __initdata = 0; /* Set total amount of memory */
static int pmi_setpal = 0; /* pmi for palette changes ??? */
static int ypan = 0; /* 0..nothing, 1..ypan, 2..ywrap */
static unsigned short *pmi_base = NULL;
@@ -209,8 +210,10 @@ int __init vesafb_setup(char *options)
mtrr=1;
else if (! strcmp(this_opt, "nomtrr"))
mtrr=0;
- else if (! strncmp(this_opt, "vram:", 5))
- vram = simple_strtoul(this_opt+5, NULL, 0);
+ else if (! strncmp(this_opt, "vtotal:", 7))
+ vram_total = simple_strtoul(this_opt+7, NULL, 0);
+ else if (! strncmp(this_opt, "vremap:", 7))
+ vram_remap = simple_strtoul(this_opt+7, NULL, 0);
}
return 0;
}
@@ -220,6 +223,9 @@ static int __init vesafb_probe(struct device *device)
struct platform_device *dev = to_platform_device(device);
struct fb_info *info;
int i, err;
+ unsigned int size_vmode;
+ unsigned int size_remap;
+ unsigned int size_total;
if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
return -ENXIO;
@@ -231,32 +237,41 @@ static int __init vesafb_probe(struct device *device)
vesafb_defined.xres = screen_info.lfb_width;
vesafb_defined.yres = screen_info.lfb_height;
vesafb_fix.line_length = screen_info.lfb_linelength;
-
- /* Allocate enough memory for double buffering */
- vesafb_fix.smem_len = screen_info.lfb_width * screen_info.lfb_height * vesafb_defined.bits_per_pixel >> 2;
-
- /* check that we don't remap more memory than old cards have */
- if (vesafb_fix.smem_len > (screen_info.lfb_size * 65536))
- vesafb_fix.smem_len = screen_info.lfb_size * 65536;
-
- /* Set video size according to vram boot option */
- if (vram)
- vesafb_fix.smem_len = vram * 1024 * 1024;
-
vesafb_fix.visual = (vesafb_defined.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
- /* limit framebuffer size to 16 MB. Otherwise we'll eat tons of
- * kernel address space for nothing if the gfx card has alot of
- * memory (>= 128 MB isn't uncommon these days ...) */
- if (vesafb_fix.smem_len > 16 * 1024 * 1024)
- vesafb_fix.smem_len = 16 * 1024 * 1024;
+ /* size_vmode -- that is the amount of memory needed for the
+ * used video mode, i.e. the minimum amount of
+ * memory we need. */
+ size_vmode = vesafb_defined.yres * vesafb_fix.line_length;
+
+ /* size_total -- all video memory we have. Used for mtrr
+ * entries, ressource allocation and bounds
+ * checking. */
+ size_total = screen_info.lfb_size * 65536;
+ if (vram_total)
+ size_total = vram_total * 1024 * 1024;
+ if (size_total < size_vmode)
+ size_total = size_vmode;
+
+ /* size_remap -- the amount of video memory we are going to
+ * use for vesafb. With modern cards it is no
+ * option to simply use size_total as that
+ * wastes plenty of kernel address space. */
+ size_remap = size_vmode * 2;
+ if (vram_remap)
+ size_remap = vram_remap * 1024 * 1024;
+ if (size_remap < size_vmode)
+ size_remap = size_vmode;
+ if (size_remap > size_total)
+ size_remap = size_total;
+ vesafb_fix.smem_len = size_remap;
#ifndef __i386__
screen_info.vesapm_seg = 0;
#endif
- if (!request_mem_region(vesafb_fix.smem_start, vesafb_fix.smem_len, "vesafb")) {
+ if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
printk(KERN_WARNING
"vesafb: abort, cannot reserve video memory at 0x%lx\n",
vesafb_fix.smem_start);
@@ -281,8 +296,10 @@ static int __init vesafb_probe(struct device *device)
goto err;
}
- printk(KERN_INFO "vesafb: framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
- vesafb_fix.smem_start, info->screen_base, vesafb_fix.smem_len/1024);
+ printk(KERN_INFO "vesafb: framebuffer at 0x%lx, mapped to 0x%p, "
+ "using %dk, total %dk\n",
+ vesafb_fix.smem_start, info->screen_base,
+ size_remap/1024, size_total/1024);
printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
@@ -362,7 +379,7 @@ static int __init vesafb_probe(struct device *device)
request_region(0x3c0, 32, "vesafb");
if (mtrr) {
- int temp_size = vesafb_fix.smem_len;
+ int temp_size = size_total;
/* Find the largest power-of-two */
while (temp_size & (temp_size - 1))
temp_size &= (temp_size - 1);
@@ -393,7 +410,7 @@ static int __init vesafb_probe(struct device *device)
return 0;
err:
framebuffer_release(info);
- release_mem_region(vesafb_fix.smem_start, vesafb_fix.smem_len);
+ release_mem_region(vesafb_fix.smem_start, size_total);
return err;
}
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 2e132fb61562..f2b053c0f214 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -123,7 +123,7 @@ static struct fb_fix_screeninfo vga16fb_fix __initdata = {
suitable instruction is the x86 bitwise OR. The following
read-modify-write routine should optimize to one such bitwise
OR. */
-static inline void rmw(volatile char *p)
+static inline void rmw(volatile char __iomem *p)
{
readb(p);
writeb(1, p);
@@ -883,7 +883,7 @@ void vga_8planes_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
char oldmask = selectmask();
int line_ofs, height;
char oldop, oldsr;
- char *where;
+ char __iomem *where;
dx /= 4;
where = info->screen_base + dx + rect->dy * info->fix.line_length;
@@ -932,7 +932,7 @@ void vga_8planes_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void vga16fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
int x, x2, y2, vxres, vyres, width, height, line_ofs;
- char *dst;
+ char __iomem *dst;
vxres = info->var.xres_virtual;
vyres = info->var.yres_virtual;
@@ -1012,7 +1012,8 @@ void vga_8planes_copyarea(struct fb_info *info, const struct fb_copyarea *area)
char oldsr = setsr(0xf);
int height, line_ofs, x;
u32 sx, dx, width;
- char *dest, *src;
+ char __iomem *dest;
+ char __iomem *src;
height = area->height;
@@ -1063,7 +1064,8 @@ void vga16fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
int x, x2, y2, old_dx, old_dy, vxres, vyres;
int height, width, line_ofs;
- char *dst = NULL, *src = NULL;
+ char __iomem *dst = NULL;
+ char __iomem *src = NULL;
vxres = info->var.xres_virtual;
vyres = info->var.yres_virtual;
@@ -1174,7 +1176,7 @@ void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *image)
char oldmask = selectmask();
const char *cdat = image->data;
u32 dx = image->dx;
- char *where;
+ char __iomem *where;
int y;
dx /= 4;
@@ -1198,10 +1200,11 @@ void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *image)
void vga_imageblit_expand(struct fb_info *info, const struct fb_image *image)
{
- char *where = info->screen_base + (image->dx/8) +
+ char __iomem *where = info->screen_base + (image->dx/8) +
image->dy * info->fix.line_length;
struct vga16fb_par *par = (struct vga16fb_par *) info->par;
- char *cdat = (char *) image->data, *dst;
+ char *cdat = (char *) image->data;
+ char __iomem *dst;
int x, y;
switch (info->fix.type) {
@@ -1265,9 +1268,11 @@ void vga_imageblit_color(struct fb_info *info, const struct fb_image *image)
* Draw logo
*/
struct vga16fb_par *par = (struct vga16fb_par *) info->par;
- char *where = info->screen_base + image->dy * info->fix.line_length +
+ char __iomem *where =
+ info->screen_base + image->dy * info->fix.line_length +
image->dx/8;
- const char *cdat = image->data, *dst;
+ const char *cdat = image->data;
+ char __iomem *dst;
int x, y;
switch (info->fix.type) {
@@ -1306,7 +1311,7 @@ void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
if (image->depth == 1)
vga_imageblit_expand(info, image);
- else if (image->depth <= info->var.bits_per_pixel)
+ else
vga_imageblit_color(info, image);
}
@@ -1354,7 +1359,7 @@ int __init vga16fb_init(void)
/* XXX share VGA_FB_PHYS and I/O region with vgacon and others */
- vga16fb.screen_base = (void *)VGA_MAP_MEM(VGA_FB_PHYS);
+ vga16fb.screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS);
if (!vga16fb.screen_base) {
printk(KERN_ERR "vga16fb: unable to map device\n");
ret = -ENOMEM;
diff --git a/fs/Kconfig b/fs/Kconfig
index c8cf86596948..0c605757d63d 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -919,6 +919,27 @@ config TMPFS
See <file:Documentation/filesystems/tmpfs.txt> for details.
+config TMPFS_XATTR
+ bool "tmpfs Extended Attributes"
+ depends on TMPFS
+ help
+ Extended attributes are name:value pairs associated with inodes by
+ the kernel or by users (see the attr(5) manual page, or visit
+ <http://acl.bestbits.at/> for details).
+
+ If unsure, say N.
+
+config TMPFS_SECURITY
+ bool "tmpfs Security Labels"
+ depends on TMPFS_XATTR
+ help
+ Security labels support alternative access control models
+ implemented by security modules like SELinux. This option
+ enables an extended attribute handler for file security
+ labels in the tmpfs filesystem.
+ If you are not using a security module that requires using
+ extended attributes for file security labels, say N.
+
config HUGETLBFS
bool "HugeTLB file system support"
depends X86 || IA64 || PPC64 || SPARC64 || SUPERH || X86_64 || BROKEN
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 955dbef62b69..c8775699fb44 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -100,7 +100,7 @@ static int afs_init(void)
goto error;
#endif
-#ifdef CONFIG_KEYS
+#ifdef CONFIG_KEYS_TURNED_OFF
ret = afs_key_register();
if (ret < 0)
goto error_cache;
@@ -142,7 +142,7 @@ static int afs_init(void)
error_kafstimod:
afs_kafstimod_stop();
error_keys:
-#ifdef CONFIG_KEYS
+#ifdef CONFIG_KEYS_TURNED_OFF
afs_key_unregister();
error_cache:
#endif
@@ -169,7 +169,7 @@ static void __exit afs_exit(void)
afs_kafstimod_stop();
afs_kafsasyncd_stop();
afs_cell_purge();
-#ifdef CONFIG_KEYS
+#ifdef CONFIG_KEYS_TURNED_OFF
afs_key_unregister();
#endif
#ifdef AFS_CACHING_SUPPORT
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 1ff9adb6a8da..82ef8ed2fabc 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -19,7 +19,6 @@
#include <linux/smp_lock.h>
#include "autofs_i.h"
-static struct dentry *autofs4_dir_lookup(struct inode *,struct dentry *, struct nameidata *);
static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *);
static int autofs4_dir_unlink(struct inode *,struct dentry *);
static int autofs4_dir_rmdir(struct inode *,struct dentry *);
@@ -29,7 +28,7 @@ static int autofs4_dir_open(struct inode *inode, struct file *file);
static int autofs4_dir_close(struct inode *inode, struct file *file);
static int autofs4_dir_readdir(struct file * filp, void * dirent, filldir_t filldir);
static int autofs4_root_readdir(struct file * filp, void * dirent, filldir_t filldir);
-static struct dentry *autofs4_root_lookup(struct inode *,struct dentry *, struct nameidata *);
+static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *);
static int autofs4_dcache_readdir(struct file *, void *, filldir_t);
struct file_operations autofs4_root_operations = {
@@ -48,7 +47,7 @@ struct file_operations autofs4_dir_operations = {
};
struct inode_operations autofs4_root_inode_operations = {
- .lookup = autofs4_root_lookup,
+ .lookup = autofs4_lookup,
.unlink = autofs4_dir_unlink,
.symlink = autofs4_dir_symlink,
.mkdir = autofs4_dir_mkdir,
@@ -56,7 +55,7 @@ struct inode_operations autofs4_root_inode_operations = {
};
struct inode_operations autofs4_dir_inode_operations = {
- .lookup = autofs4_dir_lookup,
+ .lookup = autofs4_lookup,
.unlink = autofs4_dir_unlink,
.symlink = autofs4_dir_symlink,
.mkdir = autofs4_dir_mkdir,
@@ -439,23 +438,8 @@ static struct dentry_operations autofs4_dentry_operations = {
.d_release = autofs4_dentry_release,
};
-/* Lookups in non-root dirs never find anything - if it's there, it's
- already in the dcache */
-static struct dentry *autofs4_dir_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
-{
-#if 0
- DPRINTK("ignoring lookup of %.*s/%.*s",
- dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
- dentry->d_name.len, dentry->d_name.name);
-#endif
-
- dentry->d_fsdata = NULL;
- d_add(dentry, NULL);
- return NULL;
-}
-
/* Lookups in the root directory */
-static struct dentry *autofs4_root_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
struct autofs_sb_info *sbi;
int oz_mode;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 5c3f09b86172..28aac88114de 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -666,7 +666,7 @@ int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
EXPORT_SYMBOL(blkdev_get);
-int blkdev_open(struct inode * inode, struct file * filp)
+static int blkdev_open(struct inode * inode, struct file * filp)
{
struct block_device *bdev;
int res;
@@ -695,8 +695,6 @@ int blkdev_open(struct inode * inode, struct file * filp)
return res;
}
-EXPORT_SYMBOL(blkdev_open);
-
int blkdev_put(struct block_device *bdev)
{
int ret = 0;
@@ -798,8 +796,6 @@ struct file_operations def_blk_fops = {
.sendfile = generic_file_sendfile,
};
-EXPORT_SYMBOL(def_blk_fops);
-
int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
int res;
diff --git a/fs/buffer.c b/fs/buffer.c
index 4ec2acb57946..2a75b3f9efe4 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -45,34 +45,6 @@ static void invalidate_bh_lrus(void);
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
-struct bh_wait_queue {
- struct buffer_head *bh;
- wait_queue_t wait;
-};
-
-#define __DEFINE_BH_WAIT(name, b, f) \
- struct bh_wait_queue name = { \
- .bh = b, \
- .wait = { \
- .task = current, \
- .flags = f, \
- .func = bh_wake_function, \
- .task_list = \
- LIST_HEAD_INIT(name.wait.task_list),\
- }, \
- }
-#define DEFINE_BH_WAIT(name, bh) __DEFINE_BH_WAIT(name, bh, 0)
-#define DEFINE_BH_WAIT_EXCLUSIVE(name, bh) \
- __DEFINE_BH_WAIT(name, bh, WQ_FLAG_EXCLUSIVE)
-
-/*
- * Hashed waitqueue_head's for wait_on_buffer()
- */
-#define BH_WAIT_TABLE_ORDER 7
-static struct bh_wait_queue_head {
- wait_queue_head_t wqh;
-} ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
-
inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
@@ -80,71 +52,32 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
bh->b_private = private;
}
-/*
- * Return the address of the waitqueue_head to be used for this
- * buffer_head
- */
-wait_queue_head_t *bh_waitq_head(struct buffer_head *bh)
-{
- return &bh_wait_queue_heads[hash_ptr(bh, BH_WAIT_TABLE_ORDER)].wqh;
-}
-EXPORT_SYMBOL(bh_waitq_head);
-
-void wake_up_buffer(struct buffer_head *bh)
-{
- wait_queue_head_t *wq = bh_waitq_head(bh);
-
- smp_mb();
- if (waitqueue_active(wq))
- __wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, bh);
-}
-EXPORT_SYMBOL(wake_up_buffer);
-
-static int bh_wake_function(wait_queue_t *wait, unsigned mode,
- int sync, void *key)
-{
- struct buffer_head *bh = key;
- struct bh_wait_queue *wq;
-
- wq = container_of(wait, struct bh_wait_queue, wait);
- if (wq->bh != bh || buffer_locked(bh))
- return 0;
- else
- return autoremove_wake_function(wait, mode, sync, key);
-}
-
-static void sync_buffer(struct buffer_head *bh)
+static int sync_buffer(void *word)
{
struct block_device *bd;
+ struct buffer_head *bh
+ = container_of(word, struct buffer_head, b_state);
smp_mb();
bd = bh->b_bdev;
if (bd)
blk_run_address_space(bd->bd_inode->i_mapping);
+ io_schedule();
+ return 0;
}
void fastcall __lock_buffer(struct buffer_head *bh)
{
- wait_queue_head_t *wqh = bh_waitq_head(bh);
- DEFINE_BH_WAIT_EXCLUSIVE(wait, bh);
-
- do {
- prepare_to_wait_exclusive(wqh, &wait.wait,
- TASK_UNINTERRUPTIBLE);
- if (buffer_locked(bh)) {
- sync_buffer(bh);
- io_schedule();
- }
- } while (test_set_buffer_locked(bh));
- finish_wait(wqh, &wait.wait);
+ wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+ TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);
void fastcall unlock_buffer(struct buffer_head *bh)
{
clear_buffer_locked(bh);
- smp_mb__after_clear_bit();
- wake_up_buffer(bh);
+ smp_mb();
+ wake_up_bit(&bh->b_state, BH_Lock);
}
/*
@@ -154,17 +87,7 @@ void fastcall unlock_buffer(struct buffer_head *bh)
*/
void __wait_on_buffer(struct buffer_head * bh)
{
- wait_queue_head_t *wqh = bh_waitq_head(bh);
- DEFINE_BH_WAIT(wait, bh);
-
- do {
- prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
- if (buffer_locked(bh)) {
- sync_buffer(bh);
- io_schedule();
- }
- } while (buffer_locked(bh));
- finish_wait(wqh, &wait.wait);
+ wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
static void
@@ -3123,14 +3046,11 @@ static int buffer_cpu_notify(struct notifier_block *self,
void __init buffer_init(void)
{
- int i;
int nrpages;
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
SLAB_PANIC, init_buffer_head, NULL);
- for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
- init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 087be36211eb..2a1924078059 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -7,8 +7,8 @@ or out of order NULL pointer checks in little used error paths).
Version 1.21
------------
-Add new mount parm to control whether mode check (vfs_permission) is done on
-the client. If Unix extensions are enabled and the uids on the client
+Add new mount parm to control whether mode check (generic_permission) is done
+on the client. If Unix extensions are enabled and the uids on the client
and server do not match, client permission checks are meaningless on
server uids that do not exist on the client (this does not affect the
normal ACL check which occurs on the server). Fix default uid
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 729cdba71530..996fc298a306 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -200,7 +200,7 @@ static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
on the client (above and beyond ACL on servers) for
servers which do not support setting and viewing mode bits,
so allowing client to check permissions is useful */
- return vfs_permission(inode, mask);
+ return generic_permission(inode, mask, NULL);
}
static kmem_cache_t *cifs_inode_cachep;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 2edf63444ccb..5642ccc235e8 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -115,6 +115,7 @@
#include <linux/random.h>
#include <linux/filter.h>
#include <linux/msdos_fs.h>
+#include <linux/pktcdvd.h>
#include <linux/hiddev.h>
diff --git a/fs/dcache.c b/fs/dcache.c
index ec5668a4136e..9918e4fab786 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -722,7 +722,6 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
dentry->d_fsdata = NULL;
dentry->d_mounted = 0;
dentry->d_cookie = NULL;
- dentry->d_bucket = NULL;
INIT_HLIST_NODE(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
INIT_LIST_HEAD(&dentry->d_subdirs);
@@ -851,12 +850,6 @@ struct dentry * d_alloc_anon(struct inode *inode)
res->d_sb = inode->i_sb;
res->d_parent = res;
res->d_inode = inode;
-
- /*
- * Set d_bucket to an "impossible" bucket address so
- * that d_move() doesn't get a false positive
- */
- res->d_bucket = NULL;
res->d_flags |= DCACHE_DISCONNECTED;
res->d_flags &= ~DCACHE_UNHASHED;
list_add(&res->d_alias, &inode->i_dentry);
@@ -979,8 +972,6 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
dentry = hlist_entry(node, struct dentry, d_hash);
- smp_rmb();
-
if (dentry->d_name.hash != hash)
continue;
if (dentry->d_parent != parent)
@@ -989,13 +980,6 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
spin_lock(&dentry->d_lock);
/*
- * If lookup ends up in a different bucket due to concurrent
- * rename, fail it
- */
- if (unlikely(dentry->d_bucket != head))
- goto terminate;
-
- /*
* Recheck the dentry after taking the lock - d_move may have
* changed things. Don't bother checking the hash because we're
* about to compare the whole name anyway.
@@ -1003,7 +987,11 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
if (dentry->d_parent != parent)
goto next;
- qstr = rcu_dereference(&dentry->d_name);
+ /*
+ * It is safe to compare names since d_move() cannot
+ * change the qstr (protected by d_lock).
+ */
+ qstr = &dentry->d_name;
if (parent->d_op && parent->d_op->d_compare) {
if (parent->d_op->d_compare(parent, qstr, name))
goto next;
@@ -1018,7 +1006,6 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
atomic_inc(&dentry->d_count);
found = dentry;
}
-terminate:
spin_unlock(&dentry->d_lock);
break;
next:
@@ -1110,6 +1097,13 @@ void d_delete(struct dentry * dentry)
spin_unlock(&dcache_lock);
}
+static void __d_rehash(struct dentry * entry, struct hlist_head *list)
+{
+
+ entry->d_flags &= ~DCACHE_UNHASHED;
+ hlist_add_head_rcu(&entry->d_hash, list);
+}
+
/**
* d_rehash - add an entry back to the hash
* @entry: dentry to add to the hash
@@ -1123,10 +1117,8 @@ void d_rehash(struct dentry * entry)
spin_lock(&dcache_lock);
spin_lock(&entry->d_lock);
- entry->d_flags &= ~DCACHE_UNHASHED;
+ __d_rehash(entry, list);
spin_unlock(&entry->d_lock);
- entry->d_bucket = list;
- hlist_add_head_rcu(&entry->d_hash, list);
spin_unlock(&dcache_lock);
}
@@ -1204,6 +1196,8 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
void d_move(struct dentry * dentry, struct dentry * target)
{
+ struct hlist_head *list;
+
if (!dentry->d_inode)
printk(KERN_WARNING "VFS: moving negative dcache entry\n");
@@ -1223,13 +1217,12 @@ void d_move(struct dentry * dentry, struct dentry * target)
/* Move the dentry to the target hash queue, if on different bucket */
if (dentry->d_flags & DCACHE_UNHASHED)
goto already_unhashed;
- if (dentry->d_bucket != target->d_bucket) {
- hlist_del_rcu(&dentry->d_hash);
+
+ hlist_del_rcu(&dentry->d_hash);
+
already_unhashed:
- dentry->d_bucket = target->d_bucket;
- hlist_add_head_rcu(&dentry->d_hash, target->d_bucket);
- dentry->d_flags &= ~DCACHE_UNHASHED;
- }
+ list = d_hash(target->d_parent, target->d_name.hash);
+ __d_rehash(dentry, list);
/* Unhash the target: dput() will then get rid of it */
__d_drop(target);
@@ -1239,7 +1232,6 @@ already_unhashed:
/* Switch the names.. */
switch_names(dentry, target);
- smp_wmb();
do_switch(dentry->d_name.len, target->d_name.len);
do_switch(dentry->d_name.hash, target->d_name.hash);
@@ -1656,8 +1648,6 @@ EXPORT_SYMBOL(dget_locked);
EXPORT_SYMBOL(dput);
EXPORT_SYMBOL(find_inode_number);
EXPORT_SYMBOL(have_submounts);
-EXPORT_SYMBOL(is_subdir);
EXPORT_SYMBOL(names_cachep);
-EXPORT_SYMBOL(shrink_dcache_anon);
EXPORT_SYMBOL(shrink_dcache_parent);
EXPORT_SYMBOL(shrink_dcache_sb);
diff --git a/fs/devfs/base.c b/fs/devfs/base.c
index a62d9412e73e..65e299a2d97e 100644
--- a/fs/devfs/base.c
+++ b/fs/devfs/base.c
@@ -1802,7 +1802,6 @@ static int __init devfs_setup(char *str)
__setup("devfs=", devfs_setup);
-EXPORT_SYMBOL(devfs_mk_symlink);
EXPORT_SYMBOL(devfs_mk_dir);
EXPORT_SYMBOL(devfs_remove);
diff --git a/fs/devpts/Makefile b/fs/devpts/Makefile
index 5eb7a942a0b7..5800df2e50c8 100644
--- a/fs/devpts/Makefile
+++ b/fs/devpts/Makefile
@@ -5,5 +5,4 @@
obj-$(CONFIG_UNIX98_PTYS) += devpts.o
devpts-$(CONFIG_UNIX98_PTYS) := inode.o
-devpts-$(CONFIG_DEVPTS_FS_XATTR) += xattr.o
devpts-$(CONFIG_DEVPTS_FS_SECURITY) += xattr_security.o
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 55ac11f70a04..49e8641609d5 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -18,10 +18,28 @@
#include <linux/mount.h>
#include <linux/tty.h>
#include <linux/devpts_fs.h>
-#include "xattr.h"
+#include <linux/xattr.h>
#define DEVPTS_SUPER_MAGIC 0x1cd1
+extern struct xattr_handler devpts_xattr_security_handler;
+
+static struct xattr_handler *devpts_xattr_handlers[] = {
+#ifdef CONFIG_DEVPTS_FS_SECURITY
+ &devpts_xattr_security_handler,
+#endif
+ NULL
+};
+
+struct inode_operations devpts_file_inode_operations = {
+#ifdef CONFIG_DEVPTS_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = generic_listxattr,
+ .removexattr = generic_removexattr,
+#endif
+};
+
static struct vfsmount *devpts_mnt;
static struct dentry *devpts_root;
@@ -84,6 +102,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
s->s_blocksize_bits = 10;
s->s_magic = DEVPTS_SUPER_MAGIC;
s->s_op = &devpts_sops;
+ s->s_xattr = devpts_xattr_handlers;
inode = new_inode(s);
if (!inode)
@@ -134,13 +153,6 @@ static struct dentry *get_node(int num)
return lookup_one_len(s, root, sprintf(s, "%d", num));
}
-static struct inode_operations devpts_file_inode_operations = {
- .setxattr = devpts_setxattr,
- .getxattr = devpts_getxattr,
- .listxattr = devpts_listxattr,
- .removexattr = devpts_removexattr,
-};
-
int devpts_pty_new(struct tty_struct *tty)
{
int number = tty->index;
@@ -209,10 +221,7 @@ void devpts_pty_kill(int number)
static int __init init_devpts_fs(void)
{
- int err = init_devpts_xattr();
- if (err)
- return err;
- err = register_filesystem(&devpts_fs_type);
+ int err = register_filesystem(&devpts_fs_type);
if (!err) {
devpts_mnt = kern_mount(&devpts_fs_type);
if (IS_ERR(devpts_mnt))
@@ -225,7 +234,6 @@ static void __exit exit_devpts_fs(void)
{
unregister_filesystem(&devpts_fs_type);
mntput(devpts_mnt);
- exit_devpts_xattr();
}
module_init(init_devpts_fs)
diff --git a/fs/devpts/xattr.c b/fs/devpts/xattr.c
deleted file mode 100644
index db7e15c4fc30..000000000000
--- a/fs/devpts/xattr.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- File: fs/devpts/xattr.c
-
- Derived from fs/ext3/xattr.c, changed in the following ways:
- drop everything related to persistent storage of EAs
- pass dentry rather than inode to internal methods
- only presently define a handler for security modules
-*/
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <asm/semaphore.h>
-#include "xattr.h"
-
-static struct devpts_xattr_handler *devpts_xattr_handlers[DEVPTS_XATTR_INDEX_MAX];
-static rwlock_t devpts_handler_lock = RW_LOCK_UNLOCKED;
-
-int
-devpts_xattr_register(int name_index, struct devpts_xattr_handler *handler)
-{
- int error = -EINVAL;
-
- if (name_index > 0 && name_index <= DEVPTS_XATTR_INDEX_MAX) {
- write_lock(&devpts_handler_lock);
- if (!devpts_xattr_handlers[name_index-1]) {
- devpts_xattr_handlers[name_index-1] = handler;
- error = 0;
- }
- write_unlock(&devpts_handler_lock);
- }
- return error;
-}
-
-void
-devpts_xattr_unregister(int name_index, struct devpts_xattr_handler *handler)
-{
- if (name_index > 0 || name_index <= DEVPTS_XATTR_INDEX_MAX) {
- write_lock(&devpts_handler_lock);
- devpts_xattr_handlers[name_index-1] = NULL;
- write_unlock(&devpts_handler_lock);
- }
-}
-
-static inline const char *
-strcmp_prefix(const char *a, const char *a_prefix)
-{
- while (*a_prefix && *a == *a_prefix) {
- a++;
- a_prefix++;
- }
- return *a_prefix ? NULL : a;
-}
-
-/*
- * Decode the extended attribute name, and translate it into
- * the name_index and name suffix.
- */
-static inline struct devpts_xattr_handler *
-devpts_xattr_resolve_name(const char **name)
-{
- struct devpts_xattr_handler *handler = NULL;
- int i;
-
- if (!*name)
- return NULL;
- read_lock(&devpts_handler_lock);
- for (i=0; i<DEVPTS_XATTR_INDEX_MAX; i++) {
- if (devpts_xattr_handlers[i]) {
- const char *n = strcmp_prefix(*name,
- devpts_xattr_handlers[i]->prefix);
- if (n) {
- handler = devpts_xattr_handlers[i];
- *name = n;
- break;
- }
- }
- }
- read_unlock(&devpts_handler_lock);
- return handler;
-}
-
-static inline struct devpts_xattr_handler *
-devpts_xattr_handler(int name_index)
-{
- struct devpts_xattr_handler *handler = NULL;
- if (name_index > 0 && name_index <= DEVPTS_XATTR_INDEX_MAX) {
- read_lock(&devpts_handler_lock);
- handler = devpts_xattr_handlers[name_index-1];
- read_unlock(&devpts_handler_lock);
- }
- return handler;
-}
-
-/*
- * Inode operation getxattr()
- *
- * dentry->d_inode->i_sem down
- */
-ssize_t
-devpts_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size)
-{
- struct devpts_xattr_handler *handler;
-
- handler = devpts_xattr_resolve_name(&name);
- if (!handler)
- return -EOPNOTSUPP;
- return handler->get(dentry, name, buffer, size);
-}
-
-/*
- * Inode operation listxattr()
- *
- * dentry->d_inode->i_sem down
- */
-ssize_t
-devpts_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
-{
- struct devpts_xattr_handler *handler = NULL;
- int i, error = 0;
- unsigned int size = 0;
- char *buf;
-
- read_lock(&devpts_handler_lock);
-
- for (i=0; i<DEVPTS_XATTR_INDEX_MAX; i++) {
- handler = devpts_xattr_handlers[i];
- if (handler)
- size += handler->list(dentry, NULL);
- }
-
- if (!buffer) {
- error = size;
- goto out;
- } else {
- error = -ERANGE;
- if (size > buffer_size)
- goto out;
- }
-
- buf = buffer;
- for (i=0; i<DEVPTS_XATTR_INDEX_MAX; i++) {
- handler = devpts_xattr_handlers[i];
- if (handler)
- buf += handler->list(dentry, buf);
- }
- error = size;
-
-out:
- read_unlock(&devpts_handler_lock);
- return size;
-}
-
-/*
- * Inode operation setxattr()
- *
- * dentry->d_inode->i_sem down
- */
-int
-devpts_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
-{
- struct devpts_xattr_handler *handler;
-
- if (size == 0)
- value = ""; /* empty EA, do not remove */
- handler = devpts_xattr_resolve_name(&name);
- if (!handler)
- return -EOPNOTSUPP;
- return handler->set(dentry, name, value, size, flags);
-}
-
-/*
- * Inode operation removexattr()
- *
- * dentry->d_inode->i_sem down
- */
-int
-devpts_removexattr(struct dentry *dentry, const char *name)
-{
- struct devpts_xattr_handler *handler;
-
- handler = devpts_xattr_resolve_name(&name);
- if (!handler)
- return -EOPNOTSUPP;
- return handler->set(dentry, name, NULL, 0, XATTR_REPLACE);
-}
-
-int __init
-init_devpts_xattr(void)
-{
-#ifdef CONFIG_DEVPTS_FS_SECURITY
- int err;
-
- err = devpts_xattr_register(DEVPTS_XATTR_INDEX_SECURITY,
- &devpts_xattr_security_handler);
- if (err)
- return err;
-#endif
-
- return 0;
-}
-
-void
-exit_devpts_xattr(void)
-{
-#ifdef CONFIG_DEVPTS_FS_SECURITY
- devpts_xattr_unregister(DEVPTS_XATTR_INDEX_SECURITY,
- &devpts_xattr_security_handler);
-#endif
-
-}
diff --git a/fs/devpts/xattr.h b/fs/devpts/xattr.h
deleted file mode 100644
index ecd74a0986a6..000000000000
--- a/fs/devpts/xattr.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- File: fs/devpts/xattr.h
-
- Derived from fs/ext3/xattr.h, changed in the following ways:
- drop everything related to persistent storage of EAs
- pass dentry rather than inode to internal methods
- only presently define a handler for security modules
-*/
-
-#include <linux/config.h>
-#include <linux/xattr.h>
-
-/* Name indexes */
-#define DEVPTS_XATTR_INDEX_MAX 10
-#define DEVPTS_XATTR_INDEX_SECURITY 1
-
-# ifdef CONFIG_DEVPTS_FS_XATTR
-
-struct devpts_xattr_handler {
- char *prefix;
- size_t (*list)(struct dentry *dentry, char *buffer);
- int (*get)(struct dentry *dentry, const char *name, void *buffer,
- size_t size);
- int (*set)(struct dentry *dentry, const char *name, const void *buffer,
- size_t size, int flags);
-};
-
-extern int devpts_xattr_register(int, struct devpts_xattr_handler *);
-extern void devpts_xattr_unregister(int, struct devpts_xattr_handler *);
-
-extern int devpts_setxattr(struct dentry *, const char *, const void *, size_t, int);
-extern ssize_t devpts_getxattr(struct dentry *, const char *, void *, size_t);
-extern ssize_t devpts_listxattr(struct dentry *, char *, size_t);
-extern int devpts_removexattr(struct dentry *, const char *);
-
-extern int init_devpts_xattr(void);
-extern void exit_devpts_xattr(void);
-
-# else /* CONFIG_DEVPTS_FS_XATTR */
-# define devpts_setxattr NULL
-# define devpts_getxattr NULL
-# define devpts_listxattr NULL
-# define devpts_removexattr NULL
-
-static inline int
-init_devpts_xattr(void)
-{
- return 0;
-}
-
-static inline void
-exit_devpts_xattr(void)
-{
-}
-
-# endif /* CONFIG_DEVPTS_FS_XATTR */
-
-extern struct devpts_xattr_handler devpts_xattr_security_handler;
-
diff --git a/fs/devpts/xattr_security.c b/fs/devpts/xattr_security.c
index 4291d7b35f20..864cb5c79baa 100644
--- a/fs/devpts/xattr_security.c
+++ b/fs/devpts/xattr_security.c
@@ -1,38 +1,45 @@
/*
- * File: fs/devpts/xattr_security.c
+ * Security xattr support for devpts.
+ *
+ * Author: Stephen Smalley <sds@epoch.ncsc.mil>
+ * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
*/
-
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/security.h>
-#include "xattr.h"
+#include <linux/xattr.h>
static size_t
-devpts_xattr_security_list(struct dentry *dentry, char *buffer)
+devpts_xattr_security_list(struct inode *inode, char *list, size_t list_len,
+ const char *name, size_t name_len)
{
- return security_inode_listsecurity(dentry, buffer);
+ return security_inode_listsecurity(inode, list, list_len);
}
static int
-devpts_xattr_security_get(struct dentry *dentry, const char *name,
+devpts_xattr_security_get(struct inode *inode, const char *name,
void *buffer, size_t size)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return security_inode_getsecurity(dentry, name, buffer, size);
+ return security_inode_getsecurity(inode, name, buffer, size);
}
static int
-devpts_xattr_security_set(struct dentry *dentry, const char *name,
+devpts_xattr_security_set(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return security_inode_setsecurity(dentry, name, value, size, flags);
+ return security_inode_setsecurity(inode, name, value, size, flags);
}
-struct devpts_xattr_handler devpts_xattr_security_handler = {
+struct xattr_handler devpts_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = devpts_xattr_security_list,
.get = devpts_xattr_security_get,
diff --git a/fs/exec.c b/fs/exec.c
index e715541b2db4..4915bffb045d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -34,6 +34,7 @@
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
+#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
@@ -848,8 +849,10 @@ int flush_old_exec(struct linux_binprm * bprm)
if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
permission(bprm->file->f_dentry->d_inode,MAY_READ, NULL) ||
- (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP))
+ (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
+ suid_keys(current);
current->mm->dumpable = 0;
+ }
/* An exec changes our domain. We are no longer part of the thread
group */
@@ -883,7 +886,7 @@ int prepare_binprm(struct linux_binprm *bprm)
mode = inode->i_mode;
/*
* Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
- * vfs_permission lets a non-executable through
+ * generic_permission lets a non-executable through
*/
if (!(mode & 0111)) /* with at least _one_ execute bit set */
return -EACCES;
@@ -943,6 +946,11 @@ static inline int unsafe_exec(struct task_struct *p)
void compute_creds(struct linux_binprm *bprm)
{
int unsafe;
+
+ if (bprm->e_uid != current->uid)
+ suid_keys(current);
+ exec_keys(current);
+
task_lock(current);
unsafe = unsafe_exec(current);
security_bprm_apply_creds(bprm, unsafe);
@@ -1179,8 +1187,6 @@ out_ret:
return retval;
}
-EXPORT_SYMBOL(do_execve);
-
int set_binfmt(struct linux_binfmt *new)
{
struct linux_binfmt *old = current->binfmt;
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 89d1df91411f..fb716b3f5ee0 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -280,60 +280,24 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
return error;
}
-/*
- * Inode operation permission().
- *
- * inode->i_sem: don't care
- */
-int
-ext2_permission(struct inode *inode, int mask, struct nameidata *nd)
+static int
+ext2_check_acl(struct inode *inode, int mask)
{
- int mode = inode->i_mode;
-
- /* Nobody gets write access to a read-only fs */
- if ((mask & MAY_WRITE) && IS_RDONLY(inode) &&
- (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
- return -EROFS;
- /* Nobody gets write access to an immutable file */
- if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
- return -EACCES;
- if (current->fsuid == inode->i_uid) {
- mode >>= 6;
- } else if (test_opt(inode->i_sb, POSIX_ACL)) {
- struct posix_acl *acl;
-
- /* The access ACL cannot grant access if the group class
- permission bits don't contain all requested permissions. */
- if (((mode >> 3) & mask & S_IRWXO) != mask)
- goto check_groups;
- acl = ext2_get_acl(inode, ACL_TYPE_ACCESS);
- if (acl) {
- int error = posix_acl_permission(inode, acl, mask);
- posix_acl_release(acl);
- if (error == -EACCES)
- goto check_capabilities;
- return error;
- } else
- goto check_groups;
- } else {
-check_groups:
- if (in_group_p(inode->i_gid))
- mode >>= 3;
+ struct posix_acl *acl = ext2_get_acl(inode, ACL_TYPE_ACCESS);
+
+ if (acl) {
+ int error = posix_acl_permission(inode, acl, mask);
+ posix_acl_release(acl);
+ return error;
}
- if ((mode & mask & S_IRWXO) == mask)
- return 0;
-check_capabilities:
- /* Allowed to override Discretionary Access Control? */
- if (!(mask & MAY_EXEC) ||
- (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
- if (capable(CAP_DAC_OVERRIDE))
- return 0;
- /* Read and search granted if capable(CAP_DAC_READ_SEARCH) */
- if (capable(CAP_DAC_READ_SEARCH) && ((mask == MAY_READ) ||
- (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))))
- return 0;
- return -EACCES;
+ return -EAGAIN;
+}
+
+int
+ext2_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+ return generic_permission(inode, mask, ext2_check_acl);
}
/*
@@ -429,27 +393,27 @@ ext2_acl_chmod(struct inode *inode)
* Extended attribut handlers
*/
static size_t
-ext2_xattr_list_acl_access(char *list, struct inode *inode,
- const char *name, int name_len)
+ext2_xattr_list_acl_access(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
{
const size_t size = sizeof(XATTR_NAME_ACL_ACCESS);
if (!test_opt(inode->i_sb, POSIX_ACL))
return 0;
- if (list)
+ if (list && size <= list_size)
memcpy(list, XATTR_NAME_ACL_ACCESS, size);
return size;
}
static size_t
-ext2_xattr_list_acl_default(char *list, struct inode *inode,
- const char *name, int name_len)
+ext2_xattr_list_acl_default(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
{
const size_t size = sizeof(XATTR_NAME_ACL_DEFAULT);
if (!test_opt(inode->i_sb, POSIX_ACL))
return 0;
- if (list)
+ if (list && size <= list_size)
memcpy(list, XATTR_NAME_ACL_DEFAULT, size);
return size;
}
@@ -541,45 +505,16 @@ ext2_xattr_set_acl_default(struct inode *inode, const char *name,
return ext2_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
}
-struct ext2_xattr_handler ext2_xattr_acl_access_handler = {
+struct xattr_handler ext2_xattr_acl_access_handler = {
.prefix = XATTR_NAME_ACL_ACCESS,
.list = ext2_xattr_list_acl_access,
.get = ext2_xattr_get_acl_access,
.set = ext2_xattr_set_acl_access,
};
-struct ext2_xattr_handler ext2_xattr_acl_default_handler = {
+struct xattr_handler ext2_xattr_acl_default_handler = {
.prefix = XATTR_NAME_ACL_DEFAULT,
.list = ext2_xattr_list_acl_default,
.get = ext2_xattr_get_acl_default,
.set = ext2_xattr_set_acl_default,
};
-
-void
-exit_ext2_acl(void)
-{
- ext2_xattr_unregister(EXT2_XATTR_INDEX_POSIX_ACL_ACCESS,
- &ext2_xattr_acl_access_handler);
- ext2_xattr_unregister(EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT,
- &ext2_xattr_acl_default_handler);
-}
-
-int __init
-init_ext2_acl(void)
-{
- int error;
-
- error = ext2_xattr_register(EXT2_XATTR_INDEX_POSIX_ACL_ACCESS,
- &ext2_xattr_acl_access_handler);
- if (error)
- goto fail;
- error = ext2_xattr_register(EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT,
- &ext2_xattr_acl_default_handler);
- if (error)
- goto fail;
- return 0;
-
-fail:
- exit_ext2_acl();
- return error;
-}
diff --git a/fs/ext2/acl.h b/fs/ext2/acl.h
index b0a1c4b38be7..f613bc33d0a2 100644
--- a/fs/ext2/acl.h
+++ b/fs/ext2/acl.h
@@ -63,9 +63,6 @@ extern int ext2_permission (struct inode *, int, struct nameidata *);
extern int ext2_acl_chmod (struct inode *);
extern int ext2_init_acl (struct inode *, struct inode *);
-extern int init_ext2_acl(void);
-extern void exit_ext2_acl(void);
-
#else
#include <linux/sched.h>
#define ext2_permission NULL
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 713fb9b07545..f5e86141ec54 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -57,10 +57,12 @@ struct file_operations ext2_file_operations = {
struct inode_operations ext2_file_inode_operations = {
.truncate = ext2_truncate,
- .setxattr = ext2_setxattr,
- .getxattr = ext2_getxattr,
+#ifdef CONFIG_EXT2_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ext2_listxattr,
- .removexattr = ext2_removexattr,
+ .removexattr = generic_removexattr,
+#endif
.setattr = ext2_setattr,
.permission = ext2_permission,
};
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 4abd2f06b387..661a751d15f0 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -395,19 +395,23 @@ struct inode_operations ext2_dir_inode_operations = {
.rmdir = ext2_rmdir,
.mknod = ext2_mknod,
.rename = ext2_rename,
- .setxattr = ext2_setxattr,
- .getxattr = ext2_getxattr,
+#ifdef CONFIG_EXT2_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ext2_listxattr,
- .removexattr = ext2_removexattr,
+ .removexattr = generic_removexattr,
+#endif
.setattr = ext2_setattr,
.permission = ext2_permission,
};
struct inode_operations ext2_special_inode_operations = {
- .setxattr = ext2_setxattr,
- .getxattr = ext2_getxattr,
+#ifdef CONFIG_EXT2_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ext2_listxattr,
- .removexattr = ext2_removexattr,
+ .removexattr = generic_removexattr,
+#endif
.setattr = ext2_setattr,
.permission = ext2_permission,
};
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index ad3face04fe4..497650e32d7f 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -800,6 +800,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
*/
sb->s_op = &ext2_sops;
sb->s_export_op = &ext2_export_ops;
+ sb->s_xattr = ext2_xattr_handlers;
root = iget(sb, EXT2_ROOT_INO);
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c
index 4680ea7ecc12..9f7bac01d557 100644
--- a/fs/ext2/symlink.c
+++ b/fs/ext2/symlink.c
@@ -32,17 +32,21 @@ struct inode_operations ext2_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
- .setxattr = ext2_setxattr,
- .getxattr = ext2_getxattr,
+#ifdef CONFIG_EXT2_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ext2_listxattr,
- .removexattr = ext2_removexattr,
+ .removexattr = generic_removexattr,
+#endif
};
struct inode_operations ext2_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ext2_follow_link,
- .setxattr = ext2_setxattr,
- .getxattr = ext2_getxattr,
+#ifdef CONFIG_EXT2_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ext2_listxattr,
- .removexattr = ext2_removexattr,
+ .removexattr = generic_removexattr,
+#endif
};
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index c8642719c170..fffddd16b064 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -6,6 +6,9 @@
* Fix by Harrison Xing <harrison@mountainviewdata.com>.
* Extended attributes for symlinks and special files added per
* suggestion of Luka Renko <luka.renko@hermes.si>.
+ * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
+ * Red Hat Inc.
+ *
*/
/*
@@ -62,8 +65,6 @@
#include "acl.h"
/* These symbols may be needed by a module. */
-EXPORT_SYMBOL(ext2_xattr_register);
-EXPORT_SYMBOL(ext2_xattr_unregister);
EXPORT_SYMBOL(ext2_xattr_get);
EXPORT_SYMBOL(ext2_xattr_list);
EXPORT_SYMBOL(ext2_xattr_set);
@@ -104,101 +105,40 @@ static void ext2_xattr_rehash(struct ext2_xattr_header *,
struct ext2_xattr_entry *);
static struct mb_cache *ext2_xattr_cache;
-static struct ext2_xattr_handler *ext2_xattr_handlers[EXT2_XATTR_INDEX_MAX];
-static rwlock_t ext2_handler_lock = RW_LOCK_UNLOCKED;
-
-int
-ext2_xattr_register(int name_index, struct ext2_xattr_handler *handler)
-{
- int error = -EINVAL;
- if (name_index > 0 && name_index <= EXT2_XATTR_INDEX_MAX) {
- write_lock(&ext2_handler_lock);
- if (!ext2_xattr_handlers[name_index-1]) {
- ext2_xattr_handlers[name_index-1] = handler;
- error = 0;
- }
- write_unlock(&ext2_handler_lock);
- }
- return error;
-}
-
-void
-ext2_xattr_unregister(int name_index, struct ext2_xattr_handler *handler)
-{
- if (name_index > 0 || name_index <= EXT2_XATTR_INDEX_MAX) {
- write_lock(&ext2_handler_lock);
- ext2_xattr_handlers[name_index-1] = NULL;
- write_unlock(&ext2_handler_lock);
- }
-}
-
-static inline const char *
-strcmp_prefix(const char *a, const char *a_prefix)
-{
- while (*a_prefix && *a == *a_prefix) {
- a++;
- a_prefix++;
- }
- return *a_prefix ? NULL : a;
-}
+static struct xattr_handler *ext2_xattr_handler_map[EXT2_XATTR_INDEX_MAX] = {
+ [EXT2_XATTR_INDEX_USER] = &ext2_xattr_user_handler,
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+ [EXT2_XATTR_INDEX_POSIX_ACL_ACCESS] = &ext2_xattr_acl_access_handler,
+ [EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext2_xattr_acl_default_handler,
+#endif
+ [EXT2_XATTR_INDEX_TRUSTED] = &ext2_xattr_trusted_handler,
+#ifdef CONFIG_EXT2_FS_SECURITY
+ [EXT2_XATTR_INDEX_SECURITY] = &ext2_xattr_security_handler,
+#endif
+};
-/*
- * Decode the extended attribute name, and translate it into
- * the name_index and name suffix.
- */
-static struct ext2_xattr_handler *
-ext2_xattr_resolve_name(const char **name)
-{
- struct ext2_xattr_handler *handler = NULL;
- int i;
-
- if (!*name)
- return NULL;
- read_lock(&ext2_handler_lock);
- for (i=0; i<EXT2_XATTR_INDEX_MAX; i++) {
- if (ext2_xattr_handlers[i]) {
- const char *n = strcmp_prefix(*name,
- ext2_xattr_handlers[i]->prefix);
- if (n) {
- handler = ext2_xattr_handlers[i];
- *name = n;
- break;
- }
- }
- }
- read_unlock(&ext2_handler_lock);
- return handler;
-}
+struct xattr_handler *ext2_xattr_handlers[] = {
+ &ext2_xattr_user_handler,
+ &ext2_xattr_trusted_handler,
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+ &ext2_xattr_acl_access_handler,
+ &ext2_xattr_acl_default_handler,
+#endif
+#ifdef CONFIG_EXT2_FS_SECURITY
+ &ext2_xattr_security_handler,
+#endif
+ NULL
+};
-static inline struct ext2_xattr_handler *
+static inline struct xattr_handler *
ext2_xattr_handler(int name_index)
{
- struct ext2_xattr_handler *handler = NULL;
- if (name_index > 0 && name_index <= EXT2_XATTR_INDEX_MAX) {
- read_lock(&ext2_handler_lock);
- handler = ext2_xattr_handlers[name_index-1];
- read_unlock(&ext2_handler_lock);
- }
- return handler;
-}
+ struct xattr_handler *handler = NULL;
-/*
- * Inode operation getxattr()
- *
- * dentry->d_inode->i_sem: don't care
- */
-ssize_t
-ext2_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size)
-{
- struct ext2_xattr_handler *handler;
- struct inode *inode = dentry->d_inode;
-
- handler = ext2_xattr_resolve_name(&name);
- if (!handler)
- return -EOPNOTSUPP;
- return handler->get(inode, name, buffer, size);
+ if (name_index > 0 && name_index <= EXT2_XATTR_INDEX_MAX)
+ handler = ext2_xattr_handler_map[name_index];
+ return handler;
}
/*
@@ -213,43 +153,6 @@ ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
}
/*
- * Inode operation setxattr()
- *
- * dentry->d_inode->i_sem: down
- */
-int
-ext2_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
-{
- struct ext2_xattr_handler *handler;
- struct inode *inode = dentry->d_inode;
-
- if (size == 0)
- value = ""; /* empty EA, do not remove */
- handler = ext2_xattr_resolve_name(&name);
- if (!handler)
- return -EOPNOTSUPP;
- return handler->set(inode, name, value, size, flags);
-}
-
-/*
- * Inode operation removexattr()
- *
- * dentry->d_inode->i_sem: down
- */
-int
-ext2_removexattr(struct dentry *dentry, const char *name)
-{
- struct ext2_xattr_handler *handler;
- struct inode *inode = dentry->d_inode;
-
- handler = ext2_xattr_resolve_name(&name);
- if (!handler)
- return -EOPNOTSUPP;
- return handler->set(inode, name, NULL, 0, XATTR_REPLACE);
-}
-
-/*
* ext2_xattr_get()
*
* Copy an extended attribute into the buffer
@@ -367,8 +270,8 @@ ext2_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
{
struct buffer_head *bh = NULL;
struct ext2_xattr_entry *entry;
- size_t size = 0;
- char *buf, *end;
+ char *end;
+ size_t rest = buffer_size;
int error;
ea_idebug(inode, "buffer=%p, buffer_size=%ld",
@@ -394,44 +297,40 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
error = -EIO;
goto cleanup;
}
- /* compute the size required for the list of attribute names */
- for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
- entry = EXT2_XATTR_NEXT(entry)) {
- struct ext2_xattr_handler *handler;
- struct ext2_xattr_entry *next =
- EXT2_XATTR_NEXT(entry);
+
+ /* check the on-disk data structure */
+ entry = FIRST_ENTRY(bh);
+ while (!IS_LAST_ENTRY(entry)) {
+ struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);
+
if ((char *)next >= end)
goto bad_block;
-
- handler = ext2_xattr_handler(entry->e_name_index);
- if (handler)
- size += handler->list(NULL, inode, entry->e_name,
- entry->e_name_len);
+ entry = next;
}
-
if (ext2_xattr_cache_insert(bh))
ea_idebug(inode, "cache insert failed");
- if (!buffer) {
- error = size;
- goto cleanup;
- } else {
- error = -ERANGE;
- if (size > buffer_size)
- goto cleanup;
- }
/* list the attribute names */
- buf = buffer;
for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
entry = EXT2_XATTR_NEXT(entry)) {
- struct ext2_xattr_handler *handler;
-
- handler = ext2_xattr_handler(entry->e_name_index);
- if (handler)
- buf += handler->list(buf, inode, entry->e_name,
- entry->e_name_len);
+ struct xattr_handler *handler =
+ ext2_xattr_handler(entry->e_name_index);
+
+ if (handler) {
+ size_t size = handler->list(inode, buffer, rest,
+ entry->e_name,
+ entry->e_name_len);
+ if (buffer) {
+ if (size > rest) {
+ error = -ERANGE;
+ goto cleanup;
+ }
+ buffer += size;
+ }
+ rest -= size;
+ }
}
- error = size;
+ error = buffer_size - rest; /* total size */
cleanup:
brelse(bh);
@@ -1120,66 +1019,16 @@ static void ext2_xattr_rehash(struct ext2_xattr_header *header,
int __init
init_ext2_xattr(void)
{
- int err;
-
- err = ext2_xattr_register(EXT2_XATTR_INDEX_USER,
- &ext2_xattr_user_handler);
- if (err)
- return err;
- err = ext2_xattr_register(EXT2_XATTR_INDEX_TRUSTED,
- &ext2_xattr_trusted_handler);
- if (err)
- goto out;
-#ifdef CONFIG_EXT2_FS_SECURITY
- err = ext2_xattr_register(EXT2_XATTR_INDEX_SECURITY,
- &ext2_xattr_security_handler);
- if (err)
- goto out1;
-#endif
-#ifdef CONFIG_EXT2_FS_POSIX_ACL
- err = init_ext2_acl();
- if (err)
- goto out2;
-#endif
ext2_xattr_cache = mb_cache_create("ext2_xattr", NULL,
sizeof(struct mb_cache_entry) +
sizeof(struct mb_cache_entry_index), 1, 6);
- if (!ext2_xattr_cache) {
- err = -ENOMEM;
- goto out3;
- }
+ if (!ext2_xattr_cache)
+ return -ENOMEM;
return 0;
-out3:
-#ifdef CONFIG_EXT2_FS_POSIX_ACL
- exit_ext2_acl();
-out2:
-#endif
-#ifdef CONFIG_EXT2_FS_SECURITY
- ext2_xattr_unregister(EXT2_XATTR_INDEX_SECURITY,
- &ext2_xattr_security_handler);
-out1:
-#endif
- ext2_xattr_unregister(EXT2_XATTR_INDEX_TRUSTED,
- &ext2_xattr_trusted_handler);
-out:
- ext2_xattr_unregister(EXT2_XATTR_INDEX_USER,
- &ext2_xattr_user_handler);
- return err;
}
void
exit_ext2_xattr(void)
{
mb_cache_destroy(ext2_xattr_cache);
-#ifdef CONFIG_EXT2_FS_POSIX_ACL
- exit_ext2_acl();
-#endif
-#ifdef CONFIG_EXT2_FS_SECURITY
- ext2_xattr_unregister(EXT2_XATTR_INDEX_SECURITY,
- &ext2_xattr_security_handler);
-#endif
- ext2_xattr_unregister(EXT2_XATTR_INDEX_TRUSTED,
- &ext2_xattr_trusted_handler);
- ext2_xattr_unregister(EXT2_XATTR_INDEX_USER,
- &ext2_xattr_user_handler);
}
diff --git a/fs/ext2/xattr.h b/fs/ext2/xattr.h
index 6268bcdaf753..65ea5f9e723d 100644
--- a/fs/ext2/xattr.h
+++ b/fs/ext2/xattr.h
@@ -57,23 +57,13 @@ struct ext2_xattr_entry {
# ifdef CONFIG_EXT2_FS_XATTR
-struct ext2_xattr_handler {
- char *prefix;
- size_t (*list)(char *list, struct inode *inode, const char *name,
- int name_len);
- int (*get)(struct inode *inode, const char *name, void *buffer,
- size_t size);
- int (*set)(struct inode *inode, const char *name, const void *buffer,
- size_t size, int flags);
-};
-
-extern int ext2_xattr_register(int, struct ext2_xattr_handler *);
-extern void ext2_xattr_unregister(int, struct ext2_xattr_handler *);
+extern struct xattr_handler ext2_xattr_user_handler;
+extern struct xattr_handler ext2_xattr_trusted_handler;
+extern struct xattr_handler ext2_xattr_acl_access_handler;
+extern struct xattr_handler ext2_xattr_acl_default_handler;
+extern struct xattr_handler ext2_xattr_security_handler;
-extern int ext2_setxattr(struct dentry *, const char *, const void *, size_t, int);
-extern ssize_t ext2_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t ext2_listxattr(struct dentry *, char *, size_t);
-extern int ext2_removexattr(struct dentry *, const char *);
extern int ext2_xattr_get(struct inode *, int, const char *, void *, size_t);
extern int ext2_xattr_list(struct inode *, char *, size_t);
@@ -85,11 +75,9 @@ extern void ext2_xattr_put_super(struct super_block *);
extern int init_ext2_xattr(void);
extern void exit_ext2_xattr(void);
+extern struct xattr_handler *ext2_xattr_handlers[];
+
# else /* CONFIG_EXT2_FS_XATTR */
-# define ext2_setxattr NULL
-# define ext2_getxattr NULL
-# define ext2_listxattr NULL
-# define ext2_removexattr NULL
static inline int
ext2_xattr_get(struct inode *inode, int name_index,
@@ -132,9 +120,7 @@ exit_ext2_xattr(void)
{
}
-# endif /* CONFIG_EXT2_FS_XATTR */
+#define ext2_xattr_handlers NULL
-extern struct ext2_xattr_handler ext2_xattr_user_handler;
-extern struct ext2_xattr_handler ext2_xattr_trusted_handler;
-extern struct ext2_xattr_handler ext2_xattr_security_handler;
+# endif /* CONFIG_EXT2_FS_XATTR */
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index 837abd7bd1ca..6a6c59fbe599 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -11,17 +11,18 @@
#include "xattr.h"
static size_t
-ext2_xattr_security_list(char *list, struct inode *inode,
- const char *name, int name_len)
+ext2_xattr_security_list(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
{
const int prefix_len = sizeof(XATTR_SECURITY_PREFIX)-1;
+ const size_t total_len = prefix_len + name_len + 1;
- if (list) {
+ if (list && total_len <= list_size) {
memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
memcpy(list+prefix_len, name, name_len);
list[prefix_len + name_len] = '\0';
}
- return prefix_len + name_len + 1;
+ return total_len;
}
static int
@@ -44,7 +45,7 @@ ext2_xattr_security_set(struct inode *inode, const char *name,
value, size, flags);
}
-struct ext2_xattr_handler ext2_xattr_security_handler = {
+struct xattr_handler ext2_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = ext2_xattr_security_list,
.get = ext2_xattr_security_get,
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index d9478aed723f..52b30ee6a25f 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -15,20 +15,21 @@
#define XATTR_TRUSTED_PREFIX "trusted."
static size_t
-ext2_xattr_trusted_list(char *list, struct inode *inode,
- const char *name, int name_len)
+ext2_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
{
const int prefix_len = sizeof(XATTR_TRUSTED_PREFIX)-1;
+ const size_t total_len = prefix_len + name_len + 1;
if (!capable(CAP_SYS_ADMIN))
return 0;
- if (list) {
+ if (list && total_len <= list_size) {
memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
memcpy(list+prefix_len, name, name_len);
list[prefix_len + name_len] = '\0';
}
- return prefix_len + name_len + 1;
+ return total_len;
}
static int
@@ -55,7 +56,7 @@ ext2_xattr_trusted_set(struct inode *inode, const char *name,
value, size, flags);
}
-struct ext2_xattr_handler ext2_xattr_trusted_handler = {
+struct xattr_handler ext2_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.list = ext2_xattr_trusted_list,
.get = ext2_xattr_trusted_get,
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index be1558761064..0c03ea131a94 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -14,20 +14,21 @@
#define XATTR_USER_PREFIX "user."
static size_t
-ext2_xattr_user_list(char *list, struct inode *inode,
- const char *name, int name_len)
+ext2_xattr_user_list(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
{
- const int prefix_len = sizeof(XATTR_USER_PREFIX)-1;
+ const size_t prefix_len = sizeof(XATTR_USER_PREFIX)-1;
+ const size_t total_len = prefix_len + name_len + 1;
if (!test_opt(inode->i_sb, XATTR_USER))
return 0;
- if (list) {
+ if (list && total_len <= list_size) {
memcpy(list, XATTR_USER_PREFIX, prefix_len);
memcpy(list+prefix_len, name, name_len);
list[prefix_len + name_len] = '\0';
}
- return prefix_len + name_len + 1;
+ return total_len;
}
static int
@@ -68,23 +69,9 @@ ext2_xattr_user_set(struct inode *inode, const char *name,
value, size, flags);
}
-struct ext2_xattr_handler ext2_xattr_user_handler = {
+struct xattr_handler ext2_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
.list = ext2_xattr_user_list,
.get = ext2_xattr_user_get,
.set = ext2_xattr_user_set,
};
-
-int __init
-init_ext2_xattr_user(void)
-{
- return ext2_xattr_register(EXT2_XATTR_INDEX_USER,
- &ext2_xattr_user_handler);
-}
-
-void
-exit_ext2_xattr_user(void)
-{
- ext2_xattr_unregister(EXT2_XATTR_INDEX_USER,
- &ext2_xattr_user_handler);
-}
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index a3cf77de0e43..4a18653e831d 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -285,60 +285,24 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
return error;
}
-/*
- * Inode operation permission().
- *
- * inode->i_sem: don't care
- */
-int
-ext3_permission(struct inode *inode, int mask, struct nameidata *nd)
+static int
+ext3_check_acl(struct inode *inode, int mask)
{
- int mode = inode->i_mode;
-
- /* Nobody gets write access to a read-only fs */
- if ((mask & MAY_WRITE) && IS_RDONLY(inode) &&
- (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
- return -EROFS;
- /* Nobody gets write access to an immutable file */
- if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
- return -EACCES;
- if (current->fsuid == inode->i_uid) {
- mode >>= 6;
- } else if (test_opt(inode->i_sb, POSIX_ACL)) {
- struct posix_acl *acl;
-
- /* The access ACL cannot grant access if the group class
- permission bits don't contain all requested permissions. */
- if (((mode >> 3) & mask & S_IRWXO) != mask)
- goto check_groups;
- acl = ext3_get_acl(inode, ACL_TYPE_ACCESS);
- if (acl) {
- int error = posix_acl_permission(inode, acl, mask);
- posix_acl_release(acl);
- if (error == -EACCES)
- goto check_capabilities;
- return error;
- } else
- goto check_groups;
- } else {
-check_groups:
- if (in_group_p(inode->i_gid))
- mode >>= 3;
+ struct posix_acl *acl = ext3_get_acl(inode, ACL_TYPE_ACCESS);
+
+ if (acl) {
+ int error = posix_acl_permission(inode, acl, mask);
+ posix_acl_release(acl);
+ return error;
}
- if ((mode & mask & S_IRWXO) == mask)
- return 0;
-check_capabilities:
- /* Allowed to override Discretionary Access Control? */
- if (!(mask & MAY_EXEC) ||
- (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
- if (capable(CAP_DAC_OVERRIDE))
- return 0;
- /* Read and search granted if capable(CAP_DAC_READ_SEARCH) */
- if (capable(CAP_DAC_READ_SEARCH) && ((mask == MAY_READ) ||
- (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))))
- return 0;
- return -EACCES;
+ return -EAGAIN;
+}
+
+int
+ext3_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+ return generic_permission(inode, mask, ext3_check_acl);
}
/*
@@ -452,27 +416,27 @@ out:
* Extended attribute handlers
*/
static size_t
-ext3_xattr_list_acl_access(char *list, struct inode *inode,
- const char *name, int name_len)
+ext3_xattr_list_acl_access(struct inode *inode, char *list, size_t list_len,
+ const char *name, size_t name_len)
{
const size_t size = sizeof(XATTR_NAME_ACL_ACCESS);
if (!test_opt(inode->i_sb, POSIX_ACL))
return 0;
- if (list)
+ if (list && size <= list_len)
memcpy(list, XATTR_NAME_ACL_ACCESS, size);
return size;
}
static size_t
-ext3_xattr_list_acl_default(char *list, struct inode *inode,
- const char *name, int name_len)
+ext3_xattr_list_acl_default(struct inode *inode, char *list, size_t list_len,
+ const char *name, size_t name_len)
{
const size_t size = sizeof(XATTR_NAME_ACL_DEFAULT);
if (!test_opt(inode->i_sb, POSIX_ACL))
return 0;
- if (list)
+ if (list && size <= list_len)
memcpy(list, XATTR_NAME_ACL_DEFAULT, size);
return size;
}
@@ -572,45 +536,16 @@ ext3_xattr_set_acl_default(struct inode *inode, const char *name,
return ext3_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
}
-struct ext3_xattr_handler ext3_xattr_acl_access_handler = {
+struct xattr_handler ext3_xattr_acl_access_handler = {
.prefix = XATTR_NAME_ACL_ACCESS,
.list = ext3_xattr_list_acl_access,
.get = ext3_xattr_get_acl_access,
.set = ext3_xattr_set_acl_access,
};
-struct ext3_xattr_handler ext3_xattr_acl_default_handler = {
+struct xattr_handler ext3_xattr_acl_default_handler = {
.prefix = XATTR_NAME_ACL_DEFAULT,
.list = ext3_xattr_list_acl_default,
.get = ext3_xattr_get_acl_default,
.set = ext3_xattr_set_acl_default,
};
-
-void
-exit_ext3_acl(void)
-{
- ext3_xattr_unregister(EXT3_XATTR_INDEX_POSIX_ACL_ACCESS,
- &ext3_xattr_acl_access_handler);
- ext3_xattr_unregister(EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT,
- &ext3_xattr_acl_default_handler);
-}
-
-int __init
-init_ext3_acl(void)
-{
- int error;
-
- error = ext3_xattr_register(EXT3_XATTR_INDEX_POSIX_ACL_ACCESS,
- &ext3_xattr_acl_access_handler);
- if (error)
- goto fail;
- error = ext3_xattr_register(EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT,
- &ext3_xattr_acl_default_handler);
- if (error)
- goto fail;
- return 0;
-
-fail:
- exit_ext3_acl();
- return error;
-}
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 50ba6861a247..fb39ecb44b3b 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -132,10 +132,12 @@ struct file_operations ext3_file_operations = {
struct inode_operations ext3_file_inode_operations = {
.truncate = ext3_truncate,
.setattr = ext3_setattr,
- .setxattr = ext3_setxattr,
- .getxattr = ext3_getxattr,
+#ifdef CONFIG_EXT3_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ext3_listxattr,
- .removexattr = ext3_removexattr,
+ .removexattr = generic_removexattr,
+#endif
.permission = ext3_permission,
};
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 0e48f620d9e8..cf7225964a33 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1596,9 +1596,14 @@ out_stop:
if (end > inode->i_size) {
ei->i_disksize = end;
i_size_write(inode, end);
- err = ext3_mark_inode_dirty(handle, inode);
- if (!ret)
- ret = err;
+ /*
+ * We're going to return a positive `ret'
+ * here due to non-zero-length I/O, so there's
+ * no way of reporting error returns from
+ * ext3_mark_inode_dirty() to userspace. So
+ * ignore it.
+ */
+ ext3_mark_inode_dirty(handle, inode);
}
}
err = ext3_journal_stop(handle);
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index e7e287e19cfe..cf359ec81ed6 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -2352,18 +2352,22 @@ struct inode_operations ext3_dir_inode_operations = {
.mknod = ext3_mknod,
.rename = ext3_rename,
.setattr = ext3_setattr,
- .setxattr = ext3_setxattr,
- .getxattr = ext3_getxattr,
+#ifdef CONFIG_EXT3_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ext3_listxattr,
- .removexattr = ext3_removexattr,
+ .removexattr = generic_removexattr,
+#endif
.permission = ext3_permission,
};
struct inode_operations ext3_special_inode_operations = {
.setattr = ext3_setattr,
- .setxattr = ext3_setxattr,
- .getxattr = ext3_getxattr,
+#ifdef CONFIG_EXT3_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ext3_listxattr,
- .removexattr = ext3_removexattr,
+ .removexattr = generic_removexattr,
+#endif
.permission = ext3_permission,
};
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index a93964e67a2c..64d8840a6b7d 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1474,6 +1474,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
*/
sb->s_op = &ext3_sops;
sb->s_export_op = &ext3_export_ops;
+ sb->s_xattr = ext3_xattr_handlers;
#ifdef CONFIG_QUOTA
sb->s_qcop = &ext3_qctl_operations;
sb->dq_op = &ext3_quota_operations;
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c
index 867f713a102c..8c3e72818fb0 100644
--- a/fs/ext3/symlink.c
+++ b/fs/ext3/symlink.c
@@ -34,17 +34,21 @@ struct inode_operations ext3_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
- .setxattr = ext3_setxattr,
- .getxattr = ext3_getxattr,
+#ifdef CONFIG_EXT3_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ext3_listxattr,
- .removexattr = ext3_removexattr,
+ .removexattr = generic_removexattr,
+#endif
};
struct inode_operations ext3_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ext3_follow_link,
- .setxattr = ext3_setxattr,
- .getxattr = ext3_getxattr,
+#ifdef CONFIG_EXT3_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
.listxattr = ext3_listxattr,
- .removexattr = ext3_removexattr,
+ .removexattr = generic_removexattr,
+#endif
};
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index e6df2ba425d6..b06cd4de6830 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -7,6 +7,8 @@
* Ext3 code with a lot of help from Eric Jarman <ejarman@acm.org>.
* Extended attributes for symlinks and special files added per
* suggestion of Luka Renko <luka.renko@hermes.si>.
+ * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
+ * Red Hat Inc.
*/
/*
@@ -100,101 +102,40 @@ static void ext3_xattr_rehash(struct ext3_xattr_header *,
struct ext3_xattr_entry *);
static struct mb_cache *ext3_xattr_cache;
-static struct ext3_xattr_handler *ext3_xattr_handlers[EXT3_XATTR_INDEX_MAX];
-static rwlock_t ext3_handler_lock = RW_LOCK_UNLOCKED;
-int
-ext3_xattr_register(int name_index, struct ext3_xattr_handler *handler)
-{
- int error = -EINVAL;
-
- if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) {
- write_lock(&ext3_handler_lock);
- if (!ext3_xattr_handlers[name_index-1]) {
- ext3_xattr_handlers[name_index-1] = handler;
- error = 0;
- }
- write_unlock(&ext3_handler_lock);
- }
- return error;
-}
-
-void
-ext3_xattr_unregister(int name_index, struct ext3_xattr_handler *handler)
-{
- if (name_index > 0 || name_index <= EXT3_XATTR_INDEX_MAX) {
- write_lock(&ext3_handler_lock);
- ext3_xattr_handlers[name_index-1] = NULL;
- write_unlock(&ext3_handler_lock);
- }
-}
-
-static inline const char *
-strcmp_prefix(const char *a, const char *a_prefix)
-{
- while (*a_prefix && *a == *a_prefix) {
- a++;
- a_prefix++;
- }
- return *a_prefix ? NULL : a;
-}
+static struct xattr_handler *ext3_xattr_handler_map[EXT3_XATTR_INDEX_MAX] = {
+ [EXT3_XATTR_INDEX_USER] = &ext3_xattr_user_handler,
+#ifdef CONFIG_EXT3_FS_POSIX_ACL
+ [EXT3_XATTR_INDEX_POSIX_ACL_ACCESS] = &ext3_xattr_acl_access_handler,
+ [EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext3_xattr_acl_default_handler,
+#endif
+ [EXT3_XATTR_INDEX_TRUSTED] = &ext3_xattr_trusted_handler,
+#ifdef CONFIG_EXT3_FS_SECURITY
+ [EXT3_XATTR_INDEX_SECURITY] = &ext3_xattr_security_handler,
+#endif
+};
-/*
- * Decode the extended attribute name, and translate it into
- * the name_index and name suffix.
- */
-static inline struct ext3_xattr_handler *
-ext3_xattr_resolve_name(const char **name)
-{
- struct ext3_xattr_handler *handler = NULL;
- int i;
-
- if (!*name)
- return NULL;
- read_lock(&ext3_handler_lock);
- for (i=0; i<EXT3_XATTR_INDEX_MAX; i++) {
- if (ext3_xattr_handlers[i]) {
- const char *n = strcmp_prefix(*name,
- ext3_xattr_handlers[i]->prefix);
- if (n) {
- handler = ext3_xattr_handlers[i];
- *name = n;
- break;
- }
- }
- }
- read_unlock(&ext3_handler_lock);
- return handler;
-}
+struct xattr_handler *ext3_xattr_handlers[] = {
+ &ext3_xattr_user_handler,
+ &ext3_xattr_trusted_handler,
+#ifdef CONFIG_EXT3_FS_POSIX_ACL
+ &ext3_xattr_acl_access_handler,
+ &ext3_xattr_acl_default_handler,
+#endif
+#ifdef CONFIG_EXT3_FS_SECURITY
+ &ext3_xattr_security_handler,
+#endif
+ NULL
+};
-static inline struct ext3_xattr_handler *
+static inline struct xattr_handler *
ext3_xattr_handler(int name_index)
{
- struct ext3_xattr_handler *handler = NULL;
- if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) {
- read_lock(&ext3_handler_lock);
- handler = ext3_xattr_handlers[name_index-1];
- read_unlock(&ext3_handler_lock);
- }
- return handler;
-}
-
-/*
- * Inode operation getxattr()
- *
- * dentry->d_inode->i_sem: don't care
- */
-ssize_t
-ext3_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size)
-{
- struct ext3_xattr_handler *handler;
- struct inode *inode = dentry->d_inode;
+ struct xattr_handler *handler = NULL;
- handler = ext3_xattr_resolve_name(&name);
- if (!handler)
- return -EOPNOTSUPP;
- return handler->get(inode, name, buffer, size);
+ if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX)
+ handler = ext3_xattr_handler_map[name_index];
+ return handler;
}
/*
@@ -209,43 +150,6 @@ ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
}
/*
- * Inode operation setxattr()
- *
- * dentry->d_inode->i_sem: down
- */
-int
-ext3_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
-{
- struct ext3_xattr_handler *handler;
- struct inode *inode = dentry->d_inode;
-
- if (size == 0)
- value = ""; /* empty EA, do not remove */
- handler = ext3_xattr_resolve_name(&name);
- if (!handler)
- return -EOPNOTSUPP;
- return handler->set(inode, name, value, size, flags);
-}
-
-/*
- * Inode operation removexattr()
- *
- * dentry->d_inode->i_sem: down
- */
-int
-ext3_removexattr(struct dentry *dentry, const char *name)
-{
- struct ext3_xattr_handler *handler;
- struct inode *inode = dentry->d_inode;
-
- handler = ext3_xattr_resolve_name(&name);
- if (!handler)
- return -EOPNOTSUPP;
- return handler->set(inode, name, NULL, 0, XATTR_REPLACE);
-}
-
-/*
* ext3_xattr_get()
*
* Copy an extended attribute into the buffer
@@ -363,8 +267,8 @@ ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
{
struct buffer_head *bh = NULL;
struct ext3_xattr_entry *entry;
- size_t size = 0;
- char *buf, *end;
+ char *end;
+ size_t rest = buffer_size;
int error;
ea_idebug(inode, "buffer=%p, buffer_size=%ld",
@@ -390,44 +294,40 @@ bad_block: ext3_error(inode->i_sb, "ext3_xattr_list",
error = -EIO;
goto cleanup;
}
- /* compute the size required for the list of attribute names */
- for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
- entry = EXT3_XATTR_NEXT(entry)) {
- struct ext3_xattr_handler *handler;
- struct ext3_xattr_entry *next =
- EXT3_XATTR_NEXT(entry);
+
+ /* check the on-disk data structure */
+ entry = FIRST_ENTRY(bh);
+ while (!IS_LAST_ENTRY(entry)) {
+ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(entry);
+
if ((char *)next >= end)
goto bad_block;
-
- handler = ext3_xattr_handler(entry->e_name_index);
- if (handler)
- size += handler->list(NULL, inode, entry->e_name,
- entry->e_name_len);
+ entry = next;
}
-
if (ext3_xattr_cache_insert(bh))
ea_idebug(inode, "cache insert failed");
- if (!buffer) {
- error = size;
- goto cleanup;
- } else {
- error = -ERANGE;
- if (size > buffer_size)
- goto cleanup;
- }
/* list the attribute names */
- buf = buffer;
for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
entry = EXT3_XATTR_NEXT(entry)) {
- struct ext3_xattr_handler *handler;
-
- handler = ext3_xattr_handler(entry->e_name_index);
- if (handler)
- buf += handler->list(buf, inode, entry->e_name,
- entry->e_name_len);
+ struct xattr_handler *handler =
+ ext3_xattr_handler(entry->e_name_index);
+
+ if (handler) {
+ size_t size = handler->list(inode, buffer, rest,
+ entry->e_name,
+ entry->e_name_len);
+ if (buffer) {
+ if (size > rest) {
+ error = -ERANGE;
+ goto cleanup;
+ }
+ buffer += size;
+ }
+ rest -= size;
+ }
}
- error = size;
+ error = buffer_size - rest; /* total size */
cleanup:
brelse(bh);
@@ -1179,51 +1079,12 @@ static void ext3_xattr_rehash(struct ext3_xattr_header *header,
int __init
init_ext3_xattr(void)
{
- int err;
-
- err = ext3_xattr_register(EXT3_XATTR_INDEX_USER,
- &ext3_xattr_user_handler);
- if (err)
- return err;
- err = ext3_xattr_register(EXT3_XATTR_INDEX_TRUSTED,
- &ext3_xattr_trusted_handler);
- if (err)
- goto out;
-#ifdef CONFIG_EXT3_FS_SECURITY
- err = ext3_xattr_register(EXT3_XATTR_INDEX_SECURITY,
- &ext3_xattr_security_handler);
- if (err)
- goto out1;
-#endif
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
- err = init_ext3_acl();
- if (err)
- goto out2;
-#endif
ext3_xattr_cache = mb_cache_create("ext3_xattr", NULL,
sizeof(struct mb_cache_entry) +
sizeof(struct mb_cache_entry_index), 1, 6);
- if (!ext3_xattr_cache) {
- err = -ENOMEM;
- goto out3;
- }
+ if (!ext3_xattr_cache)
+ return -ENOMEM;
return 0;
-out3:
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
- exit_ext3_acl();
-out2:
-#endif
-#ifdef CONFIG_EXT3_FS_SECURITY
- ext3_xattr_unregister(EXT3_XATTR_INDEX_SECURITY,
- &ext3_xattr_security_handler);
-out1:
-#endif
- ext3_xattr_unregister(EXT3_XATTR_INDEX_TRUSTED,
- &ext3_xattr_trusted_handler);
-out:
- ext3_xattr_unregister(EXT3_XATTR_INDEX_USER,
- &ext3_xattr_user_handler);
- return err;
}
void
@@ -1232,15 +1093,4 @@ exit_ext3_xattr(void)
if (ext3_xattr_cache)
mb_cache_destroy(ext3_xattr_cache);
ext3_xattr_cache = NULL;
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
- exit_ext3_acl();
-#endif
-#ifdef CONFIG_EXT3_FS_SECURITY
- ext3_xattr_unregister(EXT3_XATTR_INDEX_SECURITY,
- &ext3_xattr_security_handler);
-#endif
- ext3_xattr_unregister(EXT3_XATTR_INDEX_TRUSTED,
- &ext3_xattr_trusted_handler);
- ext3_xattr_unregister(EXT3_XATTR_INDEX_USER,
- &ext3_xattr_user_handler);
}
diff --git a/fs/ext3/xattr.h b/fs/ext3/xattr.h
index 2aabe9f591d2..6ca51c0e65fa 100644
--- a/fs/ext3/xattr.h
+++ b/fs/ext3/xattr.h
@@ -56,23 +56,13 @@ struct ext3_xattr_entry {
# ifdef CONFIG_EXT3_FS_XATTR
-struct ext3_xattr_handler {
- char *prefix;
- size_t (*list)(char *list, struct inode *inode, const char *name,
- int name_len);
- int (*get)(struct inode *inode, const char *name, void *buffer,
- size_t size);
- int (*set)(struct inode *inode, const char *name, const void *buffer,
- size_t size, int flags);
-};
-
-extern int ext3_xattr_register(int, struct ext3_xattr_handler *);
-extern void ext3_xattr_unregister(int, struct ext3_xattr_handler *);
+extern struct xattr_handler ext3_xattr_user_handler;
+extern struct xattr_handler ext3_xattr_trusted_handler;
+extern struct xattr_handler ext3_xattr_acl_access_handler;
+extern struct xattr_handler ext3_xattr_acl_default_handler;
+extern struct xattr_handler ext3_xattr_security_handler;
-extern int ext3_setxattr(struct dentry *, const char *, const void *, size_t, int);
-extern ssize_t ext3_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t ext3_listxattr(struct dentry *, char *, size_t);
-extern int ext3_removexattr(struct dentry *, const char *);
extern int ext3_xattr_get(struct inode *, int, const char *, void *, size_t);
extern int ext3_xattr_list(struct inode *, char *, size_t);
@@ -85,11 +75,9 @@ extern void ext3_xattr_put_super(struct super_block *);
extern int init_ext3_xattr(void);
extern void exit_ext3_xattr(void);
+extern struct xattr_handler *ext3_xattr_handlers[];
+
# else /* CONFIG_EXT3_FS_XATTR */
-# define ext3_setxattr NULL
-# define ext3_getxattr NULL
-# define ext3_listxattr NULL
-# define ext3_removexattr NULL
static inline int
ext3_xattr_get(struct inode *inode, int name_index, const char *name,
@@ -139,8 +127,6 @@ exit_ext3_xattr(void)
{
}
-# endif /* CONFIG_EXT3_FS_XATTR */
+#define ext3_xattr_handlers NULL
-extern struct ext3_xattr_handler ext3_xattr_user_handler;
-extern struct ext3_xattr_handler ext3_xattr_trusted_handler;
-extern struct ext3_xattr_handler ext3_xattr_security_handler;
+# endif /* CONFIG_EXT3_FS_XATTR */
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
index c948effaa257..ddc1c41750e1 100644
--- a/fs/ext3/xattr_security.c
+++ b/fs/ext3/xattr_security.c
@@ -12,17 +12,19 @@
#include "xattr.h"
static size_t
-ext3_xattr_security_list(char *list, struct inode *inode,
- const char *name, int name_len)
+ext3_xattr_security_list(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
{
- const int prefix_len = sizeof(XATTR_SECURITY_PREFIX)-1;
+ const size_t prefix_len = sizeof(XATTR_SECURITY_PREFIX)-1;
+ const size_t total_len = prefix_len + name_len + 1;
- if (list) {
+
+ if (list && total_len <= list_size) {
memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
memcpy(list+prefix_len, name, name_len);
list[prefix_len + name_len] = '\0';
}
- return prefix_len + name_len + 1;
+ return total_len;
}
static int
@@ -45,7 +47,7 @@ ext3_xattr_security_set(struct inode *inode, const char *name,
value, size, flags);
}
-struct ext3_xattr_handler ext3_xattr_security_handler = {
+struct xattr_handler ext3_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = ext3_xattr_security_list,
.get = ext3_xattr_security_get,
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
index 45b1549c4491..f68bfd1cf519 100644
--- a/fs/ext3/xattr_trusted.c
+++ b/fs/ext3/xattr_trusted.c
@@ -16,20 +16,21 @@
#define XATTR_TRUSTED_PREFIX "trusted."
static size_t
-ext3_xattr_trusted_list(char *list, struct inode *inode,
- const char *name, int name_len)
+ext3_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
{
- const int prefix_len = sizeof(XATTR_TRUSTED_PREFIX)-1;
+ const size_t prefix_len = sizeof(XATTR_TRUSTED_PREFIX)-1;
+ const size_t total_len = prefix_len + name_len + 1;
if (!capable(CAP_SYS_ADMIN))
return 0;
- if (list) {
+ if (list && total_len <= list_size) {
memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
memcpy(list+prefix_len, name, name_len);
list[prefix_len + name_len] = '\0';
}
- return prefix_len + name_len + 1;
+ return total_len;
}
static int
@@ -56,7 +57,7 @@ ext3_xattr_trusted_set(struct inode *inode, const char *name,
value, size, flags);
}
-struct ext3_xattr_handler ext3_xattr_trusted_handler = {
+struct xattr_handler ext3_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.list = ext3_xattr_trusted_list,
.get = ext3_xattr_trusted_get,
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
index 84877afff67a..e907cae7a07c 100644
--- a/fs/ext3/xattr_user.c
+++ b/fs/ext3/xattr_user.c
@@ -16,20 +16,21 @@
#define XATTR_USER_PREFIX "user."
static size_t
-ext3_xattr_user_list(char *list, struct inode *inode,
- const char *name, int name_len)
+ext3_xattr_user_list(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len)
{
- const int prefix_len = sizeof(XATTR_USER_PREFIX)-1;
+ const size_t prefix_len = sizeof(XATTR_USER_PREFIX)-1;
+ const size_t total_len = prefix_len + name_len + 1;
if (!test_opt(inode->i_sb, XATTR_USER))
return 0;
- if (list) {
+ if (list && total_len <= list_size) {
memcpy(list, XATTR_USER_PREFIX, prefix_len);
memcpy(list+prefix_len, name, name_len);
list[prefix_len + name_len] = '\0';
}
- return prefix_len + name_len + 1;
+ return total_len;
}
static int
@@ -70,7 +71,7 @@ ext3_xattr_user_set(struct inode *inode, const char *name,
value, size, flags);
}
-struct ext3_xattr_handler ext3_xattr_user_handler = {
+struct xattr_handler ext3_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
.list = ext3_xattr_user_list,
.get = ext3_xattr_user_get,
diff --git a/fs/fcntl.c b/fs/fcntl.c
index ee380e7b9569..da78d7bba880 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -291,8 +291,6 @@ void f_delown(struct file *filp)
f_modown(filp, 0, 0, 0, 1);
}
-EXPORT_SYMBOL(f_delown);
-
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
struct file *filp)
{
diff --git a/fs/file_table.c b/fs/file_table.c
index 3750a140ef43..e9ce5279ef6d 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -24,11 +24,9 @@ struct files_stat_struct files_stat = {
EXPORT_SYMBOL(files_stat); /* Needed by unix.o */
-/* public *and* exported. Not pretty! */
+/* public. Not pretty! */
spinlock_t __cacheline_aligned_in_smp files_lock = SPIN_LOCK_UNLOCKED;
-EXPORT_SYMBOL(files_lock);
-
static spinlock_t filp_count_lock = SPIN_LOCK_UNLOCKED;
/* slab constructors and destructors are called from arbitrary
@@ -199,8 +197,6 @@ void put_filp(struct file *file)
}
}
-EXPORT_SYMBOL(put_filp);
-
void file_move(struct file *file, struct list_head *list)
{
if (!list)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index da522d511f2d..969e9b0e0afc 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -244,6 +244,8 @@ static int
__writeback_single_inode(struct inode *inode,
struct writeback_control *wbc)
{
+ wait_queue_head_t *wqh;
+
if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_LOCK)) {
list_move(&inode->i_list, &inode->i_sb->s_dirty);
return 0;
@@ -252,12 +254,18 @@ __writeback_single_inode(struct inode *inode,
/*
* It's a data-integrity sync. We must wait.
*/
- while (inode->i_state & I_LOCK) {
- __iget(inode);
- spin_unlock(&inode_lock);
- __wait_on_inode(inode);
- iput(inode);
- spin_lock(&inode_lock);
+ if (inode->i_state & I_LOCK) {
+ DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LOCK);
+
+ wqh = bit_waitqueue(&inode->i_state, __I_LOCK);
+ do {
+ __iget(inode);
+ spin_unlock(&inode_lock);
+ __wait_on_bit(wqh, &wq, inode_wait,
+ TASK_UNINTERRUPTIBLE);
+ iput(inode);
+ spin_lock(&inode_lock);
+ } while (inode->i_state & I_LOCK);
}
return __sync_single_inode(inode, wbc);
}
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 6c869f377964..ae55ce1a5405 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -517,7 +517,7 @@ static int hfs_permission(struct inode *inode, int mask,
{
if (S_ISREG(inode->i_mode) && mask & MAY_EXEC)
return 0;
- return vfs_permission(inode, mask);
+ return generic_permission(inode, mask, NULL);
}
static int hfs_file_open(struct inode *inode, struct file *file)
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index eff1c987b6fb..f58025cb38af 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -260,7 +260,7 @@ static int hfsplus_permission(struct inode *inode, int mask, struct nameidata *n
*/
if (S_ISREG(inode->i_mode) && mask & MAY_EXEC && !(inode->i_mode & 0111))
return 0;
- return vfs_permission(inode, mask);
+ return generic_permission(inode, mask, NULL);
}
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 7d42da0a304d..937fac26b34a 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -808,7 +808,7 @@ int hostfs_permission(struct inode *ino, int desired, struct nameidata *nd)
if(name == NULL) return(-ENOMEM);
err = access_file(name, r, w, x);
kfree(name);
- if(!err) err = vfs_permission(ino, desired);
+ if(!err) err = generic_permission(ino, desired, NULL);
return(err);
}
diff --git a/fs/inode.c b/fs/inode.c
index 8cd74200bdff..1fa7de5e8f84 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1264,37 +1264,10 @@ void remove_dquot_ref(struct super_block *sb, int type, struct list_head *tofree
#endif
-/*
- * Hashed waitqueues for wait_on_inode(). The table is pretty small - the
- * kernel doesn't lock many inodes at the same time.
- */
-#define I_WAIT_TABLE_ORDER 3
-static struct i_wait_queue_head {
- wait_queue_head_t wqh;
-} ____cacheline_aligned_in_smp i_wait_queue_heads[1<<I_WAIT_TABLE_ORDER];
-
-/*
- * Return the address of the waitqueue_head to be used for this inode
- */
-static wait_queue_head_t *i_waitq_head(struct inode *inode)
-{
- return &i_wait_queue_heads[hash_ptr(inode, I_WAIT_TABLE_ORDER)].wqh;
-}
-
-void __wait_on_inode(struct inode *inode)
+int inode_wait(void *word)
{
- DECLARE_WAITQUEUE(wait, current);
- wait_queue_head_t *wq = i_waitq_head(inode);
-
- add_wait_queue(wq, &wait);
-repeat:
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (inode->i_state & I_LOCK) {
- schedule();
- goto repeat;
- }
- remove_wait_queue(wq, &wait);
- __set_current_state(TASK_RUNNING);
+ schedule();
+ return 0;
}
/*
@@ -1303,36 +1276,39 @@ repeat:
* that it isn't found. This is because iget will immediately call
* ->read_inode, and we want to be sure that evidence of the deletion is found
* by ->read_inode.
- *
- * This call might return early if an inode which shares the waitq is woken up.
- * This is most easily handled by the caller which will loop around again
- * looking for the inode.
- *
* This is called with inode_lock held.
*/
static void __wait_on_freeing_inode(struct inode *inode)
{
- DECLARE_WAITQUEUE(wait, current);
- wait_queue_head_t *wq = i_waitq_head(inode);
+ wait_queue_head_t *wq;
+ DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
- add_wait_queue(wq, &wait);
- set_current_state(TASK_UNINTERRUPTIBLE);
+ /*
+ * I_FREEING and I_CLEAR are cleared in process context under
+ * inode_lock, so we have to give the tasks who would clear them
+ * a chance to run and acquire inode_lock.
+ */
+ if (!(inode->i_state & I_LOCK)) {
+ spin_unlock(&inode_lock);
+ yield();
+ spin_lock(&inode_lock);
+ return;
+ }
+ wq = bit_waitqueue(&inode->i_state, __I_LOCK);
+ prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
spin_unlock(&inode_lock);
schedule();
- remove_wait_queue(wq, &wait);
+ finish_wait(wq, &wait.wait);
spin_lock(&inode_lock);
}
void wake_up_inode(struct inode *inode)
{
- wait_queue_head_t *wq = i_waitq_head(inode);
-
/*
* Prevent speculative execution through spin_unlock(&inode_lock);
*/
smp_mb();
- if (waitqueue_active(wq))
- wake_up_all(wq);
+ wake_up_bit(&inode->i_state, __I_LOCK);
}
static __initdata unsigned long ihash_entries;
@@ -1367,11 +1343,6 @@ void __init inode_init_early(void)
void __init inode_init(unsigned long mempages)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(i_wait_queue_heads); i++)
- init_waitqueue_head(&i_wait_queue_heads[i].wqh);
-
/* inode slab cache */
inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
0, SLAB_PANIC, init_once, NULL);
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index f8a1dea56611..b4d6654ef7f2 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -579,7 +579,7 @@ wait_for_iobuf:
journal_file_buffer(jh, commit_transaction, BJ_Forget);
/* Wake up any transactions which were waiting for this
IO to complete */
- wake_up_buffer(bh);
+ wake_up_bit(&bh->b_state, BH_Unshadow);
JBUFFER_TRACE(jh, "brelse shadowed buffer");
__brelse(bh);
}
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 18a678ce2591..a168757d26af 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -633,21 +633,22 @@ repeat:
* disk then we cannot do copy-out here. */
if (jh->b_jlist == BJ_Shadow) {
+ DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
wait_queue_head_t *wqh;
- DEFINE_WAIT(wait);
+
+ wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);
JBUFFER_TRACE(jh, "on shadow: sleep");
jbd_unlock_bh_state(bh);
/* commit wakes up all shadow buffers after IO */
- wqh = bh_waitq_head(bh);
for ( ; ; ) {
- prepare_to_wait(wqh, &wait,
+ prepare_to_wait(wqh, &wait.wait,
TASK_UNINTERRUPTIBLE);
if (jh->b_jlist != BJ_Shadow)
break;
schedule();
}
- finish_wait(wqh, &wait);
+ finish_wait(wqh, &wait.wait);
goto repeat;
}
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index fc584f2194f4..8d2a9ab981d4 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -123,88 +123,25 @@ out:
return rc;
}
-/*
- * jfs_permission()
- *
- * modified vfs_permission to check posix acl
- */
-int jfs_permission(struct inode * inode, int mask, struct nameidata *nd)
+static int jfs_check_acl(struct inode *inode, int mask)
{
- umode_t mode = inode->i_mode;
struct jfs_inode_info *ji = JFS_IP(inode);
- if (mask & MAY_WRITE) {
- /*
- * Nobody gets write access to a read-only fs.
- */
- if (IS_RDONLY(inode) &&
- (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
- return -EROFS;
-
- /*
- * Nobody gets write access to an immutable file.
- */
- if (IS_IMMUTABLE(inode))
- return -EACCES;
- }
-
- if (current->fsuid == inode->i_uid) {
- mode >>= 6;
- goto check_mode;
- }
- /*
- * ACL can't contain additional permissions if the ACL_MASK entry
- * is zero.
- */
- if (!(mode & S_IRWXG))
- goto check_groups;
-
if (ji->i_acl == JFS_ACL_NOT_CACHED) {
- struct posix_acl *acl;
-
- acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
-
+ struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
posix_acl_release(acl);
}
- if (ji->i_acl) {
- int rc = posix_acl_permission(inode, ji->i_acl, mask);
- if (rc == -EACCES)
- goto check_capabilities;
- return rc;
- }
-
-check_groups:
- if (in_group_p(inode->i_gid))
- mode >>= 3;
-
-check_mode:
- /*
- * If the DACs are ok we don't need any capability check.
- */
- if (((mode & mask & (MAY_READ|MAY_WRITE|MAY_EXEC)) == mask))
- return 0;
+ if (ji->i_acl)
+ return posix_acl_permission(inode, ji->i_acl, mask);
+ return -EAGAIN;
+}
-check_capabilities:
- /*
- * Read/write DACs are always overridable.
- * Executable DACs are overridable if at least one exec bit is set.
- */
- if (!(mask & MAY_EXEC) ||
- (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
- if (capable(CAP_DAC_OVERRIDE))
- return 0;
-
- /*
- * Searching includes executable on directories, else just read.
- */
- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
- if (capable(CAP_DAC_READ_SEARCH))
- return 0;
-
- return -EACCES;
+int jfs_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+ return generic_permission(inode, mask, jfs_check_acl);
}
int jfs_init_acl(struct inode *inode, struct inode *dir)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 7c91ccfe382f..ecf1bca73050 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -403,6 +403,10 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
}
sbi->flag = flag;
+#ifdef CONFIG_JFS_POSIX_ACL
+ sb->s_flags |= MS_POSIXACL;
+#endif
+
if (newLVSize) {
printk(KERN_ERR "resize option for remount only\n");
return -EINVAL;
diff --git a/fs/mbcache.c b/fs/mbcache.c
index dbc4443e6949..988161cb0a77 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -65,9 +65,7 @@ EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
-EXPORT_SYMBOL(mb_cache_entry_takeout);
EXPORT_SYMBOL(mb_cache_entry_free);
-EXPORT_SYMBOL(mb_cache_entry_dup);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
@@ -456,23 +454,6 @@ mb_cache_entry_release(struct mb_cache_entry *ce)
/*
- * mb_cache_entry_takeout()
- *
- * Take a cache entry out of the cache, making it invalid. The entry can later
- * be re-inserted using mb_cache_entry_insert(), or released using
- * mb_cache_entry_release().
- */
-void
-mb_cache_entry_takeout(struct mb_cache_entry *ce)
-{
- spin_lock(&mb_cache_spinlock);
- mb_assert(list_empty(&ce->e_lru_list));
- __mb_cache_entry_unhash(ce);
- spin_unlock(&mb_cache_spinlock);
-}
-
-
-/*
* mb_cache_entry_free()
*
* This is equivalent to the sequence mb_cache_entry_takeout() --
@@ -489,20 +470,6 @@ mb_cache_entry_free(struct mb_cache_entry *ce)
/*
- * mb_cache_entry_dup()
- *
- * Duplicate a handle to a cache entry (does not duplicate the cache entry
- * itself). After the call, both the old and the new handle must be released.
- */
-struct mb_cache_entry *
-mb_cache_entry_dup(struct mb_cache_entry *ce)
-{
- atomic_inc(&ce->e_used);
- return ce;
-}
-
-
-/*
* mb_cache_entry_get()
*
* Get a cache entry by device / block number. (There can only be one entry
diff --git a/fs/namei.c b/fs/namei.c
index b00bcaa91ee2..287339bb7b7c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -152,15 +152,19 @@ char * getname(const char __user * filename)
return result;
}
-/*
- * vfs_permission()
+/**
+ * generic_permission - check for access rights on a Posix-like filesystem
+ * @inode: inode to check access rights for
+ * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
+ * @check_acl: optional callback to check for Posix ACLs
*
- * is used to check for read/write/execute permissions on a file.
+ * Used to check for read/write/execute permissions on a file.
* We use "fsuid" for this, letting us set arbitrary permissions
* for filesystem access without changing the "normal" uids which
* are used for other things..
*/
-int vfs_permission(struct inode * inode, int mask)
+int generic_permission(struct inode *inode, int mask,
+ int (*check_acl)(struct inode *inode, int mask))
{
umode_t mode = inode->i_mode;
@@ -181,8 +185,18 @@ int vfs_permission(struct inode * inode, int mask)
if (current->fsuid == inode->i_uid)
mode >>= 6;
- else if (in_group_p(inode->i_gid))
- mode >>= 3;
+ else {
+ if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) {
+ int error = check_acl(inode, mask);
+ if (error == -EACCES)
+ goto check_capabilities;
+ else if (error != -EAGAIN)
+ return error;
+ }
+
+ if (in_group_p(inode->i_gid))
+ mode >>= 3;
+ }
/*
* If the DACs are ok we don't need any capability check.
@@ -190,6 +204,7 @@ int vfs_permission(struct inode * inode, int mask)
if (((mode & mask & (MAY_READ|MAY_WRITE|MAY_EXEC)) == mask))
return 0;
+ check_capabilities:
/*
* Read/write DACs are always overridable.
* Executable DACs are overridable if at least one exec bit is set.
@@ -220,7 +235,7 @@ int permission(struct inode * inode,int mask, struct nameidata *nd)
if (inode->i_op && inode->i_op->permission)
retval = inode->i_op->permission(inode, submask, nd);
else
- retval = vfs_permission(inode, submask);
+ retval = generic_permission(inode, submask, NULL);
if (retval)
return retval;
@@ -315,7 +330,7 @@ static struct dentry * cached_lookup(struct dentry * parent, struct qstr * name,
/*
* Short-cut version of permission(), for calling by
* path_walk(), when dcache lock is held. Combines parts
- * of permission() and vfs_permission(), and tests ONLY for
+ * of permission() and generic_permission(), and tests ONLY for
* MAY_EXEC permission.
*
* If appropriate, check DAC only. If not appropriate, or
@@ -2438,7 +2453,6 @@ EXPORT_SYMBOL(follow_up);
EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
EXPORT_SYMBOL(getname);
EXPORT_SYMBOL(lock_rename);
-EXPORT_SYMBOL(lookup_create);
EXPORT_SYMBOL(lookup_hash);
EXPORT_SYMBOL(lookup_one_len);
EXPORT_SYMBOL(page_follow_link);
@@ -2457,7 +2471,7 @@ EXPORT_SYMBOL(vfs_follow_link);
EXPORT_SYMBOL(vfs_link);
EXPORT_SYMBOL(vfs_mkdir);
EXPORT_SYMBOL(vfs_mknod);
-EXPORT_SYMBOL(vfs_permission);
+EXPORT_SYMBOL(generic_permission);
EXPORT_SYMBOL(vfs_readlink);
EXPORT_SYMBOL(vfs_rename);
EXPORT_SYMBOL(vfs_rmdir);
diff --git a/fs/namespace.c b/fs/namespace.c
index 961b6ae00458..b09b08807f2c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1209,8 +1209,6 @@ void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
}
}
-EXPORT_SYMBOL(set_fs_root);
-
/*
* Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
* It can block. Requires the big lock held.
@@ -1234,8 +1232,6 @@ void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
}
}
-EXPORT_SYMBOL(set_fs_pwd);
-
static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
{
struct task_struct *g, *p;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 626252aec95a..eac30582ff9e 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1600,7 +1600,7 @@ int nfs_permission(struct inode *inode, int mask, struct nameidata *nd)
return res;
out_notsup:
nfs_revalidate_inode(NFS_SERVER(inode), inode);
- res = vfs_permission(inode, mask);
+ res = generic_permission(inode, mask, NULL);
unlock_kernel();
return res;
}
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index 0302c351217f..b42f7bc9592d 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -21,7 +21,129 @@ ToDo/Notes:
- Enable the code for setting the NT4 compatibility flag when we start
making NTFS 1.2 specific modifications.
-2.1.20 - Fix a stupid bug in ntfs_attr_reinit_search_ctx().
+2.1.21 - Fix some races and bugs, rewrite mft write code, add mft allocator.
+
+ - Implement extent mft record deallocation
+ fs/ntfs/mft.c::ntfs_extent_mft_record_free().
+ - Split runlist-related functions off from attrib.[hc] to runlist.[hc].
+ - Add vol->mft_data_pos and initialize it at mount time.
+ - Rename init_runlist() to ntfs_init_runlist(), ntfs_vcn_to_lcn() to
+ ntfs_rl_vcn_to_lcn(), decompress_mapping_pairs() to
+ ntfs_mapping_pairs_decompress(), ntfs_merge_runlists() to
+ ntfs_runlists_merge() and adapt all callers.
+ - Add fs/ntfs/runlist.[hc]::ntfs_get_nr_significant_bytes(),
+ ntfs_get_size_for_mapping_pairs(), ntfs_write_significant_bytes(),
+ and ntfs_mapping_pairs_build(), adapted from libntfs.
+ - Make fs/ntfs/lcnalloc.c::ntfs_cluster_free_from_rl_nolock() not
+ static and add a declaration for it to lcnalloc.h.
+ - Add fs/ntfs/lcnalloc.h::ntfs_cluster_free_from_rl() which is a static
+ inline wrapper for ntfs_cluster_free_from_rl_nolock() which takes the
+ cluster bitmap lock for the duration of the call.
+ - Add fs/ntfs/attrib.[hc]::ntfs_attr_record_resize().
+ - Implement the equivalent of memset() for an ntfs attribute in
+ fs/ntfs/attrib.[hc]::ntfs_attr_set() and switch
+ fs/ntfs/logfile.c::ntfs_empty_logfile() to using it.
+ - Remove unnecessary casts from LCN_* constants.
+ - Implement fs/ntfs/runlist.c::ntfs_rl_truncate_nolock().
+ - Add MFT_RECORD_OLD as a copy of MFT_RECORD in fs/ntfs/layout.h and
+ change MFT_RECORD to contain the NTFS 3.1+ specific fields.
+ - Add a helper function fs/ntfs/aops.c::mark_ntfs_record_dirty() which
+ marks all buffers belonging to an ntfs record dirty, followed by
+ marking the page the ntfs record is in dirty and also marking the vfs
+ inode containing the ntfs record dirty (I_DIRTY_PAGES).
+ - Switch fs/ntfs/index.h::ntfs_index_entry_mark_dirty() to using the
+ new helper fs/ntfs/aops.c::mark_ntfs_record_dirty() and remove the no
+ longer needed fs/ntfs/index.[hc]::__ntfs_index_entry_mark_dirty().
+ - Move ntfs_{un,}map_page() from ntfs.h to aops.h and fix resulting
+ include errors.
+ - Move the typedefs for runlist_element and runlist from types.h to
+ runlist.h and fix resulting include errors.
+ - Remove unused {__,}format_mft_record() from fs/ntfs/mft.c.
+ - Modify fs/ntfs/mft.c::__mark_mft_record_dirty() to use the helper
+ mark_ntfs_record_dirty() which also changes the behaviour in that we
+ now set the buffers belonging to the mft record dirty as well as the
+ page itself.
+ - Update fs/ntfs/mft.c::write_mft_record_nolock() and sync_mft_mirror()
+ to cope with the fact that there now are dirty buffers in mft pages.
+ - Update fs/ntfs/inode.c::ntfs_write_inode() to also use the helper
+ mark_ntfs_record_dirty() and thus to set the buffers belonging to the
+ mft record dirty as well as the page itself.
+ - Fix compiler warnings on x86-64 in fs/ntfs/dir.c. (Randy Dunlap,
+ slightly modified by me)
+ - Add fs/ntfs/mft.c::try_map_mft_record() which fails with -EALREADY if
+ the mft record is already locked and otherwise behaves the same way
+ as fs/ntfs/mft.c::map_mft_record().
+ - Modify fs/ntfs/mft.c::write_mft_record_nolock() so that it only
+ writes the mft record if the buffers belonging to it are dirty.
+ Otherwise we assume that it was written out by other means already.
+ - Attempting to write outside initialized size is _not_ a bug so remove
+ the bug check from fs/ntfs/aops.c::ntfs_write_mst_block(). It is in
+ fact required to write outside initialized size when preparing to
+ extend the initialized size.
+ - Map the page instead of using page_address() before writing to it in
+ fs/ntfs/aops.c::ntfs_mft_writepage().
+ - Provide exclusion between opening an inode / mapping an mft record
+ and accessing the mft record in fs/ntfs/mft.c::ntfs_mft_writepage()
+ by setting the page not uptodate throughout ntfs_mft_writepage().
+ - Clear the page uptodate flag in fs/ntfs/aops.c::ntfs_write_mst_block()
+ to ensure no one can see the page whilst the mst fixups are applied.
+ - Add the helper fs/ntfs/mft.c::ntfs_may_write_mft_record() which
+ checks if an mft record may be written out safely obtaining any
+ necessary locks in the process. This is used by
+ fs/ntfs/aops.c::ntfs_write_mst_block().
+ - Modify fs/ntfs/aops.c::ntfs_write_mst_block() to also work for
+ writing mft records and improve its error handling in the process.
+ Now if any of the records in the page fail to be written out, all
+ other records will be written out instead of aborting completely.
+ - Remove ntfs_mft_aops and update all users to use ntfs_mst_aops.
+ - Modify fs/ntfs/inode.c::ntfs_read_locked_inode() to set the
+ ntfs_mst_aops for all inodes which are NInoMstProtected() and
+ ntfs_aops for all other inodes.
+ - Rename fs/ntfs/mft.c::sync_mft_mirror{,_umount}() to
+ ntfs_sync_mft_mirror{,_umount}() and change their parameters so they
+ no longer require an ntfs inode to be present. Update all callers.
+ - Cleanup the error handling in fs/ntfs/mft.c::ntfs_sync_mft_mirror().
+ - Clear the page uptodate flag in fs/ntfs/mft.c::ntfs_sync_mft_mirror()
+ to ensure no one can see the page whilst the mst fixups are applied.
+ - Remove the no longer needed fs/ntfs/mft.c::ntfs_mft_writepage() and
+ fs/ntfs/mft.c::try_map_mft_record().
+ - Fix callers of fs/ntfs/aops.c::mark_ntfs_record_dirty() to call it
+ with the ntfs inode which contains the page rather than the ntfs
+ inode the mft record of which is in the page.
+ - Fix race condition in fs/ntfs/inode.c::ntfs_put_inode() by moving the
+ index inode bitmap inode release code from there to
+ fs/ntfs/inode.c::ntfs_clear_big_inode(). (Thanks to Christoph
+ Hellwig for spotting this.)
+ - Fix race condition in fs/ntfs/inode.c::ntfs_put_inode() by taking the
+ inode semaphore around the code that sets ni->itype.index.bmp_ino to
+ NULL and reorganize the code to optimize it a bit. (Thanks to
+ Christoph Hellwig for spotting this.)
+ - Modify fs/ntfs/aops.c::mark_ntfs_record_dirty() to no longer take the
+ ntfs inode as a parameter as this is confusing and misleading and the
+ needed ntfs inode is available via NTFS_I(page->mapping->host).
+ Adapt all callers to this change.
+ - Modify fs/ntfs/mft.c::write_mft_record_nolock() and
+ fs/ntfs/aops.c::ntfs_write_mst_block() to only check the dirty state
+ of the first buffer in a record and to take this as the ntfs record
+ dirty state. We cannot look at the dirty state for subsequent
+ buffers because we might be racing with
+ fs/ntfs/aops.c::mark_ntfs_record_dirty().
+ - Move the static inline ntfs_init_big_inode() from fs/ntfs/inode.c to
+ inode.h and make fs/ntfs/inode.c::__ntfs_init_inode() non-static and
+ add a declaration for it to inode.h. Fix some compilation issues
+ that resulted due to #includes and header file interdependencies.
+ - Simplify setup of i_mode in fs/ntfs/inode.c::ntfs_read_locked_inode().
+ - Add helpers fs/ntfs/layout.h::MK_MREF() and MK_LE_MREF().
+ - Modify fs/ntfs/mft.c::map_extent_mft_record() to only verify the mft
+ record sequence number if it is specified (i.e. not zero).
+ - Add fs/ntfs/mft.[hc]::ntfs_mft_record_alloc() and various helper
+ functions used by it.
+ - Update Documentation/filesystems/ntfs.txt with instructions on how to
+ use the Device-Mapper driver with NTFS ftdisk/LDM raid. This removes
+ the linear raid problem with the Software RAID / MD driver when one
+ or more of the devices has an odd number of sectors.
+
+2.1.20 - Fix two stupid bugs introduced in 2.1.18 release.
- Fix stupid bug in fs/ntfs/attrib.c::ntfs_attr_reinit_search_ctx()
where we did not clear ctx->al_entry but it was still set due to
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
index 28e8ac163037..99cac1cd4285 100644
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -3,10 +3,10 @@
obj-$(CONFIG_NTFS_FS) += ntfs.o
ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
- index.o inode.o mft.o mst.o namei.o super.o sysctl.o unistr.o \
- upcase.o
+ index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
+ unistr.o upcase.o
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.20\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.21\"
ifeq ($(CONFIG_NTFS_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index edcc9fbf6c09..101929b277a2 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -26,7 +26,15 @@
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/buffer_head.h>
-
+#include <linux/writeback.h>
+
+#include "aops.h"
+#include "attrib.h"
+#include "debug.h"
+#include "inode.h"
+#include "mft.h"
+#include "runlist.h"
+#include "types.h"
#include "ntfs.h"
/**
@@ -232,9 +240,9 @@ lock_retry_remap:
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
- lcn = ntfs_vcn_to_lcn(rl, vcn);
+ lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
} else
- lcn = (LCN)LCN_RL_NOT_MAPPED;
+ lcn = LCN_RL_NOT_MAPPED;
/* Successful remap. */
if (lcn >= 0) {
/* Setup buffer head to correct block. */
@@ -266,7 +274,7 @@ lock_retry_remap:
}
/* Hard error, zero out region. */
SetPageError(page);
- ntfs_error(vol->sb, "ntfs_vcn_to_lcn(vcn = 0x%llx) "
+ ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn(vcn = 0x%llx) "
"failed with error code 0x%llx%s.",
(unsigned long long)vcn,
(unsigned long long)-lcn,
@@ -274,9 +282,9 @@ lock_retry_remap:
// FIXME: Depending on vol->on_errors, do something.
}
/*
- * Either iblock was outside lblock limits or ntfs_vcn_to_lcn()
- * returned error. Just zero that portion of the page and set
- * the buffer uptodate.
+ * Either iblock was outside lblock limits or
+ * ntfs_rl_vcn_to_lcn() returned error. Just zero that portion
+ * of the page and set the buffer uptodate.
*/
handle_hole:
bh->b_blocknr = -1UL;
@@ -637,9 +645,9 @@ lock_retry_remap:
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
- lcn = ntfs_vcn_to_lcn(rl, vcn);
+ lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
} else
- lcn = (LCN)LCN_RL_NOT_MAPPED;
+ lcn = LCN_RL_NOT_MAPPED;
/* Successful remap. */
if (lcn >= 0) {
/* Setup buffer head to point to correct block. */
@@ -673,7 +681,7 @@ lock_retry_remap:
}
/* Failed to map the buffer, even after retrying. */
bh->b_blocknr = -1UL;
- ntfs_error(vol->sb, "ntfs_vcn_to_lcn(vcn = 0x%llx) failed "
+ ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn(vcn = 0x%llx) failed "
"with error code 0x%llx%s.",
(unsigned long long)vcn,
(unsigned long long)-lcn,
@@ -772,25 +780,25 @@ lock_retry_remap:
return err;
}
-static const char *ntfs_please_email = "Please email "
- "linux-ntfs-dev@lists.sourceforge.net and say that you saw "
- "this message. Thank you.";
-
/**
* ntfs_write_mst_block - write a @page to the backing store
* @wbc: writeback control structure
* @page: page cache page to write out
*
* This function is for writing pages belonging to non-resident, mst protected
- * attributes to their backing store. The only supported attribute is the
- * index allocation attribute. Both directory inodes and index inodes are
- * supported.
+ * attributes to their backing store. The only supported attributes are index
+ * allocation and $MFT/$DATA. Both directory inodes and index inodes are
+ * supported for the index allocation case.
*
* The page must remain locked for the duration of the write because we apply
* the mst fixups, write, and then undo the fixups, so if we were to unlock the
* page before undoing the fixups, any other user of the page will see the
* page contents as corrupt.
*
+ * We clear the page uptodate flag for the duration of the function to ensure
+ * exclusion for the $MFT/$DATA case against someone mapping an mft record we
+ * are about to apply the mst fixups to.
+ *
* Return 0 on success and -errno on error.
*
* Based on ntfs_write_block(), ntfs_mft_writepage(), and
@@ -805,60 +813,53 @@ static int ntfs_write_mst_block(struct writeback_control *wbc,
ntfs_volume *vol = ni->vol;
u8 *kaddr;
unsigned int bh_size = 1 << vi->i_blkbits;
- unsigned int rec_size;
- struct buffer_head *bh, *head;
+ unsigned int rec_size = ni->itype.index.block_size;
+ ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size];
+ struct buffer_head *bh, *head, *tbh;
int max_bhs = PAGE_CACHE_SIZE / bh_size;
struct buffer_head *bhs[max_bhs];
- int i, nr_recs, nr_bhs, bhs_per_rec, err;
- unsigned char bh_size_bits;
- BOOL rec_is_dirty;
+ int i, nr_locked_nis, nr_recs, nr_bhs, bhs_per_rec, err;
+ unsigned char bh_size_bits, rec_size_bits;
+ BOOL sync, is_mft, page_is_dirty, rec_is_dirty;
ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
"0x%lx.", vi->i_ino, ni->type, page->index);
BUG_ON(!NInoNonResident(ni));
BUG_ON(!NInoMstProtected(ni));
- BUG_ON(!(S_ISDIR(vi->i_mode) ||
+ is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
+ BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
- BUG_ON(PageWriteback(page));
- BUG_ON(!PageUptodate(page));
BUG_ON(!max_bhs);
+ /* Were we called for sync purposes? */
+ sync = (wbc->sync_mode == WB_SYNC_ALL);
+
/* Make sure we have mapped buffers. */
- if (unlikely(!page_has_buffers(page))) {
-no_buffers_err_out:
- ntfs_error(vol->sb, "Writing ntfs records without existing "
- "buffers is not implemented yet. %s",
- ntfs_please_email);
- err = -EOPNOTSUPP;
- goto err_out;
- }
+ BUG_ON(!page_has_buffers(page));
bh = head = page_buffers(page);
- if (unlikely(!bh))
- goto no_buffers_err_out;
+ BUG_ON(!bh);
bh_size_bits = vi->i_blkbits;
- rec_size = ni->itype.index.block_size;
- nr_recs = PAGE_CACHE_SIZE / rec_size;
- BUG_ON(!nr_recs);
+ rec_size_bits = ni->itype.index.block_size_bits;
+ BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits));
bhs_per_rec = rec_size >> bh_size_bits;
BUG_ON(!bhs_per_rec);
/* The first block in the page. */
- rec_block = block = (s64)page->index <<
+ rec_block = block = (sector_t)page->index <<
(PAGE_CACHE_SHIFT - bh_size_bits);
/* The first out of bounds block for the data size. */
dblock = (vi->i_size + bh_size - 1) >> bh_size_bits;
- err = nr_bhs = 0;
- /* Need this to silence a stupid gcc warning. */
- rec_is_dirty = FALSE;
+ err = nr_bhs = nr_recs = nr_locked_nis = 0;
+ page_is_dirty = rec_is_dirty = FALSE;
do {
if (unlikely(block >= dblock)) {
/*
* Mapped buffers outside i_size will occur, because
* this page can be outside i_size when there is a
- * truncate in progress. The contents of such buffers
+ * truncate in progress. The contents of such buffers
* were zeroed by ntfs_writepage().
*
* FIXME: What about the small race window where
@@ -869,137 +870,235 @@ no_buffers_err_out:
clear_buffer_dirty(bh);
continue;
}
- if (rec_block == block) {
+ if (likely(block < rec_block)) {
+ /*
+ * This block is not the first one in the record. We
+ * ignore the buffer's dirty state because we could
+ * have raced with a parallel mark_ntfs_record_dirty().
+ */
+ if (!rec_is_dirty)
+ continue;
+ } else /* if (block == rec_block) */ {
+ BUG_ON(block > rec_block);
/* This block is the first one in the record. */
- rec_block += rec_size >> bh_size_bits;
+ rec_block += bhs_per_rec;
if (!buffer_dirty(bh)) {
- /* Clean buffers are not written out. */
+ /* Clean records are not written out. */
rec_is_dirty = FALSE;
continue;
}
rec_is_dirty = TRUE;
- } else {
- /* This block is not the first one in the record. */
- if (!buffer_dirty(bh)) {
- /* Clean buffers are not written out. */
- BUG_ON(rec_is_dirty);
- continue;
- }
- BUG_ON(!rec_is_dirty);
- }
- /* Attempting to write outside the initialized size is a bug. */
- BUG_ON(((block + 1) << bh_size_bits) > ni->initialized_size);
- if (!buffer_mapped(bh)) {
- ntfs_error(vol->sb, "Writing ntfs records without "
- "existing mapped buffers is not "
- "implemented yet. %s",
- ntfs_please_email);
- clear_buffer_dirty(bh);
- err = -EOPNOTSUPP;
- goto cleanup_out;
- }
- if (!buffer_uptodate(bh)) {
- ntfs_error(vol->sb, "Writing ntfs records without "
- "existing uptodate buffers is not "
- "implemented yet. %s",
- ntfs_please_email);
- clear_buffer_dirty(bh);
- err = -EOPNOTSUPP;
- goto cleanup_out;
}
+ BUG_ON(!buffer_mapped(bh));
+ BUG_ON(!buffer_uptodate(bh));
bhs[nr_bhs++] = bh;
BUG_ON(nr_bhs > max_bhs);
} while (block++, (bh = bh->b_this_page) != head);
/* If there were no dirty buffers, we are done. */
if (!nr_bhs)
goto done;
- /* Apply the mst protection fixups. */
- kaddr = page_address(page);
+ /* Map the page so we can access its contents. */
+ kaddr = kmap(page);
+ /* Clear the page uptodate flag whilst the mst fixups are applied. */
+ BUG_ON(!PageUptodate(page));
+ ClearPageUptodate(page);
for (i = 0; i < nr_bhs; i++) {
- if (!(i % bhs_per_rec)) {
- err = pre_write_mst_fixup((NTFS_RECORD*)(kaddr +
- bh_offset(bhs[i])), rec_size);
- if (err) {
- ntfs_error(vol->sb, "Failed to apply mst "
- "fixups (inode 0x%lx, "
- "attribute type 0x%x, page "
- "index 0x%lx)! Umount and "
- "run chkdsk.", vi->i_ino,
- ni->type,
- page->index);
- nr_bhs = i;
- goto mst_cleanup_out;
+ unsigned int ofs;
+
+ /* Skip buffers which are not at the beginning of records. */
+ if (i % bhs_per_rec)
+ continue;
+ tbh = bhs[i];
+ ofs = bh_offset(tbh);
+ if (is_mft) {
+ ntfs_inode *tni;
+ unsigned long mft_no;
+
+ /* Get the mft record number. */
+ mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
+ >> rec_size_bits;
+ /* Check whether to write this mft record. */
+ tni = NULL;
+ if (!ntfs_may_write_mft_record(vol, mft_no,
+ (MFT_RECORD*)(kaddr + ofs), &tni)) {
+ /*
+ * The record should not be written. This
+ * means we need to redirty the page before
+ * returning.
+ */
+ page_is_dirty = TRUE;
+ /*
+ * Remove the buffers in this mft record from
+ * the list of buffers to write.
+ */
+ do {
+ bhs[i] = NULL;
+ } while (++i % bhs_per_rec);
+ continue;
}
+ /*
+ * The record should be written. If a locked ntfs
+ * inode was returned, add it to the array of locked
+ * ntfs inodes.
+ */
+ if (tni)
+ locked_nis[nr_locked_nis++] = tni;
}
+ /* Apply the mst protection fixups. */
+ err = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
+ rec_size);
+ if (unlikely(err)) {
+ ntfs_error(vol->sb, "Failed to apply mst fixups "
+ "(inode 0x%lx, attribute type 0x%x, "
+ "page index 0x%lx, page offset 0x%x)!"
+ " Unmount and run chkdsk.", vi->i_ino,
+ ni->type, page->index, ofs);
+ /*
+ * Mark all the buffers in this record clean as we do
+ * not want to write corrupt data to disk.
+ */
+ do {
+ clear_buffer_dirty(bhs[i]);
+ bhs[i] = NULL;
+ } while (++i % bhs_per_rec);
+ continue;
+ }
+ nr_recs++;
}
+ /* If no records are to be written out, we are done. */
+ if (!nr_recs)
+ goto unm_done;
flush_dcache_page(page);
/* Lock buffers and start synchronous write i/o on them. */
for (i = 0; i < nr_bhs; i++) {
- struct buffer_head *tbh = bhs[i];
-
+ tbh = bhs[i];
+ if (!tbh)
+ continue;
if (unlikely(test_set_buffer_locked(tbh)))
BUG();
- if (unlikely(!test_clear_buffer_dirty(tbh))) {
- unlock_buffer(tbh);
- continue;
- }
+ /* The buffer dirty state is now irrelevant, just clean it. */
+ clear_buffer_dirty(tbh);
BUG_ON(!buffer_uptodate(tbh));
BUG_ON(!buffer_mapped(tbh));
get_bh(tbh);
tbh->b_end_io = end_buffer_write_sync;
submit_bh(WRITE, tbh);
}
+ /* Synchronize the mft mirror now if not @sync. */
+ if (is_mft && !sync)
+ goto do_mirror;
+do_wait:
/* Wait on i/o completion of buffers. */
for (i = 0; i < nr_bhs; i++) {
- struct buffer_head *tbh = bhs[i];
-
+ tbh = bhs[i];
+ if (!tbh)
+ continue;
wait_on_buffer(tbh);
if (unlikely(!buffer_uptodate(tbh))) {
+ ntfs_error(vol->sb, "I/O error while writing ntfs "
+ "record buffer (inode 0x%lx, "
+ "attribute type 0x%x, page index "
+ "0x%lx, page offset 0x%lx)! Unmount "
+ "and run chkdsk.", vi->i_ino, ni->type,
+ page->index, bh_offset(tbh));
err = -EIO;
/*
- * Set the buffer uptodate so the page & buffer states
- * don't become out of sync.
+ * Set the buffer uptodate so the page and buffer
+ * states do not become out of sync.
*/
- if (PageUptodate(page))
- set_buffer_uptodate(tbh);
+ set_buffer_uptodate(tbh);
}
}
+ /* If @sync, now synchronize the mft mirror. */
+ if (is_mft && sync) {
+do_mirror:
+ for (i = 0; i < nr_bhs; i++) {
+ unsigned long mft_no;
+ unsigned int ofs;
+
+ /*
+ * Skip buffers which are not at the beginning of
+ * records.
+ */
+ if (i % bhs_per_rec)
+ continue;
+ tbh = bhs[i];
+ /* Skip removed buffers (and hence records). */
+ if (!tbh)
+ continue;
+ ofs = bh_offset(tbh);
+ /* Get the mft record number. */
+ mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
+ >> rec_size_bits;
+ if (mft_no < vol->mftmirr_size)
+ ntfs_sync_mft_mirror(vol, mft_no,
+ (MFT_RECORD*)(kaddr + ofs),
+ sync);
+ }
+ if (!sync)
+ goto do_wait;
+ }
/* Remove the mst protection fixups again. */
for (i = 0; i < nr_bhs; i++) {
- if (!(i % bhs_per_rec))
+ if (!(i % bhs_per_rec)) {
+ tbh = bhs[i];
+ if (!tbh)
+ continue;
post_write_mst_fixup((NTFS_RECORD*)(kaddr +
- bh_offset(bhs[i])));
+ bh_offset(tbh)));
+ }
}
flush_dcache_page(page);
+unm_done:
+ /* Unlock any locked inodes. */
+ while (nr_locked_nis-- > 0) {
+ ntfs_inode *tni, *base_tni;
+
+ tni = locked_nis[nr_locked_nis];
+ /* Get the base inode. */
+ down(&tni->extent_lock);
+ if (tni->nr_extents >= 0)
+ base_tni = tni;
+ else {
+ base_tni = tni->ext.base_ntfs_ino;
+ BUG_ON(!base_tni);
+ }
+ up(&tni->extent_lock);
+ ntfs_debug("Unlocking %s inode 0x%lx.",
+ tni == base_tni ? "base" : "extent",
+ tni->mft_no);
+ up(&tni->mrec_lock);
+ atomic_dec(&tni->count);
+ iput(VFS_I(base_tni));
+ }
if (unlikely(err)) {
- /* I/O error during writing. This is really bad! */
- ntfs_error(vol->sb, "I/O error while writing ntfs record "
- "(inode 0x%lx, attribute type 0x%x, page "
- "index 0x%lx)! Umount and run chkdsk.",
- vi->i_ino, ni->type, page->index);
- goto err_out;
+ SetPageError(page);
+ NVolSetErrors(vol);
}
+ SetPageUptodate(page);
+ kunmap(page);
done:
- set_page_writeback(page);
- unlock_page(page);
- end_page_writeback(page);
- if (!err)
+ if (page_is_dirty) {
+ ntfs_debug("Page still contains one or more dirty ntfs "
+ "records. Redirtying the page starting at "
+ "record 0x%lx.", page->index <<
+ (PAGE_CACHE_SHIFT - rec_size_bits));
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
+ } else {
+ /*
+ * Keep the VM happy. This must be done otherwise the
+ * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
+ * the page is clean.
+ */
+ BUG_ON(PageWriteback(page));
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ }
+ if (likely(!err))
ntfs_debug("Done.");
return err;
-mst_cleanup_out:
- /* Remove the mst protection fixups again. */
- for (i = 0; i < nr_bhs; i++) {
- if (!(i % bhs_per_rec))
- post_write_mst_fixup((NTFS_RECORD*)(kaddr +
- bh_offset(bhs[i])));
- }
-cleanup_out:
- /* Clean the buffers. */
- for (i = 0; i < nr_bhs; i++)
- clear_buffer_dirty(bhs[i]);
-err_out:
- SetPageError(page);
- goto done;
}
/**
@@ -1007,6 +1106,9 @@ err_out:
* @page: page cache page to write out
* @wbc: writeback control structure
*
+ * This is called from the VM when it wants to have a dirty ntfs page cache
+ * page cleaned. The VM has already locked the page and marked it clean.
+ *
* For non-resident attributes, ntfs_writepage() writes the @page by calling
* the ntfs version of the generic block_write_full_page() function,
* ntfs_write_block(), which in turn if necessary creates and writes the
@@ -1017,8 +1119,6 @@ err_out:
* The mft record is then marked dirty and written out asynchronously via the
* vfs inode dirty code path.
*
- * Note the caller clears the page dirty flag before calling ntfs_writepage().
- *
* Based on ntfs_readpage() and fs/buffer.c::block_write_full_page().
*
* Return 0 on success and -errno on error.
@@ -1402,9 +1502,9 @@ lock_retry_remap:
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
- lcn = ntfs_vcn_to_lcn(rl, vcn);
+ lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
} else
- lcn = (LCN)LCN_RL_NOT_MAPPED;
+ lcn = LCN_RL_NOT_MAPPED;
if (unlikely(lcn < 0)) {
/*
* We extended the attribute allocation above.
@@ -1451,7 +1551,7 @@ lock_retry_remap:
* retrying.
*/
bh->b_blocknr = -1UL;
- ntfs_error(vol->sb, "ntfs_vcn_to_lcn(vcn = "
+ ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn(vcn = "
"0x%llx) failed with error "
"code 0x%llx%s.",
(unsigned long long)vcn,
@@ -2028,3 +2128,46 @@ struct address_space_operations ntfs_mst_aops = {
belonging to the page. */
#endif /* NTFS_RW */
};
+
+#ifdef NTFS_RW
+
+/**
+ * mark_ntfs_record_dirty - mark an ntfs record dirty
+ * @page: page containing the ntfs record to mark dirty
+ * @ofs: byte offset within @page at which the ntfs record begins
+ *
+ * If the ntfs record is the same size as the page cache page @page, set all
+ * buffers in the page dirty. Otherwise, set only the buffers in which the
+ * ntfs record is located dirty.
+ *
+ * Also, set the page containing the ntfs record dirty, which also marks the
+ * vfs inode the ntfs record belongs to dirty (I_DIRTY_PAGES).
+ */
+void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
+ ntfs_inode *ni;
+ struct buffer_head *bh, *head;
+ unsigned int end, bh_size, bh_ofs;
+
+ BUG_ON(!page);
+ BUG_ON(!page_has_buffers(page));
+ ni = NTFS_I(page->mapping->host);
+ BUG_ON(!ni);
+ if (ni->itype.index.block_size == PAGE_CACHE_SIZE) {
+ __set_page_dirty_buffers(page);
+ return;
+ }
+ end = ofs + ni->itype.index.block_size;
+ bh_size = ni->vol->sb->s_blocksize;
+ bh = head = page_buffers(page);
+ do {
+ bh_ofs = bh_offset(bh);
+ if (bh_ofs + bh_size <= ofs)
+ continue;
+ if (unlikely(bh_ofs >= end))
+ break;
+ set_buffer_dirty(bh);
+ } while ((bh = bh->b_this_page) != head);
+ __set_page_dirty_nobuffers(page);
+}
+
+#endif /* NTFS_RW */
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
new file mode 100644
index 000000000000..10b23174cb5f
--- /dev/null
+++ b/fs/ntfs/aops.h
@@ -0,0 +1,102 @@
+/**
+ * aops.h - Defines for NTFS kernel address space operations and page cache
+ * handling. Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_NTFS_AOPS_H
+#define _LINUX_NTFS_AOPS_H
+
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+
+#include "inode.h"
+
+/**
+ * ntfs_unmap_page - release a page that was mapped using ntfs_map_page()
+ * @page: the page to release
+ *
+ * Unpin, unmap and release a page that was obtained from ntfs_map_page().
+ */
+static inline void ntfs_unmap_page(struct page *page)
+{
+ kunmap(page);
+ page_cache_release(page);
+}
+
+/**
+ * ntfs_map_page - map a page into accessible memory, reading it if necessary
+ * @mapping: address space for which to obtain the page
+ * @index: index into the page cache for @mapping of the page to map
+ *
+ * Read a page from the page cache of the address space @mapping at position
+ * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes.
+ *
+ * If the page is not in memory it is loaded from disk first using the readpage
+ * method defined in the address space operations of @mapping and the page is
+ * added to the page cache of @mapping in the process.
+ *
 * If the page is in high memory it is mapped into memory directly addressable
+ * by the kernel.
+ *
+ * Finally the page count is incremented, thus pinning the page into place.
+ *
+ * The above means that page_address(page) can be used on all pages obtained
+ * with ntfs_map_page() to get the kernel virtual address of the page.
+ *
+ * When finished with the page, the caller has to call ntfs_unmap_page() to
+ * unpin, unmap and release the page.
+ *
+ * Note this does not grant exclusive access. If such is desired, the caller
+ * must provide it independently of the ntfs_{un}map_page() calls by using
+ * a {rw_}semaphore or other means of serialization. A spin lock cannot be
+ * used as ntfs_map_page() can block.
+ *
+ * The unlocked and uptodate page is returned on success or an encoded error
+ * on failure. Caller has to test for error using the IS_ERR() macro on the
+ * return value. If that evaluates to TRUE, the negative error code can be
+ * obtained using PTR_ERR() on the return value of ntfs_map_page().
+ */
+static inline struct page *ntfs_map_page(struct address_space *mapping,
+ unsigned long index)
+{
+ struct page *page = read_cache_page(mapping, index,
+ (filler_t*)mapping->a_ops->readpage, NULL);
+
+ if (!IS_ERR(page)) {
+ wait_on_page_locked(page);
+ kmap(page);
+ if (PageUptodate(page) && !PageError(page))
+ return page;
+ ntfs_unmap_page(page);
+ return ERR_PTR(-EIO);
+ }
+ return page;
+}
+
+#ifdef NTFS_RW
+
+extern void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs);
+
+#endif /* NTFS_RW */
+
+#endif /* _LINUX_NTFS_AOPS_H */
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index fdf20b85ceb4..865de0c2c3d0 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1,5 +1,5 @@
/**
- * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
+ * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2004 Anton Altaparmakov
* Copyright (c) 2002 Richard Russon
@@ -21,915 +21,11 @@
*/
#include <linux/buffer_head.h>
-#include "ntfs.h"
-#include "dir.h"
-
-/* Temporary helper functions -- might become macros */
-
-/**
- * ntfs_rl_mm - runlist memmove
- *
- * It is up to the caller to serialize access to the runlist @base.
- */
-static inline void ntfs_rl_mm(runlist_element *base, int dst, int src,
- int size)
-{
- if (likely((dst != src) && (size > 0)))
- memmove(base + dst, base + src, size * sizeof (*base));
-}
-
-/**
- * ntfs_rl_mc - runlist memory copy
- *
- * It is up to the caller to serialize access to the runlists @dstbase and
- * @srcbase.
- */
-static inline void ntfs_rl_mc(runlist_element *dstbase, int dst,
- runlist_element *srcbase, int src, int size)
-{
- if (likely(size > 0))
- memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase));
-}
-
-/**
- * ntfs_rl_realloc - Reallocate memory for runlists
- * @rl: original runlist
- * @old_size: number of runlist elements in the original runlist @rl
- * @new_size: number of runlist elements we need space for
- *
- * As the runlists grow, more memory will be required. To prevent the
- * kernel having to allocate and reallocate large numbers of small bits of
- * memory, this function returns and entire page of memory.
- *
- * It is up to the caller to serialize access to the runlist @rl.
- *
- * N.B. If the new allocation doesn't require a different number of pages in
- * memory, the function will return the original pointer.
- *
- * On success, return a pointer to the newly allocated, or recycled, memory.
- * On error, return -errno. The following error codes are defined:
- * -ENOMEM - Not enough memory to allocate runlist array.
- * -EINVAL - Invalid parameters were passed in.
- */
-static inline runlist_element *ntfs_rl_realloc(runlist_element *rl,
- int old_size, int new_size)
-{
- runlist_element *new_rl;
-
- old_size = PAGE_ALIGN(old_size * sizeof(*rl));
- new_size = PAGE_ALIGN(new_size * sizeof(*rl));
- if (old_size == new_size)
- return rl;
-
- new_rl = ntfs_malloc_nofs(new_size);
- if (unlikely(!new_rl))
- return ERR_PTR(-ENOMEM);
-
- if (likely(rl != NULL)) {
- if (unlikely(old_size > new_size))
- old_size = new_size;
- memcpy(new_rl, rl, old_size);
- ntfs_free(rl);
- }
- return new_rl;
-}
-
-/**
- * ntfs_are_rl_mergeable - test if two runlists can be joined together
- * @dst: original runlist
- * @src: new runlist to test for mergeability with @dst
- *
- * Test if two runlists can be joined together. For this, their VCNs and LCNs
- * must be adjacent.
- *
- * It is up to the caller to serialize access to the runlists @dst and @src.
- *
- * Return: TRUE Success, the runlists can be merged.
- * FALSE Failure, the runlists cannot be merged.
- */
-static inline BOOL ntfs_are_rl_mergeable(runlist_element *dst,
- runlist_element *src)
-{
- BUG_ON(!dst);
- BUG_ON(!src);
-
- if ((dst->lcn < 0) || (src->lcn < 0)) /* Are we merging holes? */
- return FALSE;
- if ((dst->lcn + dst->length) != src->lcn) /* Are the runs contiguous? */
- return FALSE;
- if ((dst->vcn + dst->length) != src->vcn) /* Are the runs misaligned? */
- return FALSE;
-
- return TRUE;
-}
-
-/**
- * __ntfs_rl_merge - merge two runlists without testing if they can be merged
- * @dst: original, destination runlist
- * @src: new runlist to merge with @dst
- *
- * Merge the two runlists, writing into the destination runlist @dst. The
- * caller must make sure the runlists can be merged or this will corrupt the
- * destination runlist.
- *
- * It is up to the caller to serialize access to the runlists @dst and @src.
- */
-static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src)
-{
- dst->length += src->length;
-}
-
-/**
- * ntfs_rl_merge - test if two runlists can be joined together and merge them
- * @dst: original, destination runlist
- * @src: new runlist to merge with @dst
- *
- * Test if two runlists can be joined together. For this, their VCNs and LCNs
- * must be adjacent. If they can be merged, perform the merge, writing into
- * the destination runlist @dst.
- *
- * It is up to the caller to serialize access to the runlists @dst and @src.
- *
- * Return: TRUE Success, the runlists have been merged.
- * FALSE Failure, the runlists cannot be merged and have not been
- * modified.
- */
-static inline BOOL ntfs_rl_merge(runlist_element *dst, runlist_element *src)
-{
- BOOL merge = ntfs_are_rl_mergeable(dst, src);
-
- if (merge)
- __ntfs_rl_merge(dst, src);
- return merge;
-}
-
-/**
- * ntfs_rl_append - append a runlist after a given element
- * @dst: original runlist to be worked on
- * @dsize: number of elements in @dst (including end marker)
- * @src: runlist to be inserted into @dst
- * @ssize: number of elements in @src (excluding end marker)
- * @loc: append the new runlist @src after this element in @dst
- *
- * Append the runlist @src after element @loc in @dst. Merge the right end of
- * the new runlist, if necessary. Adjust the size of the hole before the
- * appended runlist.
- *
- * It is up to the caller to serialize access to the runlists @dst and @src.
- *
- * On success, return a pointer to the new, combined, runlist. Note, both
- * runlists @dst and @src are deallocated before returning so you cannot use
- * the pointers for anything any more. (Strictly speaking the returned runlist
- * may be the same as @dst but this is irrelevant.)
- *
- * On error, return -errno. Both runlists are left unmodified. The following
- * error codes are defined:
- * -ENOMEM - Not enough memory to allocate runlist array.
- * -EINVAL - Invalid parameters were passed in.
- */
-static inline runlist_element *ntfs_rl_append(runlist_element *dst,
- int dsize, runlist_element *src, int ssize, int loc)
-{
- BOOL right;
- int magic;
-
- BUG_ON(!dst);
- BUG_ON(!src);
-
- /* First, check if the right hand end needs merging. */
- right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
-
- /* Space required: @dst size + @src size, less one if we merged. */
- dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right);
- if (IS_ERR(dst))
- return dst;
- /*
- * We are guaranteed to succeed from here so can start modifying the
- * original runlists.
- */
-
- /* First, merge the right hand end, if necessary. */
- if (right)
- __ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
-
- magic = loc + ssize;
-
- /* Move the tail of @dst out of the way, then copy in @src. */
- ntfs_rl_mm(dst, magic + 1, loc + 1 + right, dsize - loc - 1 - right);
- ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
-
- /* Adjust the size of the preceding hole. */
- dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
-
- /* We may have changed the length of the file, so fix the end marker */
- if (dst[magic + 1].lcn == LCN_ENOENT)
- dst[magic + 1].vcn = dst[magic].vcn + dst[magic].length;
-
- return dst;
-}
-
-/**
- * ntfs_rl_insert - insert a runlist into another
- * @dst: original runlist to be worked on
- * @dsize: number of elements in @dst (including end marker)
- * @src: new runlist to be inserted
- * @ssize: number of elements in @src (excluding end marker)
- * @loc: insert the new runlist @src before this element in @dst
- *
- * Insert the runlist @src before element @loc in the runlist @dst. Merge the
- * left end of the new runlist, if necessary. Adjust the size of the hole
- * after the inserted runlist.
- *
- * It is up to the caller to serialize access to the runlists @dst and @src.
- *
- * On success, return a pointer to the new, combined, runlist. Note, both
- * runlists @dst and @src are deallocated before returning so you cannot use
- * the pointers for anything any more. (Strictly speaking the returned runlist
- * may be the same as @dst but this is irrelevant.)
- *
- * On error, return -errno. Both runlists are left unmodified. The following
- * error codes are defined:
- * -ENOMEM - Not enough memory to allocate runlist array.
- * -EINVAL - Invalid parameters were passed in.
- */
-static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
- int dsize, runlist_element *src, int ssize, int loc)
-{
- BOOL left = FALSE;
- BOOL disc = FALSE; /* Discontinuity */
- BOOL hole = FALSE; /* Following a hole */
- int magic;
-
- BUG_ON(!dst);
- BUG_ON(!src);
-
- /* disc => Discontinuity between the end of @dst and the start of @src.
- * This means we might need to insert a hole.
- * hole => @dst ends with a hole or an unmapped region which we can
- * extend to match the discontinuity. */
- if (loc == 0)
- disc = (src[0].vcn > 0);
- else {
- s64 merged_length;
-
- left = ntfs_are_rl_mergeable(dst + loc - 1, src);
-
- merged_length = dst[loc - 1].length;
- if (left)
- merged_length += src->length;
-
- disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
- if (disc)
- hole = (dst[loc - 1].lcn == LCN_HOLE);
- }
-
- /* Space required: @dst size + @src size, less one if we merged, plus
- * one if there was a discontinuity, less one for a trailing hole. */
- dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc - hole);
- if (IS_ERR(dst))
- return dst;
- /*
- * We are guaranteed to succeed from here so can start modifying the
- * original runlist.
- */
-
- if (left)
- __ntfs_rl_merge(dst + loc - 1, src);
-
- magic = loc + ssize - left + disc - hole;
-
- /* Move the tail of @dst out of the way, then copy in @src. */
- ntfs_rl_mm(dst, magic, loc, dsize - loc);
- ntfs_rl_mc(dst, loc + disc - hole, src, left, ssize - left);
-
- /* Adjust the VCN of the last run ... */
- if (dst[magic].lcn <= LCN_HOLE)
- dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
- /* ... and the length. */
- if (dst[magic].lcn == LCN_HOLE || dst[magic].lcn == LCN_RL_NOT_MAPPED)
- dst[magic].length = dst[magic + 1].vcn - dst[magic].vcn;
-
- /* Writing beyond the end of the file and there's a discontinuity. */
- if (disc) {
- if (hole)
- dst[loc - 1].length = dst[loc].vcn - dst[loc - 1].vcn;
- else {
- if (loc > 0) {
- dst[loc].vcn = dst[loc - 1].vcn +
- dst[loc - 1].length;
- dst[loc].length = dst[loc + 1].vcn -
- dst[loc].vcn;
- } else {
- dst[loc].vcn = 0;
- dst[loc].length = dst[loc + 1].vcn;
- }
- dst[loc].lcn = LCN_RL_NOT_MAPPED;
- }
-
- magic += hole;
-
- if (dst[magic].lcn == LCN_ENOENT)
- dst[magic].vcn = dst[magic - 1].vcn +
- dst[magic - 1].length;
- }
- return dst;
-}
-
-/**
- * ntfs_rl_replace - overwrite a runlist element with another runlist
- * @dst: original runlist to be worked on
- * @dsize: number of elements in @dst (including end marker)
- * @src: new runlist to be inserted
- * @ssize: number of elements in @src (excluding end marker)
- * @loc: index in runlist @dst to overwrite with @src
- *
- * Replace the runlist element @dst at @loc with @src. Merge the left and
- * right ends of the inserted runlist, if necessary.
- *
- * It is up to the caller to serialize access to the runlists @dst and @src.
- *
- * On success, return a pointer to the new, combined, runlist. Note, both
- * runlists @dst and @src are deallocated before returning so you cannot use
- * the pointers for anything any more. (Strictly speaking the returned runlist
- * may be the same as @dst but this is irrelevant.)
- *
- * On error, return -errno. Both runlists are left unmodified. The following
- * error codes are defined:
- * -ENOMEM - Not enough memory to allocate runlist array.
- * -EINVAL - Invalid parameters were passed in.
- */
-static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
- int dsize, runlist_element *src, int ssize, int loc)
-{
- BOOL left = FALSE;
- BOOL right;
- int magic;
-
- BUG_ON(!dst);
- BUG_ON(!src);
-
- /* First, merge the left and right ends, if necessary. */
- right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
- if (loc > 0)
- left = ntfs_are_rl_mergeable(dst + loc - 1, src);
-
- /* Allocate some space. We'll need less if the left, right, or both
- * ends were merged. */
- dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right);
- if (IS_ERR(dst))
- return dst;
- /*
- * We are guaranteed to succeed from here so can start modifying the
- * original runlists.
- */
- if (right)
- __ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
- if (left)
- __ntfs_rl_merge(dst + loc - 1, src);
-
- /* FIXME: What does this mean? (AIA) */
- magic = loc + ssize - left;
-
- /* Move the tail of @dst out of the way, then copy in @src. */
- ntfs_rl_mm(dst, magic, loc + right + 1, dsize - loc - right - 1);
- ntfs_rl_mc(dst, loc, src, left, ssize - left);
-
- /* We may have changed the length of the file, so fix the end marker */
- if (dst[magic].lcn == LCN_ENOENT)
- dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
- return dst;
-}
-
-/**
- * ntfs_rl_split - insert a runlist into the centre of a hole
- * @dst: original runlist to be worked on
- * @dsize: number of elements in @dst (including end marker)
- * @src: new runlist to be inserted
- * @ssize: number of elements in @src (excluding end marker)
- * @loc: index in runlist @dst at which to split and insert @src
- *
- * Split the runlist @dst at @loc into two and insert @new in between the two
- * fragments. No merging of runlists is necessary. Adjust the size of the
- * holes either side.
- *
- * It is up to the caller to serialize access to the runlists @dst and @src.
- *
- * On success, return a pointer to the new, combined, runlist. Note, both
- * runlists @dst and @src are deallocated before returning so you cannot use
- * the pointers for anything any more. (Strictly speaking the returned runlist
- * may be the same as @dst but this is irrelevant.)
- *
- * On error, return -errno. Both runlists are left unmodified. The following
- * error codes are defined:
- * -ENOMEM - Not enough memory to allocate runlist array.
- * -EINVAL - Invalid parameters were passed in.
- */
-static inline runlist_element *ntfs_rl_split(runlist_element *dst, int dsize,
- runlist_element *src, int ssize, int loc)
-{
- BUG_ON(!dst);
- BUG_ON(!src);
-
- /* Space required: @dst size + @src size + one new hole. */
- dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1);
- if (IS_ERR(dst))
- return dst;
- /*
- * We are guaranteed to succeed from here so can start modifying the
- * original runlists.
- */
-
- /* Move the tail of @dst out of the way, then copy in @src. */
- ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc);
- ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
-
- /* Adjust the size of the holes either size of @src. */
- dst[loc].length = dst[loc+1].vcn - dst[loc].vcn;
- dst[loc+ssize+1].vcn = dst[loc+ssize].vcn + dst[loc+ssize].length;
- dst[loc+ssize+1].length = dst[loc+ssize+2].vcn - dst[loc+ssize+1].vcn;
-
- return dst;
-}
-
-/**
- * ntfs_merge_runlists - merge two runlists into one
- * @drl: original runlist to be worked on
- * @srl: new runlist to be merged into @drl
- *
- * First we sanity check the two runlists @srl and @drl to make sure that they
- * are sensible and can be merged. The runlist @srl must be either after the
- * runlist @drl or completely within a hole (or unmapped region) in @drl.
- *
- * It is up to the caller to serialize access to the runlists @drl and @srl.
- *
- * Merging of runlists is necessary in two cases:
- * 1. When attribute lists are used and a further extent is being mapped.
- * 2. When new clusters are allocated to fill a hole or extend a file.
- *
- * There are four possible ways @srl can be merged. It can:
- * - be inserted at the beginning of a hole,
- * - split the hole in two and be inserted between the two fragments,
- * - be appended at the end of a hole, or it can
- * - replace the whole hole.
- * It can also be appended to the end of the runlist, which is just a variant
- * of the insert case.
- *
- * On success, return a pointer to the new, combined, runlist. Note, both
- * runlists @drl and @srl are deallocated before returning so you cannot use
- * the pointers for anything any more. (Strictly speaking the returned runlist
- * may be the same as @dst but this is irrelevant.)
- *
- * On error, return -errno. Both runlists are left unmodified. The following
- * error codes are defined:
- * -ENOMEM - Not enough memory to allocate runlist array.
- * -EINVAL - Invalid parameters were passed in.
- * -ERANGE - The runlists overlap and cannot be merged.
- */
-runlist_element *ntfs_merge_runlists(runlist_element *drl,
- runlist_element *srl)
-{
- int di, si; /* Current index into @[ds]rl. */
- int sstart; /* First index with lcn > LCN_RL_NOT_MAPPED. */
- int dins; /* Index into @drl at which to insert @srl. */
- int dend, send; /* Last index into @[ds]rl. */
- int dfinal, sfinal; /* The last index into @[ds]rl with
- lcn >= LCN_HOLE. */
- int marker = 0;
- VCN marker_vcn = 0;
-
-#ifdef DEBUG
- ntfs_debug("dst:");
- ntfs_debug_dump_runlist(drl);
- ntfs_debug("src:");
- ntfs_debug_dump_runlist(srl);
-#endif
-
- /* Check for silly calling... */
- if (unlikely(!srl))
- return drl;
- if (IS_ERR(srl) || IS_ERR(drl))
- return ERR_PTR(-EINVAL);
- /* Check for the case where the first mapping is being done now. */
- if (unlikely(!drl)) {
- drl = srl;
- /* Complete the source runlist if necessary. */
- if (unlikely(drl[0].vcn)) {
- /* Scan to the end of the source runlist. */
- for (dend = 0; likely(drl[dend].length); dend++)
- ;
- drl = ntfs_rl_realloc(drl, dend, dend + 1);
- if (IS_ERR(drl))
- return drl;
- /* Insert start element at the front of the runlist. */
- ntfs_rl_mm(drl, 1, 0, dend);
- drl[0].vcn = 0;
- drl[0].lcn = LCN_RL_NOT_MAPPED;
- drl[0].length = drl[1].vcn;
- }
- goto finished;
- }
-
- si = di = 0;
-
- /* Skip any unmapped start element(s) in the source runlist. */
- while (srl[si].length && srl[si].lcn < (LCN)LCN_HOLE)
- si++;
-
- /* Can't have an entirely unmapped source runlist. */
- BUG_ON(!srl[si].length);
-
- /* Record the starting points. */
- sstart = si;
-
- /*
- * Skip forward in @drl until we reach the position where @srl needs to
- * be inserted. If we reach the end of @drl, @srl just needs to be
- * appended to @drl.
- */
- for (; drl[di].length; di++) {
- if (drl[di].vcn + drl[di].length > srl[sstart].vcn)
- break;
- }
- dins = di;
-
- /* Sanity check for illegal overlaps. */
- if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) &&
- (srl[si].lcn >= 0)) {
- ntfs_error(NULL, "Run lists overlap. Cannot merge!");
- return ERR_PTR(-ERANGE);
- }
-
- /* Scan to the end of both runlists in order to know their sizes. */
- for (send = si; srl[send].length; send++)
- ;
- for (dend = di; drl[dend].length; dend++)
- ;
-
- if (srl[send].lcn == (LCN)LCN_ENOENT)
- marker_vcn = srl[marker = send].vcn;
-
- /* Scan to the last element with lcn >= LCN_HOLE. */
- for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE; sfinal--)
- ;
- for (dfinal = dend; dfinal >= 0 && drl[dfinal].lcn < LCN_HOLE; dfinal--)
- ;
-
- {
- BOOL start;
- BOOL finish;
- int ds = dend + 1; /* Number of elements in drl & srl */
- int ss = sfinal - sstart + 1;
-
- start = ((drl[dins].lcn < LCN_RL_NOT_MAPPED) || /* End of file */
- (drl[dins].vcn == srl[sstart].vcn)); /* Start of hole */
- finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) && /* End of file */
- ((drl[dins].vcn + drl[dins].length) <= /* End of hole */
- (srl[send - 1].vcn + srl[send - 1].length)));
-
- /* Or we'll lose an end marker */
- if (start && finish && (drl[dins].length == 0))
- ss++;
- if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
- finish = FALSE;
-#if 0
- ntfs_debug("dfinal = %i, dend = %i", dfinal, dend);
- ntfs_debug("sstart = %i, sfinal = %i, send = %i", sstart, sfinal, send);
- ntfs_debug("start = %i, finish = %i", start, finish);
- ntfs_debug("ds = %i, ss = %i, dins = %i", ds, ss, dins);
-#endif
- if (start) {
- if (finish)
- drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins);
- else
- drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins);
- } else {
- if (finish)
- drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins);
- else
- drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins);
- }
- if (IS_ERR(drl)) {
- ntfs_error(NULL, "Merge failed.");
- return drl;
- }
- ntfs_free(srl);
- if (marker) {
- ntfs_debug("Triggering marker code.");
- for (ds = dend; drl[ds].length; ds++)
- ;
- /* We only need to care if @srl ended after @drl. */
- if (drl[ds].vcn <= marker_vcn) {
- int slots = 0;
-
- if (drl[ds].vcn == marker_vcn) {
- ntfs_debug("Old marker = 0x%llx, replacing "
- "with LCN_ENOENT.",
- (unsigned long long)
- drl[ds].lcn);
- drl[ds].lcn = (LCN)LCN_ENOENT;
- goto finished;
- }
- /*
- * We need to create an unmapped runlist element in
- * @drl or extend an existing one before adding the
- * ENOENT terminator.
- */
- if (drl[ds].lcn == (LCN)LCN_ENOENT) {
- ds--;
- slots = 1;
- }
- if (drl[ds].lcn != (LCN)LCN_RL_NOT_MAPPED) {
- /* Add an unmapped runlist element. */
- if (!slots) {
- /* FIXME/TODO: We need to have the
- * extra memory already! (AIA) */
- drl = ntfs_rl_realloc(drl, ds, ds + 2);
- if (!drl)
- goto critical_error;
- slots = 2;
- }
- ds++;
- /* Need to set vcn if it isn't set already. */
- if (slots != 1)
- drl[ds].vcn = drl[ds - 1].vcn +
- drl[ds - 1].length;
- drl[ds].lcn = (LCN)LCN_RL_NOT_MAPPED;
- /* We now used up a slot. */
- slots--;
- }
- drl[ds].length = marker_vcn - drl[ds].vcn;
- /* Finally add the ENOENT terminator. */
- ds++;
- if (!slots) {
- /* FIXME/TODO: We need to have the extra
- * memory already! (AIA) */
- drl = ntfs_rl_realloc(drl, ds, ds + 1);
- if (!drl)
- goto critical_error;
- }
- drl[ds].vcn = marker_vcn;
- drl[ds].lcn = (LCN)LCN_ENOENT;
- drl[ds].length = (s64)0;
- }
- }
- }
-
-finished:
- /* The merge was completed successfully. */
- ntfs_debug("Merged runlist:");
- ntfs_debug_dump_runlist(drl);
- return drl;
-
-critical_error:
- /* Critical error! We cannot afford to fail here. */
- ntfs_error(NULL, "Critical error! Not enough memory.");
- panic("NTFS: Cannot continue.");
-}
-
-/**
- * decompress_mapping_pairs - convert mapping pairs array to runlist
- * @vol: ntfs volume on which the attribute resides
- * @attr: attribute record whose mapping pairs array to decompress
- * @old_rl: optional runlist in which to insert @attr's runlist
- *
- * It is up to the caller to serialize access to the runlist @old_rl.
- *
- * Decompress the attribute @attr's mapping pairs array into a runlist. On
- * success, return the decompressed runlist.
- *
- * If @old_rl is not NULL, decompressed runlist is inserted into the
- * appropriate place in @old_rl and the resultant, combined runlist is
- * returned. The original @old_rl is deallocated.
- *
- * On error, return -errno. @old_rl is left unmodified in that case.
- *
- * The following error codes are defined:
- * -ENOMEM - Not enough memory to allocate runlist array.
- * -EIO - Corrupt runlist.
- * -EINVAL - Invalid parameters were passed in.
- * -ERANGE - The two runlists overlap.
- *
- * FIXME: For now we take the conceptionally simplest approach of creating the
- * new runlist disregarding the already existing one and then splicing the
- * two into one, if that is possible (we check for overlap and discard the new
- * runlist if overlap present before returning ERR_PTR(-ERANGE)).
- */
-runlist_element *decompress_mapping_pairs(const ntfs_volume *vol,
- const ATTR_RECORD *attr, runlist_element *old_rl)
-{
- VCN vcn; /* Current vcn. */
- LCN lcn; /* Current lcn. */
- s64 deltaxcn; /* Change in [vl]cn. */
- runlist_element *rl; /* The output runlist. */
- u8 *buf; /* Current position in mapping pairs array. */
- u8 *attr_end; /* End of attribute. */
- int rlsize; /* Size of runlist buffer. */
- u16 rlpos; /* Current runlist position in units of
- runlist_elements. */
- u8 b; /* Current byte offset in buf. */
-
-#ifdef DEBUG
- /* Make sure attr exists and is non-resident. */
- if (!attr || !attr->non_resident || sle64_to_cpu(
- attr->data.non_resident.lowest_vcn) < (VCN)0) {
- ntfs_error(vol->sb, "Invalid arguments.");
- return ERR_PTR(-EINVAL);
- }
-#endif
- /* Start at vcn = lowest_vcn and lcn 0. */
- vcn = sle64_to_cpu(attr->data.non_resident.lowest_vcn);
- lcn = 0;
- /* Get start of the mapping pairs array. */
- buf = (u8*)attr + le16_to_cpu(
- attr->data.non_resident.mapping_pairs_offset);
- attr_end = (u8*)attr + le32_to_cpu(attr->length);
- if (unlikely(buf < (u8*)attr || buf > attr_end)) {
- ntfs_error(vol->sb, "Corrupt attribute.");
- return ERR_PTR(-EIO);
- }
- /* Current position in runlist array. */
- rlpos = 0;
- /* Allocate first page and set current runlist size to one page. */
- rl = ntfs_malloc_nofs(rlsize = PAGE_SIZE);
- if (unlikely(!rl))
- return ERR_PTR(-ENOMEM);
- /* Insert unmapped starting element if necessary. */
- if (vcn) {
- rl->vcn = (VCN)0;
- rl->lcn = (LCN)LCN_RL_NOT_MAPPED;
- rl->length = vcn;
- rlpos++;
- }
- while (buf < attr_end && *buf) {
- /*
- * Allocate more memory if needed, including space for the
- * not-mapped and terminator elements. ntfs_malloc_nofs()
- * operates on whole pages only.
- */
- if (((rlpos + 3) * sizeof(*old_rl)) > rlsize) {
- runlist_element *rl2;
-
- rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE);
- if (unlikely(!rl2)) {
- ntfs_free(rl);
- return ERR_PTR(-ENOMEM);
- }
- memcpy(rl2, rl, rlsize);
- ntfs_free(rl);
- rl = rl2;
- rlsize += PAGE_SIZE;
- }
- /* Enter the current vcn into the current runlist element. */
- rl[rlpos].vcn = vcn;
- /*
- * Get the change in vcn, i.e. the run length in clusters.
- * Doing it this way ensures that we signextend negative values.
- * A negative run length doesn't make any sense, but hey, I
- * didn't make up the NTFS specs and Windows NT4 treats the run
- * length as a signed value so that's how it is...
- */
- b = *buf & 0xf;
- if (b) {
- if (unlikely(buf + b > attr_end))
- goto io_error;
- for (deltaxcn = (s8)buf[b--]; b; b--)
- deltaxcn = (deltaxcn << 8) + buf[b];
- } else { /* The length entry is compulsory. */
- ntfs_error(vol->sb, "Missing length entry in mapping "
- "pairs array.");
- deltaxcn = (s64)-1;
- }
- /*
- * Assume a negative length to indicate data corruption and
- * hence clean-up and return NULL.
- */
- if (unlikely(deltaxcn < 0)) {
- ntfs_error(vol->sb, "Invalid length in mapping pairs "
- "array.");
- goto err_out;
- }
- /*
- * Enter the current run length into the current runlist
- * element.
- */
- rl[rlpos].length = deltaxcn;
- /* Increment the current vcn by the current run length. */
- vcn += deltaxcn;
- /*
- * There might be no lcn change at all, as is the case for
- * sparse clusters on NTFS 3.0+, in which case we set the lcn
- * to LCN_HOLE.
- */
- if (!(*buf & 0xf0))
- rl[rlpos].lcn = (LCN)LCN_HOLE;
- else {
- /* Get the lcn change which really can be negative. */
- u8 b2 = *buf & 0xf;
- b = b2 + ((*buf >> 4) & 0xf);
- if (buf + b > attr_end)
- goto io_error;
- for (deltaxcn = (s8)buf[b--]; b > b2; b--)
- deltaxcn = (deltaxcn << 8) + buf[b];
- /* Change the current lcn to its new value. */
- lcn += deltaxcn;
-#ifdef DEBUG
- /*
- * On NTFS 1.2-, apparently can have lcn == -1 to
- * indicate a hole. But we haven't verified ourselves
- * whether it is really the lcn or the deltaxcn that is
- * -1. So if either is found give us a message so we
- * can investigate it further!
- */
- if (vol->major_ver < 3) {
- if (unlikely(deltaxcn == (LCN)-1))
- ntfs_error(vol->sb, "lcn delta == -1");
- if (unlikely(lcn == (LCN)-1))
- ntfs_error(vol->sb, "lcn == -1");
- }
-#endif
- /* Check lcn is not below -1. */
- if (unlikely(lcn < (LCN)-1)) {
- ntfs_error(vol->sb, "Invalid LCN < -1 in "
- "mapping pairs array.");
- goto err_out;
- }
- /* Enter the current lcn into the runlist element. */
- rl[rlpos].lcn = lcn;
- }
- /* Get to the next runlist element. */
- rlpos++;
- /* Increment the buffer position to the next mapping pair. */
- buf += (*buf & 0xf) + ((*buf >> 4) & 0xf) + 1;
- }
- if (unlikely(buf >= attr_end))
- goto io_error;
- /*
- * If there is a highest_vcn specified, it must be equal to the final
- * vcn in the runlist - 1, or something has gone badly wrong.
- */
- deltaxcn = sle64_to_cpu(attr->data.non_resident.highest_vcn);
- if (unlikely(deltaxcn && vcn - 1 != deltaxcn)) {
-mpa_err:
- ntfs_error(vol->sb, "Corrupt mapping pairs array in "
- "non-resident attribute.");
- goto err_out;
- }
- /* Setup not mapped runlist element if this is the base extent. */
- if (!attr->data.non_resident.lowest_vcn) {
- VCN max_cluster;
-
- max_cluster = (sle64_to_cpu(
- attr->data.non_resident.allocated_size) +
- vol->cluster_size - 1) >>
- vol->cluster_size_bits;
- /*
- * If there is a difference between the highest_vcn and the
- * highest cluster, the runlist is either corrupt or, more
- * likely, there are more extents following this one.
- */
- if (deltaxcn < --max_cluster) {
- ntfs_debug("More extents to follow; deltaxcn = 0x%llx, "
- "max_cluster = 0x%llx",
- (unsigned long long)deltaxcn,
- (unsigned long long)max_cluster);
- rl[rlpos].vcn = vcn;
- vcn += rl[rlpos].length = max_cluster - deltaxcn;
- rl[rlpos].lcn = (LCN)LCN_RL_NOT_MAPPED;
- rlpos++;
- } else if (unlikely(deltaxcn > max_cluster)) {
- ntfs_error(vol->sb, "Corrupt attribute. deltaxcn = "
- "0x%llx, max_cluster = 0x%llx",
- (unsigned long long)deltaxcn,
- (unsigned long long)max_cluster);
- goto mpa_err;
- }
- rl[rlpos].lcn = (LCN)LCN_ENOENT;
- } else /* Not the base extent. There may be more extents to follow. */
- rl[rlpos].lcn = (LCN)LCN_RL_NOT_MAPPED;
-
- /* Setup terminating runlist element. */
- rl[rlpos].vcn = vcn;
- rl[rlpos].length = (s64)0;
- /* If no existing runlist was specified, we are done. */
- if (!old_rl) {
- ntfs_debug("Mapping pairs array successfully decompressed:");
- ntfs_debug_dump_runlist(rl);
- return rl;
- }
- /* Now combine the new and old runlists checking for overlaps. */
- old_rl = ntfs_merge_runlists(old_rl, rl);
- if (likely(!IS_ERR(old_rl)))
- return old_rl;
- ntfs_free(rl);
- ntfs_error(vol->sb, "Failed to merge runlists.");
- return old_rl;
-io_error:
- ntfs_error(vol->sb, "Corrupt attribute.");
-err_out:
- ntfs_free(rl);
- return ERR_PTR(-EIO);
-}
+#include "attrib.h"
+#include "debug.h"
+#include "mft.h"
+#include "ntfs.h"
/**
* ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
@@ -973,10 +69,11 @@ int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
down_write(&ni->runlist.lock);
/* Make sure someone else didn't do the work while we were sleeping. */
- if (likely(ntfs_vcn_to_lcn(ni->runlist.rl, vcn) <= LCN_RL_NOT_MAPPED)) {
+ if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
+ LCN_RL_NOT_MAPPED)) {
runlist_element *rl;
- rl = decompress_mapping_pairs(ni->vol, ctx->attr,
+ rl = ntfs_mapping_pairs_decompress(ni->vol, ctx->attr,
ni->runlist.rl);
if (IS_ERR(rl))
err = PTR_ERR(rl);
@@ -993,63 +90,6 @@ err_out:
}
/**
- * ntfs_vcn_to_lcn - convert a vcn into a lcn given a runlist
- * @rl: runlist to use for conversion
- * @vcn: vcn to convert
- *
- * Convert the virtual cluster number @vcn of an attribute into a logical
- * cluster number (lcn) of a device using the runlist @rl to map vcns to their
- * corresponding lcns.
- *
- * It is up to the caller to serialize access to the runlist @rl.
- *
- * Since lcns must be >= 0, we use negative return values with special meaning:
- *
- * Return value Meaning / Description
- * ==================================================
- * -1 = LCN_HOLE Hole / not allocated on disk.
- * -2 = LCN_RL_NOT_MAPPED This is part of the runlist which has not been
- * inserted into the runlist yet.
- * -3 = LCN_ENOENT There is no such vcn in the attribute.
- *
- * Locking: - The caller must have locked the runlist (for reading or writing).
- * - This function does not touch the lock.
- */
-LCN ntfs_vcn_to_lcn(const runlist_element *rl, const VCN vcn)
-{
- int i;
-
- BUG_ON(vcn < 0);
- /*
- * If rl is NULL, assume that we have found an unmapped runlist. The
- * caller can then attempt to map it and fail appropriately if
- * necessary.
- */
- if (unlikely(!rl))
- return (LCN)LCN_RL_NOT_MAPPED;
-
- /* Catch out of lower bounds vcn. */
- if (unlikely(vcn < rl[0].vcn))
- return (LCN)LCN_ENOENT;
-
- for (i = 0; likely(rl[i].length); i++) {
- if (unlikely(vcn < rl[i+1].vcn)) {
- if (likely(rl[i].lcn >= (LCN)0))
- return rl[i].lcn + (vcn - rl[i].vcn);
- return rl[i].lcn;
- }
- }
- /*
- * The terminator element is setup to the correct value, i.e. one of
- * LCN_HOLE, LCN_RL_NOT_MAPPED, or LCN_ENOENT.
- */
- if (likely(rl[i].lcn < (LCN)0))
- return rl[i].lcn;
- /* Just in case... We could replace this with BUG() some day. */
- return (LCN)LCN_ENOENT;
-}
-
-/**
* ntfs_find_vcn - find a vcn in the runlist described by an ntfs inode
* @ni: ntfs inode describing the runlist to search
* @vcn: vcn to find
@@ -1104,7 +144,7 @@ lock_retry_remap:
if (likely(rl && vcn >= rl[0].vcn)) {
while (likely(rl->length)) {
if (likely(vcn < rl[1].vcn)) {
- if (likely(rl->lcn >= (LCN)LCN_HOLE)) {
+ if (likely(rl->lcn >= LCN_HOLE)) {
ntfs_debug("Done.");
return rl;
}
@@ -1112,8 +152,8 @@ lock_retry_remap:
}
rl++;
}
- if (likely(rl->lcn != (LCN)LCN_RL_NOT_MAPPED)) {
- if (likely(rl->lcn == (LCN)LCN_ENOENT))
+ if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
+ if (likely(rl->lcn == LCN_ENOENT))
err = -ENOENT;
else
err = -EIO;
@@ -1362,14 +402,14 @@ int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start,
rl = runlist->rl;
/* Read all clusters specified by the runlist one run at a time. */
while (rl->length) {
- lcn = ntfs_vcn_to_lcn(rl, rl->vcn);
+ lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
(unsigned long long)rl->vcn,
(unsigned long long)lcn);
/* The attribute list cannot be sparse. */
if (lcn < 0) {
- ntfs_error(sb, "ntfs_vcn_to_lcn() failed. Cannot read "
- "attribute list.");
+ ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed. Cannot "
+ "read attribute list.");
goto err_out;
}
block = lcn << vol->cluster_size_bits >> block_size_bits;
@@ -1908,3 +948,188 @@ void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx)
kmem_cache_free(ntfs_attr_ctx_cache, ctx);
return;
}
+
+/**
+ * ntfs_attr_record_resize - resize an attribute record
+ * @m: mft record containing attribute record
+ * @a: attribute record to resize
+ * @new_size: new size in bytes to which to resize the attribute record @a
+ *
+ * Resize the attribute record @a, i.e. the resident part of the attribute, in
+ * the mft record @m to @new_size bytes.
+ *
+ * Return 0 on success and -errno on error. The following error codes are
+ * defined:
+ * -ENOSPC - Not enough space in the mft record @m to perform the resize.
+ *
+ * Note: On error, no modifications have been performed whatsoever.
+ *
+ * Warning: If you make a record smaller without having copied all the data you
+ * are interested in the data may be overwritten.
+ */
+int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size)
+{
+ ntfs_debug("Entering for new_size %u.", new_size);
+ /* Align to 8 bytes if it is not already done. */
+ if (new_size & 7)
+ new_size = (new_size + 7) & ~7;
+ /* If the actual attribute length has changed, move things around. */
+ if (new_size != le32_to_cpu(a->length)) {
+ u32 new_muse = le32_to_cpu(m->bytes_in_use) -
+ le32_to_cpu(a->length) + new_size;
+ /* Not enough space in this mft record. */
+ if (new_muse > le32_to_cpu(m->bytes_allocated))
+ return -ENOSPC;
+ /* Move attributes following @a to their new location. */
+ memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length),
+ le32_to_cpu(m->bytes_in_use) - ((u8*)a -
+ (u8*)m) - le32_to_cpu(a->length));
+ /* Adjust @m to reflect the change in used space. */
+ m->bytes_in_use = cpu_to_le32(new_muse);
+ /* Adjust @a to reflect the new size. */
+ if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
+ a->length = cpu_to_le32(new_size);
+ }
+ return 0;
+}
+
+/**
+ * ntfs_attr_set - fill (a part of) an attribute with a byte
+ * @ni: ntfs inode describing the attribute to fill
+ * @ofs: offset inside the attribute at which to start to fill
+ * @cnt: number of bytes to fill
+ * @val: the unsigned 8-bit value with which to fill the attribute
+ *
+ * Fill @cnt bytes of the attribute described by the ntfs inode @ni starting at
+ * byte offset @ofs inside the attribute with the constant byte @val.
+ *
+ * This function is effectively like memset() applied to an ntfs attribute.
+ *
+ * Return 0 on success and -errno on error. An error code of -ESPIPE means
+ * that @ofs + @cnt were outside the end of the attribute and no write was
+ * performed.
+ */
+int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
+{
+ ntfs_volume *vol = ni->vol;
+ struct address_space *mapping;
+ struct page *page;
+ u8 *kaddr;
+ pgoff_t idx, end;
+ unsigned int start_ofs, end_ofs, size;
+
+ ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
+ (long long)ofs, (long long)cnt, val);
+ BUG_ON(ofs < 0);
+ BUG_ON(cnt < 0);
+ if (!cnt)
+ goto done;
+ mapping = VFS_I(ni)->i_mapping;
+ /* Work out the starting index and page offset. */
+ idx = ofs >> PAGE_CACHE_SHIFT;
+ start_ofs = ofs & ~PAGE_CACHE_MASK;
+ /* Work out the ending index and page offset. */
+ end = ofs + cnt;
+ end_ofs = end & ~PAGE_CACHE_MASK;
+ /* If the end is outside the inode size return -ESPIPE. */
+ if (unlikely(end > VFS_I(ni)->i_size)) {
+ ntfs_error(vol->sb, "Request exceeds end of attribute.");
+ return -ESPIPE;
+ }
+ end >>= PAGE_CACHE_SHIFT;
+ /* If there is a first partial page, need to do it the slow way. */
+ if (start_ofs) {
+ page = read_cache_page(mapping, idx,
+ (filler_t*)mapping->a_ops->readpage, NULL);
+ if (IS_ERR(page)) {
+ ntfs_error(vol->sb, "Failed to read first partial "
+ "page (sync error, index 0x%lx).", idx);
+ return PTR_ERR(page);
+ }
+ wait_on_page_locked(page);
+ if (unlikely(!PageUptodate(page))) {
+ ntfs_error(vol->sb, "Failed to read first partial page "
+ "(async error, index 0x%lx).", idx);
+ page_cache_release(page);
+ return PTR_ERR(page);
+ }
+ /*
+ * If the last page is the same as the first page, need to
+ * limit the write to the end offset.
+ */
+ size = PAGE_CACHE_SIZE;
+ if (idx == end)
+ size = end_ofs;
+ kaddr = kmap_atomic(page, KM_USER0);
+ memset(kaddr + start_ofs, val, size - start_ofs);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr, KM_USER0);
+ set_page_dirty(page);
+ page_cache_release(page);
+ if (idx == end)
+ goto done;
+ idx++;
+ }
+ /* Do the whole pages the fast way. */
+ for (; idx < end; idx++) {
+ /* Find or create the current page. (The page is locked.) */
+ page = grab_cache_page(mapping, idx);
+ if (unlikely(!page)) {
+ ntfs_error(vol->sb, "Insufficient memory to grab "
+ "page (index 0x%lx).", idx);
+ return -ENOMEM;
+ }
+ kaddr = kmap_atomic(page, KM_USER0);
+ memset(kaddr, val, PAGE_CACHE_SIZE);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr, KM_USER0);
+ /*
+ * If the page has buffers, mark them uptodate since buffer
+ * state and not page state is definitive in 2.6 kernels.
+ */
+ if (page_has_buffers(page)) {
+ struct buffer_head *bh, *head;
+
+ bh = head = page_buffers(page);
+ do {
+ set_buffer_uptodate(bh);
+ } while ((bh = bh->b_this_page) != head);
+ }
+ /* Now that buffers are uptodate, set the page uptodate, too. */
+ SetPageUptodate(page);
+ /*
+ * Set the page and all its buffers dirty and mark the inode
+ * dirty, too. The VM will write the page later on.
+ */
+ set_page_dirty(page);
+ /* Finally unlock and release the page. */
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ /* If there is a last partial page, need to do it the slow way. */
+ if (end_ofs) {
+ page = read_cache_page(mapping, idx,
+ (filler_t*)mapping->a_ops->readpage, NULL);
+ if (IS_ERR(page)) {
+ ntfs_error(vol->sb, "Failed to read last partial page "
+ "(sync error, index 0x%lx).", idx);
+ return PTR_ERR(page);
+ }
+ wait_on_page_locked(page);
+ if (unlikely(!PageUptodate(page))) {
+ ntfs_error(vol->sb, "Failed to read last partial page "
+ "(async error, index 0x%lx).", idx);
+ page_cache_release(page);
+ return PTR_ERR(page);
+ }
+ kaddr = kmap_atomic(page, KM_USER0);
+ memset(kaddr, val, end_ofs);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr, KM_USER0);
+ set_page_dirty(page);
+ page_cache_release(page);
+ }
+done:
+ ntfs_debug("Done.");
+ return 0;
+}
diff --git a/fs/ntfs/attrib.h b/fs/ntfs/attrib.h
index 92899f4ff571..3d98ce8b7aa0 100644
--- a/fs/ntfs/attrib.h
+++ b/fs/ntfs/attrib.h
@@ -24,23 +24,11 @@
#ifndef _LINUX_NTFS_ATTRIB_H
#define _LINUX_NTFS_ATTRIB_H
-#include <linux/fs.h>
-
#include "endian.h"
#include "types.h"
#include "layout.h"
-
-static inline void init_runlist(runlist *rl)
-{
- rl->rl = NULL;
- init_rwsem(&rl->lock);
-}
-
-typedef enum {
- LCN_HOLE = -1, /* Keep this as highest value or die! */
- LCN_RL_NOT_MAPPED = -2,
- LCN_ENOENT = -3,
-} LCN_SPECIAL_VALUES;
+#include "inode.h"
+#include "runlist.h"
/**
* ntfs_attr_search_ctx - used in attribute search functions
@@ -71,13 +59,8 @@ typedef struct {
ATTR_RECORD *base_attr;
} ntfs_attr_search_ctx;
-extern runlist_element *decompress_mapping_pairs(const ntfs_volume *vol,
- const ATTR_RECORD *attr, runlist_element *old_rl);
-
extern int ntfs_map_runlist(ntfs_inode *ni, VCN vcn);
-extern LCN ntfs_vcn_to_lcn(const runlist_element *rl, const VCN vcn);
-
extern runlist_element *ntfs_find_vcn(ntfs_inode *ni, const VCN vcn,
const BOOL need_write);
@@ -101,4 +84,9 @@ extern ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni,
MFT_RECORD *mrec);
extern void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx);
+extern int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size);
+
+extern int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt,
+ const u8 val);
+
#endif /* _LINUX_NTFS_ATTRIB_H */
diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c
index b8f06111f6ef..12cf2e30c7dd 100644
--- a/fs/ntfs/bitmap.c
+++ b/fs/ntfs/bitmap.c
@@ -25,6 +25,7 @@
#include "bitmap.h"
#include "debug.h"
+#include "aops.h"
#include "ntfs.h"
/**
diff --git a/fs/ntfs/collate.c b/fs/ntfs/collate.c
index 31dd894a4319..4a28ab3898ef 100644
--- a/fs/ntfs/collate.c
+++ b/fs/ntfs/collate.c
@@ -19,8 +19,9 @@
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include "ntfs.h"
#include "collate.h"
+#include "debug.h"
+#include "ntfs.h"
static int ntfs_collate_binary(ntfs_volume *vol,
const void *data1, const int data1_len,
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index cc3aac147ba3..5d173da9b645 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -25,6 +25,9 @@
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
+#include "attrib.h"
+#include "inode.h"
+#include "debug.h"
#include "ntfs.h"
/**
@@ -600,9 +603,9 @@ lock_retry_remap:
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
- lcn = ntfs_vcn_to_lcn(rl, vcn);
+ lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
} else
- lcn = (LCN)LCN_RL_NOT_MAPPED;
+ lcn = LCN_RL_NOT_MAPPED;
ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
(unsigned long long)vcn,
(unsigned long long)lcn);
@@ -926,7 +929,7 @@ map_rl_err:
rl_err:
up_read(&ni->runlist.lock);
- ntfs_error(vol->sb, "ntfs_vcn_to_lcn() failed. Cannot read "
+ ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
"compression block.");
goto err_out;
diff --git a/fs/ntfs/debug.h b/fs/ntfs/debug.h
index 63c62602224e..8ac37c33d127 100644
--- a/fs/ntfs/debug.h
+++ b/fs/ntfs/debug.h
@@ -22,13 +22,9 @@
#ifndef _LINUX_NTFS_DEBUG_H
#define _LINUX_NTFS_DEBUG_H
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
#include <linux/fs.h>
-#include "inode.h"
-#include "attrib.h"
+#include "runlist.h"
#ifdef DEBUG
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index ed9838e0c5d0..9b8594d4311b 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -21,8 +21,14 @@
*/
#include <linux/smp_lock.h>
-#include "ntfs.h"
+#include <linux/buffer_head.h>
+
#include "dir.h"
+#include "aops.h"
+#include "attrib.h"
+#include "mft.h"
+#include "debug.h"
+#include "ntfs.h"
/**
* The little endian Unicode string $I30 as a global constant.
@@ -1272,7 +1278,7 @@ get_next_bmp_page:
ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx",
(unsigned long long)bmp_pos >> (3 + PAGE_CACHE_SHIFT),
(unsigned long long)bmp_pos &
- ((PAGE_CACHE_SIZE * 8) - 1));
+ (unsigned long long)((PAGE_CACHE_SIZE * 8) - 1));
bmp_page = ntfs_map_page(bmp_mapping,
bmp_pos >> (3 + PAGE_CACHE_SHIFT));
if (IS_ERR(bmp_page)) {
@@ -1386,8 +1392,8 @@ find_next_index_buffer:
*/
for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
ntfs_debug("In index allocation, offset 0x%llx.",
- (unsigned long long)ia_start + ((u8*)ie -
- (u8*)ia));
+ (unsigned long long)ia_start +
+ (unsigned long long)((u8*)ie - (u8*)ia));
/* Bounds checks. */
if (unlikely((u8*)ie < (u8*)ia || (u8*)ie +
sizeof(INDEX_ENTRY_HEADER) > index_end ||
diff --git a/fs/ntfs/dir.h b/fs/ntfs/dir.h
index 90a8c3f65203..aea7582d561f 100644
--- a/fs/ntfs/dir.h
+++ b/fs/ntfs/dir.h
@@ -24,6 +24,8 @@
#define _LINUX_NTFS_DIR_H
#include "layout.h"
+#include "inode.h"
+#include "types.h"
/*
* ntfs_name is used to return the file name to the caller of
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index c7880d55b250..2ec83edb6893 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -19,6 +19,11 @@
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
+
+#include "inode.h"
+#include "debug.h"
#include "ntfs.h"
/**
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index b19a4882b689..aded65d13a65 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -19,9 +19,11 @@
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include "ntfs.h"
+#include "aops.h"
#include "collate.h"
+#include "debug.h"
#include "index.h"
+#include "ntfs.h"
/**
* ntfs_index_ctx_get - allocate and initialize a new index context
@@ -459,58 +461,3 @@ idx_err_out:
err = -EIO;
goto err_out;
}
-
-#ifdef NTFS_RW
-
-/**
- * __ntfs_index_entry_mark_dirty - mark an index allocation entry dirty
- * @ictx: ntfs index context describing the index entry
- *
- * NOTE: You want to use fs/ntfs/index.h::ntfs_index_entry_mark_dirty() instead!
- *
- * Mark the index allocation entry described by the index entry context @ictx
- * dirty.
- *
- * The index entry must be in an index block belonging to the index allocation
- * attribute. Mark the buffers belonging to the index record as well as the
- * page cache page the index block is in dirty. This automatically marks the
- * VFS inode of the ntfs index inode to which the index entry belongs dirty,
- * too (I_DIRTY_PAGES) and this in turn ensures the page buffers, and hence the
- * dirty index block, will be written out to disk later.
- */
-void __ntfs_index_entry_mark_dirty(ntfs_index_context *ictx)
-{
- ntfs_inode *ni;
- struct page *page;
- struct buffer_head *bh, *head;
- unsigned int rec_start, rec_end, bh_size, bh_start, bh_end;
-
- BUG_ON(ictx->is_in_root);
- ni = ictx->idx_ni;
- page = ictx->page;
- BUG_ON(!page_has_buffers(page));
- /*
- * If the index block is the same size as the page cache page, set all
- * the buffers in the page, as well as the page itself, dirty.
- */
- if (ni->itype.index.block_size == PAGE_CACHE_SIZE) {
- __set_page_dirty_buffers(page);
- return;
- }
- /* Set only the buffers in which the index block is located dirty. */
- rec_start = (unsigned int)((u8*)ictx->ia - (u8*)page_address(page));
- rec_end = rec_start + ni->itype.index.block_size;
- bh_size = ni->vol->sb->s_blocksize;
- bh_start = 0;
- bh = head = page_buffers(page);
- do {
- bh_end = bh_start + bh_size;
- if ((bh_start >= rec_start) && (bh_end <= rec_end))
- set_buffer_dirty(bh);
- bh_start = bh_end;
- } while ((bh = bh->b_this_page) != head);
- /* Finally, set the page itself dirty, too. */
- __set_page_dirty_nobuffers(page);
-}
-
-#endif /* NTFS_RW */
diff --git a/fs/ntfs/index.h b/fs/ntfs/index.h
index 159442dfa90a..846a489e8692 100644
--- a/fs/ntfs/index.h
+++ b/fs/ntfs/index.h
@@ -30,6 +30,7 @@
#include "inode.h"
#include "attrib.h"
#include "mft.h"
+#include "aops.h"
/**
* @idx_ni: index inode containing the @entry described by this context
@@ -115,8 +116,6 @@ static inline void ntfs_index_entry_flush_dcache_page(ntfs_index_context *ictx)
flush_dcache_page(ictx->page);
}
-extern void __ntfs_index_entry_mark_dirty(ntfs_index_context *ictx);
-
/**
* ntfs_index_entry_mark_dirty - mark an index entry dirty
* @ictx: ntfs index context describing the index entry
@@ -140,7 +139,8 @@ static inline void ntfs_index_entry_mark_dirty(ntfs_index_context *ictx)
if (ictx->is_in_root)
mark_mft_record_dirty(ictx->actx->ntfs_ino);
else
- __ntfs_index_entry_mark_dirty(ictx);
+ mark_ntfs_record_dirty(ictx->page,
+ (u8*)ictx->ia - (u8*)page_address(ictx->page));
}
#endif /* NTFS_RW */
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 873c25d9830f..181f9b8df900 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -25,11 +25,15 @@
#include <linux/quotaops.h>
#include <linux/mount.h>
-#include "ntfs.h"
+#include "aops.h"
#include "dir.h"
+#include "debug.h"
#include "inode.h"
#include "attrib.h"
+#include "malloc.h"
+#include "mft.h"
#include "time.h"
+#include "ntfs.h"
/**
* ntfs_test_inode - compare two (possibly fake) inodes for equality
@@ -369,20 +373,20 @@ void ntfs_destroy_extent_inode(ntfs_inode *ni)
*
* Return zero on success and -ENOMEM on error.
*/
-static void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
+void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
{
ntfs_debug("Entering.");
ni->initialized_size = ni->allocated_size = 0;
ni->seq_no = 0;
atomic_set(&ni->count, 1);
ni->vol = NTFS_SB(sb);
- init_runlist(&ni->runlist);
+ ntfs_init_runlist(&ni->runlist);
init_MUTEX(&ni->mrec_lock);
ni->page = NULL;
ni->page_ofs = 0;
ni->attr_list_size = 0;
ni->attr_list = NULL;
- init_runlist(&ni->attr_list_rl);
+ ntfs_init_runlist(&ni->attr_list_rl);
ni->itype.index.bmp_ino = NULL;
ni->itype.index.block_size = 0;
ni->itype.index.vcn_size = 0;
@@ -392,17 +396,6 @@ static void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
init_MUTEX(&ni->extent_lock);
ni->nr_extents = 0;
ni->ext.base_ntfs_ino = NULL;
- return;
-}
-
-static inline void ntfs_init_big_inode(struct inode *vi)
-{
- ntfs_inode *ni = NTFS_I(vi);
-
- ntfs_debug("Entering.");
- __ntfs_init_inode(vi->i_sb, ni);
- ni->mft_no = vi->i_ino;
- return;
}
inline ntfs_inode *ntfs_new_extent_inode(struct super_block *sb,
@@ -601,14 +594,26 @@ static int ntfs_read_locked_inode(struct inode *vi)
* Also if not a directory, it could be something else, rather than
* a regular file. But again, will do for now.
*/
+ /* Everyone gets all permissions. */
+ vi->i_mode |= S_IRWXUGO;
+ /* If read-only, noone gets write permissions. */
+ if (IS_RDONLY(vi))
+ vi->i_mode &= ~S_IWUGO;
if (m->flags & MFT_RECORD_IS_DIRECTORY) {
vi->i_mode |= S_IFDIR;
+ /*
+ * Apply the directory permissions mask set in the mount
+ * options.
+ */
+ vi->i_mode &= ~vol->dmask;
/* Things break without this kludge! */
if (vi->i_nlink > 1)
vi->i_nlink = 1;
- } else
+ } else {
vi->i_mode |= S_IFREG;
-
+ /* Apply the file permissions mask set in the mount options. */
+ vi->i_mode &= ~vol->fmask;
+ }
/*
* Find the standard information attribute in the mft record. At this
* stage we haven't setup the attribute list stuff yet, so this could
@@ -701,7 +706,7 @@ static int ntfs_read_locked_inode(struct inode *vi)
* Setup the runlist. No need for locking as we have
* exclusive access to the inode at this time.
*/
- ni->attr_list_rl.rl = decompress_mapping_pairs(vol,
+ ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol,
ctx->attr, NULL);
if (IS_ERR(ni->attr_list_rl.rl)) {
err = PTR_ERR(ni->attr_list_rl.rl);
@@ -951,20 +956,9 @@ skip_attr_list_load:
goto unm_err_out;
}
skip_large_dir_stuff:
- /* Everyone gets read and scan permissions. */
- vi->i_mode |= S_IRUGO | S_IXUGO;
- /* If not read-only, set write permissions. */
- if (!IS_RDONLY(vi))
- vi->i_mode |= S_IWUGO;
- /*
- * Apply the directory permissions mask set in the mount
- * options.
- */
- vi->i_mode &= ~vol->dmask;
/* Setup the operations for this inode. */
vi->i_op = &ntfs_dir_inode_ops;
vi->i_fop = &ntfs_dir_ops;
- vi->i_mapping->a_ops = &ntfs_mst_aops;
} else {
/* It is a file. */
ntfs_attr_reinit_search_ctx(ctx);
@@ -1098,18 +1092,14 @@ no_data_attr_special_case:
unmap_mft_record(ni);
m = NULL;
ctx = NULL;
- /* Everyone gets all permissions. */
- vi->i_mode |= S_IRWXUGO;
- /* If read-only, noone gets write permissions. */
- if (IS_RDONLY(vi))
- vi->i_mode &= ~S_IWUGO;
- /* Apply the file permissions mask set in the mount options. */
- vi->i_mode &= ~vol->fmask;
/* Setup the operations for this inode. */
vi->i_op = &ntfs_file_inode_ops;
vi->i_fop = &ntfs_file_ops;
- vi->i_mapping->a_ops = &ntfs_aops;
}
+ if (NInoMstProtected(ni))
+ vi->i_mapping->a_ops = &ntfs_mst_aops;
+ else
+ vi->i_mapping->a_ops = &ntfs_aops;
/*
* The number of 512-byte blocks used on disk (for stat). This is in so
* far inaccurate as it doesn't account for any named streams or other
@@ -1672,8 +1662,8 @@ err_out:
*
* We solve these problems by starting with the $DATA attribute before anything
* else and iterating using ntfs_attr_lookup($DATA) over all extents. As each
- * extent is found, we decompress_mapping_pairs() including the implied
- * ntfs_merge_runlists(). Each step of the iteration necessarily provides
+ * extent is found, we ntfs_mapping_pairs_decompress() including the implied
+ * ntfs_runlists_merge(). Each step of the iteration necessarily provides
* sufficient information for the next step to complete.
*
* This should work but there are two possible pit falls (see inline comments
@@ -1762,7 +1752,7 @@ int ntfs_read_inode_mount(struct inode *vi)
vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
/* Provides readpage() and sync_page() for map_mft_record(). */
- vi->i_mapping->a_ops = &ntfs_mft_aops;
+ vi->i_mapping->a_ops = &ntfs_mst_aops;
ctx = ntfs_attr_get_search_ctx(ni, m);
if (!ctx) {
@@ -1810,7 +1800,7 @@ int ntfs_read_inode_mount(struct inode *vi)
goto put_err_out;
}
/* Setup the runlist. */
- ni->attr_list_rl.rl = decompress_mapping_pairs(vol,
+ ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol,
ctx->attr, NULL);
if (IS_ERR(ni->attr_list_rl.rl)) {
err = PTR_ERR(ni->attr_list_rl.rl);
@@ -1942,11 +1932,11 @@ int ntfs_read_inode_mount(struct inode *vi)
* as we have exclusive access to the inode at this time and we
* are a mount in progress task, too.
*/
- nrl = decompress_mapping_pairs(vol, attr, ni->runlist.rl);
+ nrl = ntfs_mapping_pairs_decompress(vol, attr, ni->runlist.rl);
if (IS_ERR(nrl)) {
- ntfs_error(sb, "decompress_mapping_pairs() failed with "
- "error code %ld. $MFT is corrupt.",
- PTR_ERR(nrl));
+ ntfs_error(sb, "ntfs_mapping_pairs_decompress() "
+ "failed with error code %ld. $MFT is "
+ "corrupt.", PTR_ERR(nrl));
goto put_err_out;
}
ni->runlist.rl = nrl;
@@ -2024,8 +2014,6 @@ int ntfs_read_inode_mount(struct inode *vi)
/* No VFS initiated operations allowed for $MFT. */
vi->i_op = &ntfs_empty_inode_ops;
vi->i_fop = &ntfs_empty_file_ops;
- /* Put back our special address space operations. */
- vi->i_mapping->a_ops = &ntfs_mft_aops;
}
/* Get the lowest vcn for the next extent. */
@@ -2091,37 +2079,24 @@ err_out:
* dropped, we need to put the attribute inode for the directory index bitmap,
* if it is present, otherwise the directory inode would remain pinned for
* ever.
- *
- * If the inode @vi is an index inode with only one reference which is being
- * dropped, we need to put the attribute inode for the index bitmap, if it is
- * present, otherwise the index inode would disappear and the attribute inode
- * for the index bitmap would no longer be referenced from anywhere and thus it
- * would remain pinned for ever.
*/
void ntfs_put_inode(struct inode *vi)
{
- ntfs_inode *ni;
-
- if (S_ISDIR(vi->i_mode)) {
- if (atomic_read(&vi->i_count) == 2) {
- ni = NTFS_I(vi);
- if (NInoIndexAllocPresent(ni) &&
- ni->itype.index.bmp_ino) {
- iput(ni->itype.index.bmp_ino);
- ni->itype.index.bmp_ino = NULL;
+ if (S_ISDIR(vi->i_mode) && atomic_read(&vi->i_count) == 2) {
+ ntfs_inode *ni = NTFS_I(vi);
+ if (NInoIndexAllocPresent(ni)) {
+ struct inode *bvi = NULL;
+ down(&vi->i_sem);
+ if (atomic_read(&vi->i_count) == 2) {
+ bvi = ni->itype.index.bmp_ino;
+ if (bvi)
+ ni->itype.index.bmp_ino = NULL;
}
+ up(&vi->i_sem);
+ if (bvi)
+ iput(bvi);
}
- return;
}
- if (atomic_read(&vi->i_count) != 1)
- return;
- ni = NTFS_I(vi);
- if (NInoAttr(ni) && (ni->type == AT_INDEX_ALLOCATION) &&
- NInoIndexAllocPresent(ni) && ni->itype.index.bmp_ino) {
- iput(ni->itype.index.bmp_ino);
- ni->itype.index.bmp_ino = NULL;
- }
- return;
}
void __ntfs_clear_inode(ntfs_inode *ni)
@@ -2189,6 +2164,18 @@ void ntfs_clear_big_inode(struct inode *vi)
{
ntfs_inode *ni = NTFS_I(vi);
+ /*
+ * If the inode @vi is an index inode we need to put the attribute
+ * inode for the index bitmap, if it is present, otherwise the index
+ * inode would disappear and the attribute inode for the index bitmap
+ * would no longer be referenced from anywhere and thus it would remain
+ * pinned for ever.
+ */
+ if (NInoAttr(ni) && (ni->type == AT_INDEX_ALLOCATION) &&
+ NInoIndexAllocPresent(ni) && ni->itype.index.bmp_ino) {
+ iput(ni->itype.index.bmp_ino);
+ ni->itype.index.bmp_ino = NULL;
+ }
#ifdef NTFS_RW
if (NInoDirty(ni)) {
BOOL was_bad = (is_bad_inode(vi));
@@ -2268,7 +2255,7 @@ int ntfs_show_options(struct seq_file *sf, struct vfsmount *mnt)
* ntfs_truncate - called when the i_size of an ntfs inode is changed
* @vi: inode for which the i_size was changed
*
- * We don't support i_size changes yet.
+ * We do not support i_size changes yet.
*
* The kernel guarantees that @vi is a regular file (S_ISREG() is true) and
* that the change is allowed.
@@ -2289,6 +2276,8 @@ void ntfs_truncate(struct inode *vi)
MFT_RECORD *m;
int err;
+ BUG_ON(NInoAttr(ni));
+ BUG_ON(ni->nr_extents < 0);
m = map_mft_record(ni);
if (IS_ERR(m)) {
ntfs_error(vi->i_sb, "Failed to map mft record for inode 0x%lx "
@@ -2500,9 +2489,16 @@ int ntfs_write_inode(struct inode *vi, int sync)
* dirty, since we are going to write this mft record below in any case
* and the base mft record may actually not have been modified so it
* might not need to be written out.
+ * NOTE: It is not a problem when the inode for $MFT itself is being
+ * written out as mark_ntfs_record_dirty() will only set I_DIRTY_PAGES
+ * on the $MFT inode and hence ntfs_write_inode() will not be
+ * re-invoked because of it which in turn is ok since the dirtied mft
+ * record will be cleaned and written out to disk below, i.e. before
+ * this function returns.
*/
if (modified && !NInoTestSetDirty(ctx->ntfs_ino))
- __set_page_dirty_nobuffers(ctx->ntfs_ino->page);
+ mark_ntfs_record_dirty(ctx->ntfs_ino->page,
+ ctx->ntfs_ino->page_ofs);
ntfs_attr_put_search_ctx(ctx);
/* Now the access times are updated, write the base mft record. */
if (NInoDirty(ni))
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h
index 3aa7b873fe0d..eb54db217e87 100644
--- a/fs/ntfs/inode.h
+++ b/fs/ntfs/inode.h
@@ -24,10 +24,18 @@
#ifndef _LINUX_NTFS_INODE_H
#define _LINUX_NTFS_INODE_H
+#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/seq_file.h>
+#include <linux/list.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
#include "layout.h"
#include "volume.h"
+#include "types.h"
+#include "runlist.h"
+#include "debug.h"
typedef struct _ntfs_inode ntfs_inode;
@@ -269,6 +277,17 @@ extern struct inode *ntfs_alloc_big_inode(struct super_block *sb);
extern void ntfs_destroy_big_inode(struct inode *inode);
extern void ntfs_clear_big_inode(struct inode *vi);
+extern void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni);
+
+static inline void ntfs_init_big_inode(struct inode *vi)
+{
+ ntfs_inode *ni = NTFS_I(vi);
+
+ ntfs_debug("Entering.");
+ __ntfs_init_inode(vi->i_sb, ni);
+ ni->mft_no = vi->i_ino;
+}
+
extern ntfs_inode *ntfs_new_extent_inode(struct super_block *sb,
unsigned long mft_no);
extern void ntfs_clear_extent_inode(ntfs_inode *ni);
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h
index 57f3a8893f17..a34eed5b922d 100644
--- a/fs/ntfs/layout.h
+++ b/fs/ntfs/layout.h
@@ -260,7 +260,7 @@ typedef enum {
enum {
MFT_RECORD_IN_USE = const_cpu_to_le16(0x0001),
MFT_RECORD_IS_DIRECTORY = const_cpu_to_le16(0x0002),
-};
+} __attribute__ ((__packed__));
typedef le16 MFT_RECORD_FLAGS;
@@ -316,6 +316,10 @@ typedef enum {
typedef u64 MFT_REF;
typedef le64 leMFT_REF;
+#define MK_MREF(m, s) ((MFT_REF)(((MFT_REF)(s) << 48) | \
+ ((MFT_REF)(m) & MFT_REF_MASK_CPU)))
+#define MK_LE_MREF(m, s) cpu_to_le64(MK_MREF(m, s))
+
#define MREF(x) ((unsigned long)((x) & MFT_REF_MASK_CPU))
#define MSEQNO(x) ((u16)(((x) >> 48) & 0xffff))
#define MREF_LE(x) ((unsigned long)(le64_to_cpu(x) & MFT_REF_MASK_CPU))
@@ -385,22 +389,87 @@ typedef struct {
NOTE: Every time the mft record is reused
this number is set to zero. NOTE: The first
instance number is always 0. */
-/* sizeof() = 42 bytes */
-/* NTFS 3.1+ (Windows XP and above) introduce the following additions. */
-/* 42*/ //le16 reserved; /* Reserved/alignment. */
-/* 44*/ //le32 mft_record_number;/* Number of this mft record. */
+/* The below fields are specific to NTFS 3.1+ (Windows XP and above): */
+/* 42*/ le16 reserved; /* Reserved/alignment. */
+/* 44*/ le32 mft_record_number; /* Number of this mft record. */
/* sizeof() = 48 bytes */
/*
* When (re)using the mft record, we place the update sequence array at this
- * offset, i.e. before we start with the attributes. This also makes sense,
+ * offset, i.e. before we start with the attributes. This also makes sense,
* otherwise we could run into problems with the update sequence array
* containing in itself the last two bytes of a sector which would mean that
- * multi sector transfer protection wouldn't work. As you can't protect data
+ * multi sector transfer protection wouldn't work. As you can't protect data
* by overwriting it since you then can't get it back...
* When reading we obviously use the data from the ntfs record header.
*/
} __attribute__ ((__packed__)) MFT_RECORD;
+/* This is the version without the NTFS 3.1+ specific fields. */
+typedef struct {
+/*Ofs*/
+/* 0 NTFS_RECORD; -- Unfolded here as gcc doesn't like unnamed structs. */
+ NTFS_RECORD_TYPE magic; /* Usually the magic is "FILE". */
+ le16 usa_ofs; /* See NTFS_RECORD definition above. */
+ le16 usa_count; /* See NTFS_RECORD definition above. */
+
+/* 8*/ le64 lsn; /* $LogFile sequence number for this record.
+ Changed every time the record is modified. */
+/* 16*/ le16 sequence_number; /* Number of times this mft record has been
+ reused. (See description for MFT_REF
+ above.) NOTE: The increment (skipping zero)
+ is done when the file is deleted. NOTE: If
+ this is zero it is left zero. */
+/* 18*/ le16 link_count; /* Number of hard links, i.e. the number of
+ directory entries referencing this record.
+ NOTE: Only used in mft base records.
+ NOTE: When deleting a directory entry we
+ check the link_count and if it is 1 we
+ delete the file. Otherwise we delete the
+ FILE_NAME_ATTR being referenced by the
+ directory entry from the mft record and
+ decrement the link_count.
+ FIXME: Careful with Win32 + DOS names! */
+/* 20*/ le16 attrs_offset; /* Byte offset to the first attribute in this
+ mft record from the start of the mft record.
+ NOTE: Must be aligned to 8-byte boundary. */
+/* 22*/ MFT_RECORD_FLAGS flags; /* Bit array of MFT_RECORD_FLAGS. When a file
+ is deleted, the MFT_RECORD_IN_USE flag is
+ set to zero. */
+/* 24*/ le32 bytes_in_use; /* Number of bytes used in this mft record.
+ NOTE: Must be aligned to 8-byte boundary. */
+/* 28*/ le32 bytes_allocated; /* Number of bytes allocated for this mft
+ record. This should be equal to the mft
+ record size. */
+/* 32*/ leMFT_REF base_mft_record;/* This is zero for base mft records.
+ When it is not zero it is a mft reference
+ pointing to the base mft record to which
+ this record belongs (this is then used to
+ locate the attribute list attribute present
+ in the base record which describes this
+ extension record and hence might need
+ modification when the extension record
+ itself is modified, also locating the
+ attribute list also means finding the other
+ potential extents, belonging to the non-base
+ mft record). */
+/* 40*/ le16 next_attr_instance;/* The instance number that will be assigned to
+ the next attribute added to this mft record.
+ NOTE: Incremented each time after it is used.
+ NOTE: Every time the mft record is reused
+ this number is set to zero. NOTE: The first
+ instance number is always 0. */
+/* sizeof() = 42 bytes */
+/*
+ * When (re)using the mft record, we place the update sequence array at this
+ * offset, i.e. before we start with the attributes. This also makes sense,
+ * otherwise we could run into problems with the update sequence array
+ * containing in itself the last two bytes of a sector which would mean that
+ * multi sector transfer protection wouldn't work. As you can't protect data
+ * by overwriting it since you then can't get it back...
+ * When reading we obviously use the data from the ntfs record header.
+ */
+} __attribute__ ((__packed__)) MFT_RECORD_OLD;
+
/*
* System defined attributes (32-bit). Each attribute type has a corresponding
* attribute name (Unicode string of maximum 64 character length) as described
diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c
index 748ed0d78d3b..17888c9f02f1 100644
--- a/fs/ntfs/lcnalloc.c
+++ b/fs/ntfs/lcnalloc.c
@@ -30,6 +30,7 @@
#include "volume.h"
#include "attrib.h"
#include "malloc.h"
+#include "aops.h"
#include "ntfs.h"
/**
@@ -46,7 +47,7 @@
* Locking: - The volume lcn bitmap must be locked for writing on entry and is
* left locked on return.
*/
-static int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
+int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
const runlist_element *rl)
{
struct inode *lcnbmp_vi = vol->lcnbmp_ino;
@@ -855,7 +856,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
err = PTR_ERR(rl);
goto err_out;
}
- if (unlikely(rl->lcn < (LCN)LCN_HOLE)) {
+ if (unlikely(rl->lcn < LCN_HOLE)) {
if (!is_rollback)
ntfs_error(vol->sb, "First runlist element has "
"invalid lcn, aborting.");
@@ -895,7 +896,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
* free them.
*/
for (; rl->length && count != 0; ++rl) {
- if (unlikely(rl->lcn < (LCN)LCN_HOLE)) {
+ if (unlikely(rl->lcn < LCN_HOLE)) {
VCN vcn;
/*
@@ -926,7 +927,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
"element.");
goto err_out;
}
- if (unlikely(rl->lcn < (LCN)LCN_HOLE)) {
+ if (unlikely(rl->lcn < LCN_HOLE)) {
if (!is_rollback)
ntfs_error(vol->sb, "Runlist element "
"has invalid lcn "
diff --git a/fs/ntfs/lcnalloc.h b/fs/ntfs/lcnalloc.h
index f9292e882adc..4cac1c024af6 100644
--- a/fs/ntfs/lcnalloc.h
+++ b/fs/ntfs/lcnalloc.h
@@ -28,6 +28,7 @@
#include <linux/fs.h>
#include "types.h"
+#include "runlist.h"
#include "volume.h"
typedef enum {
@@ -78,6 +79,34 @@ static inline s64 ntfs_cluster_free(struct inode *vi, const VCN start_vcn,
return __ntfs_cluster_free(vi, start_vcn, count, FALSE);
}
+extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
+ const runlist_element *rl);
+
+/**
+ * ntfs_cluster_free_from_rl - free clusters from runlist
+ * @vol: mounted ntfs volume on which to free the clusters
+ * @rl: runlist describing the clusters to free
+ *
+ * Free all the clusters described by the runlist @rl on the volume @vol. In
+ * the case of an error being returned, at least some of the clusters were not
+ * freed.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Locking: This function takes the volume lcn bitmap lock for writing and
+ * modifies the bitmap contents.
+ */
+static inline int ntfs_cluster_free_from_rl(ntfs_volume *vol,
+ const runlist_element *rl)
+{
+ int ret;
+
+ down_write(&vol->lcnbmp_lock);
+ ret = ntfs_cluster_free_from_rl_nolock(vol, rl);
+ up_write(&vol->lcnbmp_lock);
+ return ret;
+}
+
#endif /* NTFS_RW */
#endif /* defined _LINUX_NTFS_LCNALLOC_H */
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 1869a4375898..5e280abafab3 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -27,10 +27,13 @@
#include <linux/buffer_head.h>
#include <linux/bitops.h>
+#include "attrib.h"
+#include "aops.h"
+#include "debug.h"
#include "logfile.h"
+#include "malloc.h"
#include "volume.h"
#include "ntfs.h"
-#include "debug.h"
/**
* ntfs_check_restart_page_header - check the page header for consistency
@@ -681,60 +684,20 @@ err_out:
BOOL ntfs_empty_logfile(struct inode *log_vi)
{
ntfs_volume *vol = NTFS_SB(log_vi->i_sb);
- struct address_space *mapping;
- pgoff_t idx, end;
ntfs_debug("Entering.");
- if (NVolLogFileEmpty(vol))
- goto done;
- mapping = log_vi->i_mapping;
- end = (log_vi->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- for (idx = 0; idx < end; ++idx) {
- struct page *page;
- u8 *kaddr;
-
- /* Find or create the current page. (The page is locked.) */
- page = grab_cache_page(mapping, idx);
- if (unlikely(!page)) {
- ntfs_error(vol->sb, "Insufficient memory to grab "
- "$LogFile page (index %lu).", idx);
+ if (!NVolLogFileEmpty(vol)) {
+ int err;
+
+ err = ntfs_attr_set(NTFS_I(log_vi), 0, log_vi->i_size, 0xff);
+ if (unlikely(err)) {
+ ntfs_error(vol->sb, "Failed to fill $LogFile with "
+ "0xff bytes (error code %i).", err);
return FALSE;
}
- /*
- * Set all bytes in the page to 0xff. It doesn't matter if we
- * go beyond i_size, because ntfs_writepage() will take care of
- * that for us.
- */
- kaddr = (u8*)kmap_atomic(page, KM_USER0);
- memset(kaddr, 0xff, PAGE_CACHE_SIZE);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
- /*
- * If the page has buffers, mark them uptodate since buffer
- * state and not page state is definitive in 2.6 kernels.
- */
- if (page_has_buffers(page)) {
- struct buffer_head *bh, *head;
-
- bh = head = page_buffers(page);
- do {
- set_buffer_uptodate(bh);
- } while ((bh = bh->b_this_page) != head);
- }
- /* Now that buffers are uptodate, set the page uptodate, too. */
- SetPageUptodate(page);
- /*
- * Set the page and all its buffers dirty and mark the inode
- * dirty, too. The VM will write the page later on.
- */
- set_page_dirty(page);
- /* Finally unlock and release the page. */
- unlock_page(page);
- page_cache_release(page);
- }
- /* We set the flag so we do not clear the log file again on remount. */
- NVolSetLogFileEmpty(vol);
-done:
+ /* Set the flag so we do not have to do it again on remount. */
+ NVolSetLogFileEmpty(vol);
+ }
ntfs_debug("Done.");
return TRUE;
}
diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h
index c8548a5336e0..fac5944df6d8 100644
--- a/fs/ntfs/malloc.h
+++ b/fs/ntfs/malloc.h
@@ -24,6 +24,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
/**
* ntfs_malloc_nofs - allocate memory in multiples of pages
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 9192bbdf2c2e..982cf8e37ba6 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -20,115 +20,20 @@
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/buffer_head.h>
#include <linux/swap.h>
+#include "attrib.h"
+#include "aops.h"
+#include "bitmap.h"
+#include "debug.h"
+#include "dir.h"
+#include "lcnalloc.h"
+#include "malloc.h"
+#include "mft.h"
#include "ntfs.h"
/**
- * __format_mft_record - initialize an empty mft record
- * @m: mapped, pinned and locked for writing mft record
- * @size: size of the mft record
- * @rec_no: mft record number / inode number
- *
- * Private function to initialize an empty mft record. Use one of the two
- * provided format_mft_record() functions instead.
- */
-static void __format_mft_record(MFT_RECORD *m, const int size,
- const unsigned long rec_no)
-{
- ATTR_RECORD *a;
-
- memset(m, 0, size);
- m->magic = magic_FILE;
- /* Aligned to 2-byte boundary. */
- m->usa_ofs = cpu_to_le16((sizeof(MFT_RECORD) + 1) & ~1);
- m->usa_count = cpu_to_le16(size / NTFS_BLOCK_SIZE + 1);
- /* Set the update sequence number to 1. */
- *(le16*)((char*)m + ((sizeof(MFT_RECORD) + 1) & ~1)) = cpu_to_le16(1);
- m->lsn = cpu_to_le64(0LL);
- m->sequence_number = cpu_to_le16(1);
- m->link_count = 0;
- /* Aligned to 8-byte boundary. */
- m->attrs_offset = cpu_to_le16((le16_to_cpu(m->usa_ofs) +
- (le16_to_cpu(m->usa_count) << 1) + 7) & ~7);
- m->flags = 0;
- /*
- * Using attrs_offset plus eight bytes (for the termination attribute),
- * aligned to 8-byte boundary.
- */
- m->bytes_in_use = cpu_to_le32((le16_to_cpu(m->attrs_offset) + 8 + 7) &
- ~7);
- m->bytes_allocated = cpu_to_le32(size);
- m->base_mft_record = cpu_to_le64((MFT_REF)0);
- m->next_attr_instance = 0;
- a = (ATTR_RECORD*)((char*)m + le16_to_cpu(m->attrs_offset));
- a->type = AT_END;
- a->length = 0;
-}
-
-/**
- * format_mft_record - initialize an empty mft record
- * @ni: ntfs inode of mft record
- * @mft_rec: mapped, pinned and locked mft record (optional)
- *
- * Initialize an empty mft record. This is used when extending the MFT.
- *
- * If @mft_rec is NULL, we call map_mft_record() to obtain the
- * record and we unmap it again when finished.
- *
- * We return 0 on success or -errno on error.
- */
-int format_mft_record(ntfs_inode *ni, MFT_RECORD *mft_rec)
-{
- MFT_RECORD *m;
-
- if (mft_rec)
- m = mft_rec;
- else {
- m = map_mft_record(ni);
- if (IS_ERR(m))
- return PTR_ERR(m);
- }
- __format_mft_record(m, ni->vol->mft_record_size, ni->mft_no);
- if (!mft_rec) {
- // FIXME: Need to set the mft record dirty!
- unmap_mft_record(ni);
- }
- return 0;
-}
-
-/**
- * ntfs_readpage - external declaration, function is in fs/ntfs/aops.c
- */
-extern int ntfs_readpage(struct file *, struct page *);
-
-#ifdef NTFS_RW
-/**
- * ntfs_mft_writepage - forward declaration, function is further below
- */
-static int ntfs_mft_writepage(struct page *page, struct writeback_control *wbc);
-#endif /* NTFS_RW */
-
-/**
- * ntfs_mft_aops - address space operations for access to $MFT
- *
- * Address space operations for access to $MFT. This allows us to simply use
- * ntfs_map_page() in map_mft_record_page().
- */
-struct address_space_operations ntfs_mft_aops = {
- .readpage = ntfs_readpage, /* Fill page with data. */
- .sync_page = block_sync_page, /* Currently, just unplugs the
- disk request queue. */
-#ifdef NTFS_RW
- .writepage = ntfs_mft_writepage, /* Write out the dirty mft
- records in a page. */
- .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty
- without touching the buffers
- belonging to the page. */
-#endif /* NTFS_RW */
-};
-
-/**
* map_mft_record_page - map the page in which a specific mft record resides
* @ni: ntfs inode whose mft record page to map
*
@@ -397,8 +302,8 @@ map_err_out:
ntfs_clear_extent_inode(ni);
goto map_err_out;
}
- /* Verify the sequence number. */
- if (unlikely(le16_to_cpu(m->sequence_number) != seq_no)) {
+ /* Verify the sequence number if it is present. */
+ if (seq_no && (le16_to_cpu(m->sequence_number) != seq_no)) {
ntfs_error(base_ni->vol->sb, "Found stale extent mft "
"reference! Corrupt file system. Run chkdsk.");
destroy_ni = TRUE;
@@ -473,19 +378,11 @@ unm_err_out:
*/
void __mark_mft_record_dirty(ntfs_inode *ni)
{
- struct page *page = ni->page;
ntfs_inode *base_ni;
ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
- BUG_ON(!page);
BUG_ON(NInoAttr(ni));
-
- /*
- * Set the page containing the mft record dirty. This also marks the
- * $MFT inode dirty (I_DIRTY_PAGES).
- */
- __set_page_dirty_nobuffers(page);
-
+ mark_ntfs_record_dirty(ni->page, ni->page_ofs);
/* Determine the base vfs inode and mark it dirty, too. */
down(&ni->extent_lock);
if (likely(ni->nr_extents >= 0))
@@ -501,13 +398,14 @@ static const char *ntfs_please_email = "Please email "
"this message. Thank you.";
/**
- * sync_mft_mirror_umount - synchronise an mft record to the mft mirror
- * @ni: ntfs inode whose mft record to synchronize
+ * ntfs_sync_mft_mirror_umount - synchronise an mft record to the mft mirror
+ * @vol: ntfs volume on which the mft record to synchronize resides
+ * @mft_no: mft record number of mft record to synchronize
* @m: mapped, mst protected (extent) mft record to synchronize
*
- * Write the mapped, mst protected (extent) mft record @m described by the
- * (regular or extent) ntfs inode @ni to the mft mirror ($MFTMirr) bypassing
- * the page cache and the $MFTMirr inode itself.
+ * Write the mapped, mst protected (extent) mft record @m with mft record
+ * number @mft_no to the mft mirror ($MFTMirr) of the ntfs volume @vol,
+ * bypassing the page cache and the $MFTMirr inode itself.
*
* This function is only for use at umount time when the mft mirror inode has
* already been disposed off. We BUG() if we are called while the mft mirror
@@ -521,10 +419,9 @@ static const char *ntfs_please_email = "Please email "
* alternative would be either to BUG() or to get a NULL pointer dereference
* and Oops.
*/
-static int sync_mft_mirror_umount(ntfs_inode *ni, MFT_RECORD *m)
+static int ntfs_sync_mft_mirror_umount(ntfs_volume *vol,
+ const unsigned long mft_no, MFT_RECORD *m)
{
- ntfs_volume *vol = ni->vol;
-
BUG_ON(vol->mftmirr_ino);
ntfs_error(vol->sb, "Umount time mft mirror syncing is not "
"implemented yet. %s", ntfs_please_email);
@@ -532,25 +429,26 @@ static int sync_mft_mirror_umount(ntfs_inode *ni, MFT_RECORD *m)
}
/**
- * sync_mft_mirror - synchronize an mft record to the mft mirror
- * @ni: ntfs inode whose mft record to synchronize
+ * ntfs_sync_mft_mirror - synchronize an mft record to the mft mirror
+ * @vol: ntfs volume on which the mft record to synchronize resides
+ * @mft_no: mft record number of mft record to synchronize
* @m: mapped, mst protected (extent) mft record to synchronize
* @sync: if true, wait for i/o completion
*
- * Write the mapped, mst protected (extent) mft record @m described by the
- * (regular or extent) ntfs inode @ni to the mft mirror ($MFTMirr).
+ * Write the mapped, mst protected (extent) mft record @m with mft record
+ * number @mft_no to the mft mirror ($MFTMirr) of the ntfs volume @vol.
*
* On success return 0. On error return -errno and set the volume errors flag
- * in the ntfs_volume to which @ni belongs.
+ * in the ntfs volume @vol.
*
* NOTE: We always perform synchronous i/o and ignore the @sync parameter.
*
* TODO: If @sync is false, want to do truly asynchronous i/o, i.e. just
* schedule i/o via ->writepage or do it via kntfsd or whatever.
*/
-static int sync_mft_mirror(ntfs_inode *ni, MFT_RECORD *m, int sync)
+int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
+ MFT_RECORD *m, int sync)
{
- ntfs_volume *vol = ni->vol;
struct page *page;
unsigned int blocksize = vol->sb->s_blocksize;
int max_bhs = vol->mft_record_size / blocksize;
@@ -560,17 +458,17 @@ static int sync_mft_mirror(ntfs_inode *ni, MFT_RECORD *m, int sync)
unsigned int block_start, block_end, m_start, m_end;
int i_bhs, nr_bhs, err = 0;
- ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
+ ntfs_debug("Entering for inode 0x%lx.", mft_no);
BUG_ON(!max_bhs);
if (unlikely(!vol->mftmirr_ino)) {
/* This could happen during umount... */
- err = sync_mft_mirror_umount(ni, m);
+ err = ntfs_sync_mft_mirror_umount(vol, mft_no, m);
if (likely(!err))
return err;
goto err_out;
}
/* Get the page containing the mirror copy of the mft record @m. */
- page = ntfs_map_page(vol->mftmirr_ino->i_mapping, ni->mft_no >>
+ page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
(PAGE_CACHE_SHIFT - vol->mft_record_size_bits));
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to map mft mirror page.");
@@ -584,59 +482,28 @@ static int sync_mft_mirror(ntfs_inode *ni, MFT_RECORD *m, int sync)
* make sure no one is writing from elsewhere.
*/
lock_page(page);
+ BUG_ON(!PageUptodate(page));
+ ClearPageUptodate(page);
/* The address in the page of the mirror copy of the mft record @m. */
- kmirr = page_address(page) + ((ni->mft_no << vol->mft_record_size_bits)
- & ~PAGE_CACHE_MASK);
+ kmirr = page_address(page) + ((mft_no << vol->mft_record_size_bits) &
+ ~PAGE_CACHE_MASK);
/* Copy the mst protected mft record to the mirror. */
memcpy(kmirr, m, vol->mft_record_size);
/* Make sure we have mapped buffers. */
- if (!page_has_buffers(page)) {
-no_buffers_err_out:
- ntfs_error(vol->sb, "Writing mft mirror records without "
- "existing buffers is not implemented yet. %s",
- ntfs_please_email);
- err = -EOPNOTSUPP;
- goto unlock_err_out;
- }
+ BUG_ON(!page_has_buffers(page));
bh = head = page_buffers(page);
- if (!bh)
- goto no_buffers_err_out;
+ BUG_ON(!bh);
nr_bhs = 0;
block_start = 0;
m_start = kmirr - (u8*)page_address(page);
m_end = m_start + vol->mft_record_size;
do {
block_end = block_start + blocksize;
- /*
- * If the buffer is outside the mft record, just skip it,
- * clearing it if it is dirty to make sure it is not written
- * out. It should never be marked dirty but better be safe.
- */
- if ((block_end <= m_start) || (block_start >= m_end)) {
- if (buffer_dirty(bh)) {
- ntfs_warning(vol->sb, "Clearing dirty mft "
- "record page buffer. %s",
- ntfs_please_email);
- clear_buffer_dirty(bh);
- }
- continue;
- }
- if (!buffer_mapped(bh)) {
- ntfs_error(vol->sb, "Writing mft mirror records "
- "without existing mapped buffers is "
- "not implemented yet. %s",
- ntfs_please_email);
- err = -EOPNOTSUPP;
- continue;
- }
- if (!buffer_uptodate(bh)) {
- ntfs_error(vol->sb, "Writing mft mirror records "
- "without existing uptodate buffers is "
- "not implemented yet. %s",
- ntfs_please_email);
- err = -EOPNOTSUPP;
+ /* If the buffer is outside the mft record, skip it. */
+ if ((block_end <= m_start) || (block_start >= m_end))
continue;
- }
+ BUG_ON(!buffer_mapped(bh));
+ BUG_ON(!buffer_uptodate(bh));
BUG_ON(!nr_bhs && (m_start != block_start));
BUG_ON(nr_bhs >= max_bhs);
bhs[nr_bhs++] = bh;
@@ -664,11 +531,10 @@ no_buffers_err_out:
if (unlikely(!buffer_uptodate(tbh))) {
err = -EIO;
/*
- * Set the buffer uptodate so the page & buffer
- * states don't become out of sync.
+ * Set the buffer uptodate so the page and
+ * buffer states do not become out of sync.
*/
- if (PageUptodate(page))
- set_buffer_uptodate(tbh);
+ set_buffer_uptodate(tbh);
}
}
} else /* if (unlikely(err)) */ {
@@ -676,29 +542,25 @@ no_buffers_err_out:
for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++)
clear_buffer_dirty(bhs[i_bhs]);
}
-unlock_err_out:
/* Current state: all buffers are clean, unlocked, and uptodate. */
/* Remove the mst protection fixups again. */
post_write_mst_fixup((NTFS_RECORD*)kmirr);
flush_dcache_page(page);
+ SetPageUptodate(page);
unlock_page(page);
ntfs_unmap_page(page);
- if (unlikely(err)) {
- /* I/O error during writing. This is really bad! */
+ if (likely(!err)) {
+ ntfs_debug("Done.");
+ } else {
ntfs_error(vol->sb, "I/O error while writing mft mirror "
- "record 0x%lx! You should unmount the volume "
- "and run chkdsk or ntfsfix.", ni->mft_no);
- goto err_out;
- }
- ntfs_debug("Done.");
- return 0;
+ "record 0x%lx!", mft_no);
err_out:
- ntfs_error(vol->sb, "Failed to synchronize $MFTMirr (error code %i). "
- "Volume will be left marked dirty on umount. Run "
- "ntfsfix on the partition after umounting to correct "
- "this.", -err);
- /* We don't want to clear the dirty bit on umount. */
- NVolSetErrors(vol);
+ ntfs_error(vol->sb, "Failed to synchronize $MFTMirr (error "
+ "code %i). Volume will be left marked dirty "
+ "on umount. Run ntfsfix on the partition "
+ "after umounting to correct this.", -err);
+ NVolSetErrors(vol);
+ }
return err;
}
@@ -712,6 +574,11 @@ err_out:
* ntfs inode @ni to backing store. If the mft record @m has a counterpart in
* the mft mirror, that is also updated.
*
+ * We only write the mft record if the ntfs inode @ni is dirty and the first
+ * buffer belonging to its mft record is dirty, too. We ignore the dirty state
+ * of subsequent buffers because we could have raced with
+ * fs/ntfs/aops.c::mark_ntfs_record_dirty().
+ *
* On success, clean the mft record and return 0. On error, leave the mft
* record dirty and return -errno. The caller should call make_bad_inode() on
* the base inode to ensure no more access happens to this inode. We do not do
@@ -741,6 +608,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
struct buffer_head *bh, *head;
unsigned int block_start, block_end, m_start, m_end;
int i_bhs, nr_bhs, err = 0;
+ BOOL rec_is_dirty = TRUE;
ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
BUG_ON(NInoAttr(ni));
@@ -754,59 +622,46 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
*/
if (!NInoTestClearDirty(ni))
goto done;
- /* Make sure we have mapped buffers. */
- if (!page_has_buffers(page)) {
-no_buffers_err_out:
- ntfs_error(vol->sb, "Writing mft records without existing "
- "buffers is not implemented yet. %s",
- ntfs_please_email);
- err = -EOPNOTSUPP;
- goto err_out;
- }
+ BUG_ON(!page_has_buffers(page));
bh = head = page_buffers(page);
- if (!bh)
- goto no_buffers_err_out;
+ BUG_ON(!bh);
nr_bhs = 0;
block_start = 0;
m_start = ni->page_ofs;
m_end = m_start + vol->mft_record_size;
do {
block_end = block_start + blocksize;
- /*
- * If the buffer is outside the mft record, just skip it,
- * clearing it if it is dirty to make sure it is not written
- * out. It should never be marked dirty but better be safe.
- */
- if ((block_end <= m_start) || (block_start >= m_end)) {
- if (buffer_dirty(bh)) {
- ntfs_warning(vol->sb, "Clearing dirty mft "
- "record page buffer. %s",
- ntfs_please_email);
- clear_buffer_dirty(bh);
- }
- continue;
- }
- if (!buffer_mapped(bh)) {
- ntfs_error(vol->sb, "Writing mft records without "
- "existing mapped buffers is not "
- "implemented yet. %s",
- ntfs_please_email);
- err = -EOPNOTSUPP;
- continue;
- }
- if (!buffer_uptodate(bh)) {
- ntfs_error(vol->sb, "Writing mft records without "
- "existing uptodate buffers is not "
- "implemented yet. %s",
- ntfs_please_email);
- err = -EOPNOTSUPP;
+ /* If the buffer is outside the mft record, skip it. */
+ if (block_end <= m_start)
continue;
+ if (unlikely(block_start >= m_end))
+ break;
+ if (block_start == m_start) {
+ /* This block is the first one in the record. */
+ if (!buffer_dirty(bh)) {
+ /* Clean records are not written out. */
+ rec_is_dirty = FALSE;
+ continue;
+ }
+ rec_is_dirty = TRUE;
+ } else {
+ /*
+ * This block is not the first one in the record. We
+ * ignore the buffer's dirty state because we could
+ * have raced with a parallel mark_ntfs_record_dirty().
+ */
+ if (!rec_is_dirty)
+ continue;
}
+ BUG_ON(!buffer_mapped(bh));
+ BUG_ON(!buffer_uptodate(bh));
BUG_ON(!nr_bhs && (m_start != block_start));
BUG_ON(nr_bhs >= max_bhs);
bhs[nr_bhs++] = bh;
BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
} while (block_start = block_end, (bh = bh->b_this_page) != head);
+ if (!rec_is_dirty)
+ goto done;
if (unlikely(err))
goto cleanup_out;
/* Apply the mst protection fixups. */
@@ -823,15 +678,14 @@ no_buffers_err_out:
if (unlikely(test_set_buffer_locked(tbh)))
BUG();
BUG_ON(!buffer_uptodate(tbh));
- if (buffer_dirty(tbh))
- clear_buffer_dirty(tbh);
+ clear_buffer_dirty(tbh);
get_bh(tbh);
tbh->b_end_io = end_buffer_write_sync;
submit_bh(WRITE, tbh);
}
/* Synchronize the mft mirror now if not @sync. */
if (!sync && ni->mft_no < vol->mftmirr_size)
- sync_mft_mirror(ni, m, sync);
+ ntfs_sync_mft_mirror(vol, ni->mft_no, m, sync);
/* Wait on i/o completion of buffers. */
for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
struct buffer_head *tbh = bhs[i_bhs];
@@ -840,8 +694,8 @@ no_buffers_err_out:
if (unlikely(!buffer_uptodate(tbh))) {
err = -EIO;
/*
- * Set the buffer uptodate so the page & buffer states
- * don't become out of sync.
+ * Set the buffer uptodate so the page and buffer
+ * states do not become out of sync.
*/
if (PageUptodate(page))
set_buffer_uptodate(tbh);
@@ -849,7 +703,7 @@ no_buffers_err_out:
}
/* If @sync, now synchronize the mft mirror. */
if (sync && ni->mft_no < vol->mftmirr_size)
- sync_mft_mirror(ni, m, sync);
+ ntfs_sync_mft_mirror(vol, ni->mft_no, m, sync);
/* Remove the mst protection fixups again. */
post_write_mst_fixup((NTFS_RECORD*)m);
flush_dcache_mft_record_page(ni);
@@ -885,214 +739,1987 @@ err_out:
}
/**
- * ntfs_mft_writepage - check if a metadata page contains dirty mft records
- * @page: metadata page possibly containing dirty mft records
- * @wbc: writeback control structure
- *
- * This is called from the VM when it wants to have a dirty $MFT/$DATA metadata
- * page cache page cleaned. The VM has already locked the page and marked it
- * clean. Instead of writing the page as a conventional ->writepage function
- * would do, we check if the page still contains any dirty mft records (it must
- * have done at some point in the past since the page was marked dirty) and if
- * none are found, i.e. all mft records are clean, we unlock the page and
- * return. The VM is then free to do with the page as it pleases. If on the
- * other hand we do find any dirty mft records in the page, we redirty the page
- * before unlocking it and returning so the VM knows that the page is still
- * busy and cannot be thrown out.
- *
- * Note, we do not actually write any dirty mft records here because they are
- * dirty inodes and hence will be written by the VFS inode dirty code paths.
- * There is no need to write them from the VM page dirty code paths, too and in
- * fact once we implement journalling it would be a complete nightmare having
- * two code paths leading to mft record writeout.
+ * ntfs_may_write_mft_record - check if an mft record may be written out
+ * @vol: [IN] ntfs volume on which the mft record to check resides
+ * @mft_no: [IN] mft record number of the mft record to check
+ * @m: [IN] mapped mft record to check
+ * @locked_ni: [OUT] caller has to unlock this ntfs inode if one is returned
+ *
+ * Check if the mapped (base or extent) mft record @m with mft record number
+ * @mft_no belonging to the ntfs volume @vol may be written out. If necessary
+ * and possible the ntfs inode of the mft record is locked and the base vfs
+ * inode is pinned. The locked ntfs inode is then returned in @locked_ni. The
+ * caller is responsible for unlocking the ntfs inode and unpinning the base
+ * vfs inode.
+ *
+ * Return TRUE if the mft record may be written out and FALSE if not.
+ *
+ * The caller has locked the page and cleared the uptodate flag on it which
+ * means that we can safely write out any dirty mft records that do not have
+ * their inodes in icache as determined by ilookup5() as anyone
+ * opening/creating such an inode would block when attempting to map the mft
+ * record in read_cache_page() until we are finished with the write out.
+ *
+ * Here is a description of the tests we perform:
+ *
+ * If the inode is found in icache we know the mft record must be a base mft
+ * record. If it is dirty, we do not write it and return FALSE as the vfs
+ * inode write paths will result in the access times being updated which would
+ * cause the base mft record to be redirtied and written out again. (We know
+ * the access time update will modify the base mft record because Windows
+ * chkdsk complains if the standard information attribute is not in the base
+ * mft record.)
+ *
+ * If the inode is in icache and not dirty, we attempt to lock the mft record
+ * and if we find the lock was already taken, it is not safe to write the mft
+ * record and we return FALSE.
+ *
+ * If we manage to obtain the lock we have exclusive access to the mft record,
+ * which also allows us safe writeout of the mft record. We then set
+ * @locked_ni to the locked ntfs inode and return TRUE.
+ *
+ * Note we cannot just lock the mft record and sleep while waiting for the lock
+ * because this would deadlock due to lock reversal (normally the mft record is
+ * locked before the page is locked but we already have the page locked here
+ * when we try to lock the mft record).
+ *
+ * If the inode is not in icache we need to perform further checks.
+ *
+ * If the mft record is not a FILE record or it is a base mft record, we can
+ * safely write it and return TRUE.
+ *
+ * We now know the mft record is an extent mft record. We check if the inode
+ * corresponding to its base mft record is in icache and obtain a reference to
+ * it if it is. If it is not, we can safely write it and return TRUE.
+ *
+ * We now have the base inode for the extent mft record. We check if it has an
+ * ntfs inode for the extent mft record attached and if not it is safe to write
+ * the extent mft record and we return TRUE.
+ *
+ * The ntfs inode for the extent mft record is attached to the base inode so we
+ * attempt to lock the extent mft record and if we find the lock was already
+ * taken, it is not safe to write the extent mft record and we return FALSE.
+ *
+ * If we manage to obtain the lock we have exclusive access to the extent mft
+ * record, which also allows us safe writeout of the extent mft record. We
+ * set the ntfs inode of the extent mft record clean and then set @locked_ni to
+ * the now locked ntfs inode and return TRUE.
+ *
+ * Note, the reason for actually writing dirty mft records here and not just
+ * relying on the vfs inode dirty code paths is that we can have mft records
+ * modified without them ever having actual inodes in memory. Also we can have
+ * dirty mft records with clean ntfs inodes in memory. None of the described
+ * cases would result in the dirty mft records being written out if we only
+ * relied on the vfs inode dirty code paths. And these cases can really occur
+ * during allocation of new mft records and in particular when the
+ * initialized_size of the $MFT/$DATA attribute is extended and the new space
+ * is initialized using ntfs_mft_record_format(). The clean inode can then
+ * appear if the mft record is reused for a new inode before it got written
+ * out.
*/
-static int ntfs_mft_writepage(struct page *page, struct writeback_control *wbc)
+BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
+ const MFT_RECORD *m, ntfs_inode **locked_ni)
{
- struct inode *mft_vi = page->mapping->host;
- struct super_block *sb = mft_vi->i_sb;
- ntfs_volume *vol = NTFS_SB(sb);
- u8 *maddr;
- MFT_RECORD *m;
- ntfs_inode **extent_nis;
- unsigned long mft_no;
- int nr, i, j;
- BOOL is_dirty = FALSE;
+ struct super_block *sb = vol->sb;
+ struct inode *mft_vi = vol->mft_ino;
+ struct inode *vi;
+ ntfs_inode *ni, *eni, **extent_nis;
+ int i;
+ ntfs_attr na;
- BUG_ON(!PageLocked(page));
- BUG_ON(PageWriteback(page));
- BUG_ON(mft_vi != vol->mft_ino);
- /* The first mft record number in the page. */
- mft_no = page->index << (PAGE_CACHE_SHIFT - vol->mft_record_size_bits);
- /* Number of mft records in the page. */
- nr = PAGE_CACHE_SIZE >> vol->mft_record_size_bits;
- BUG_ON(!nr);
- ntfs_debug("Entering for %i inodes starting at 0x%lx.", nr, mft_no);
- /* Iterate over the mft records in the page looking for a dirty one. */
- maddr = (u8*)kmap(page);
- for (i = 0; i < nr; ++i, ++mft_no, maddr += vol->mft_record_size) {
- struct inode *vi;
- ntfs_inode *ni, *eni;
- ntfs_attr na;
-
- na.mft_no = mft_no;
- na.name = NULL;
- na.name_len = 0;
- na.type = AT_UNUSED;
- /*
- * Check if the inode corresponding to this mft record is in
- * the VFS inode cache and obtain a reference to it if it is.
- */
- ntfs_debug("Looking for inode 0x%lx in icache.", mft_no);
- /*
- * For inode 0, i.e. $MFT itself, we cannot use ilookup5() from
- * here or we deadlock because the inode is already locked by
- * the kernel (fs/fs-writeback.c::__sync_single_inode()) and
- * ilookup5() waits until the inode is unlocked before
- * returning it and it never gets unlocked because
- * ntfs_mft_writepage() never returns. )-: Fortunately, we
- * have inode 0 pinned in icache for the duration of the mount
- * so we can access it directly.
- */
- if (!mft_no) {
- /* Balance the below iput(). */
- vi = igrab(mft_vi);
- BUG_ON(vi != mft_vi);
- } else
- vi = ilookup5(sb, mft_no, (test_t)ntfs_test_inode, &na);
- if (vi) {
- ntfs_debug("Inode 0x%lx is in icache.", mft_no);
- /* The inode is in icache. Check if it is dirty. */
- ni = NTFS_I(vi);
- if (!NInoDirty(ni)) {
- /* The inode is not dirty, skip this record. */
- ntfs_debug("Inode 0x%lx is not dirty, "
- "continuing search.", mft_no);
- iput(vi);
- continue;
- }
- ntfs_debug("Inode 0x%lx is dirty, aborting search.",
+ ntfs_debug("Entering for inode 0x%lx.", mft_no);
+ /*
+ * Normally we do not return a locked inode so set @locked_ni to NULL.
+ */
+ BUG_ON(!locked_ni);
+ *locked_ni = NULL;
+ /*
+ * Check if the inode corresponding to this mft record is in the VFS
+ * inode cache and obtain a reference to it if it is.
+ */
+ ntfs_debug("Looking for inode 0x%lx in icache.", mft_no);
+ na.mft_no = mft_no;
+ na.name = NULL;
+ na.name_len = 0;
+ na.type = AT_UNUSED;
+ /*
+ * For inode 0, i.e. $MFT itself, we cannot use ilookup5() from here or
+ * we deadlock because the inode is already locked by the kernel
+ * (fs/fs-writeback.c::__sync_single_inode()) and ilookup5() waits
+ * until the inode is unlocked before returning it and it never gets
+ * unlocked because ntfs_should_write_mft_record() never returns. )-:
+ * Fortunately, we have inode 0 pinned in icache for the duration of
+ * the mount so we can access it directly.
+ */
+ if (!mft_no) {
+ /* Balance the below iput(). */
+ vi = igrab(mft_vi);
+ BUG_ON(vi != mft_vi);
+ } else
+ vi = ilookup5(sb, mft_no, (test_t)ntfs_test_inode, &na);
+ if (vi) {
+ ntfs_debug("Base inode 0x%lx is in icache.", mft_no);
+ /* The inode is in icache. */
+ ni = NTFS_I(vi);
+ /* Take a reference to the ntfs inode. */
+ atomic_inc(&ni->count);
+ /* If the inode is dirty, do not write this record. */
+ if (NInoDirty(ni)) {
+ ntfs_debug("Inode 0x%lx is dirty, do not write it.",
mft_no);
- /* The inode is dirty, no need to search further. */
+ atomic_dec(&ni->count);
iput(vi);
- is_dirty = TRUE;
- break;
+ return FALSE;
}
- ntfs_debug("Inode 0x%lx is not in icache.", mft_no);
- /* The inode is not in icache. */
- /* Skip the record if it is not a mft record (type "FILE"). */
- if (!ntfs_is_mft_recordp((le32*)maddr)) {
- ntfs_debug("Mft record 0x%lx is not a FILE record, "
- "continuing search.", mft_no);
- continue;
+ ntfs_debug("Inode 0x%lx is not dirty.", mft_no);
+ /* The inode is not dirty, try to take the mft record lock. */
+ if (unlikely(down_trylock(&ni->mrec_lock))) {
+ ntfs_debug("Mft record 0x%lx is already locked, do "
+ "not write it.", mft_no);
+ atomic_dec(&ni->count);
+ iput(vi);
+ return FALSE;
}
- m = (MFT_RECORD*)maddr;
+ ntfs_debug("Managed to lock mft record 0x%lx, write it.",
+ mft_no);
/*
- * Skip the mft record if it is not in use. FIXME: What about
- * deleted/deallocated (extent) inodes? (AIA)
+ * The write has to occur while we hold the mft record lock so
+ * return the locked ntfs inode.
*/
- if (!(m->flags & MFT_RECORD_IN_USE)) {
- ntfs_debug("Mft record 0x%lx is not in use, "
- "continuing search.", mft_no);
- continue;
- }
- /* Skip the mft record if it is a base inode. */
- if (!m->base_mft_record) {
- ntfs_debug("Mft record 0x%lx is a base record, "
- "continuing search.", mft_no);
- continue;
- }
+ *locked_ni = ni;
+ return TRUE;
+ }
+ ntfs_debug("Inode 0x%lx is not in icache.", mft_no);
+ /* The inode is not in icache. */
+ /* Write the record if it is not a mft record (type "FILE"). */
+ if (!ntfs_is_mft_record(m->magic)) {
+ ntfs_debug("Mft record 0x%lx is not a FILE record, write it.",
+ mft_no);
+ return TRUE;
+ }
+ /* Write the mft record if it is a base inode. */
+ if (!m->base_mft_record) {
+ ntfs_debug("Mft record 0x%lx is a base record, write it.",
+ mft_no);
+ return TRUE;
+ }
+ /*
+ * This is an extent mft record. Check if the inode corresponding to
+ * its base mft record is in icache and obtain a reference to it if it
+ * is.
+ */
+ na.mft_no = MREF_LE(m->base_mft_record);
+ ntfs_debug("Mft record 0x%lx is an extent record. Looking for base "
+ "inode 0x%lx in icache.", mft_no, na.mft_no);
+ vi = ilookup5(sb, na.mft_no, (test_t)ntfs_test_inode, &na);
+ if (!vi) {
+ /*
+ * The base inode is not in icache, write this extent mft
+ * record.
+ */
+ ntfs_debug("Base inode 0x%lx is not in icache, write the "
+ "extent record.", na.mft_no);
+ return TRUE;
+ }
+ ntfs_debug("Base inode 0x%lx is in icache.", na.mft_no);
+ /*
+ * The base inode is in icache. Check if it has the extent inode
+ * corresponding to this extent mft record attached.
+ */
+ ni = NTFS_I(vi);
+ down(&ni->extent_lock);
+ if (ni->nr_extents <= 0) {
/*
- * This is an extent mft record. Check if the inode
- * corresponding to its base mft record is in icache.
+ * The base inode has no attached extent inodes, write this
+ * extent mft record.
*/
- na.mft_no = MREF_LE(m->base_mft_record);
- ntfs_debug("Mft record 0x%lx is an extent record. Looking "
- "for base inode 0x%lx in icache.", mft_no,
- na.mft_no);
- vi = ilookup5(sb, na.mft_no, (test_t)ntfs_test_inode,
- &na);
- if (!vi) {
+ up(&ni->extent_lock);
+ iput(vi);
+ ntfs_debug("Base inode 0x%lx has no attached extent inodes, "
+ "write the extent record.", na.mft_no);
+ return TRUE;
+ }
+ /* Iterate over the attached extent inodes. */
+ extent_nis = ni->ext.extent_ntfs_inos;
+ for (eni = NULL, i = 0; i < ni->nr_extents; ++i) {
+ if (mft_no == extent_nis[i]->mft_no) {
/*
- * The base inode is not in icache. Skip this extent
+ * Found the extent inode corresponding to this extent
* mft record.
*/
- ntfs_debug("Base inode 0x%lx is not in icache, "
- "continuing search.", na.mft_no);
- continue;
+ eni = extent_nis[i];
+ break;
}
- ntfs_debug("Base inode 0x%lx is in icache.", na.mft_no);
+ }
+ /*
+ * If the extent inode was not attached to the base inode, write this
+ * extent mft record.
+ */
+ if (!eni) {
+ up(&ni->extent_lock);
+ iput(vi);
+ ntfs_debug("Extent inode 0x%lx is not attached to its base "
+ "inode 0x%lx, write the extent record.",
+ mft_no, na.mft_no);
+ return TRUE;
+ }
+ ntfs_debug("Extent inode 0x%lx is attached to its base inode 0x%lx.",
+ mft_no, na.mft_no);
+ /* Take a reference to the extent ntfs inode. */
+ atomic_inc(&eni->count);
+ up(&ni->extent_lock);
+ /*
+ * Found the extent inode coresponding to this extent mft record.
+ * Try to take the mft record lock.
+ */
+ if (unlikely(down_trylock(&eni->mrec_lock))) {
+ atomic_dec(&eni->count);
+ iput(vi);
+ ntfs_debug("Extent mft record 0x%lx is already locked, do "
+ "not write it.", mft_no);
+ return FALSE;
+ }
+ ntfs_debug("Managed to lock extent mft record 0x%lx, write it.",
+ mft_no);
+ if (NInoTestClearDirty(eni))
+ ntfs_debug("Extent inode 0x%lx is dirty, marking it clean.",
+ mft_no);
+ /*
+ * The write has to occur while we hold the mft record lock so return
+ * the locked extent ntfs inode.
+ */
+ *locked_ni = eni;
+ return TRUE;
+}
+
+static const char *es = " Leaving inconsistent metadata. Unmount and run "
+ "chkdsk.";
+
+/**
+ * ntfs_mft_bitmap_find_and_alloc_free_rec_nolock - see name
+ * @vol: volume on which to search for a free mft record
+ * @base_ni: open base inode if allocating an extent mft record or NULL
+ *
+ * Search for a free mft record in the mft bitmap attribute on the ntfs volume
+ * @vol.
+ *
+ * If @base_ni is NULL start the search at the default allocator position.
+ *
+ * If @base_ni is not NULL start the search at the mft record after the base
+ * mft record @base_ni.
+ *
+ * Return the free mft record on success and -errno on error. An error code of
+ * -ENOSPC means that there are no free mft records in the currently
+ * initialized mft bitmap.
+ *
+ * Locking: Caller must hold vol->mftbmp_lock for writing.
+ */
+static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
+ ntfs_inode *base_ni)
+{
+ s64 pass_end, ll, data_pos, pass_start, ofs, bit;
+ struct address_space *mftbmp_mapping;
+ u8 *buf, *byte;
+ struct page *page;
+ unsigned int page_ofs, size;
+ u8 pass, b;
+
+ ntfs_debug("Searching for free mft record in the currently "
+ "initialized mft bitmap.");
+ mftbmp_mapping = vol->mftbmp_ino->i_mapping;
+ /*
+ * Set the end of the pass making sure we do not overflow the mft
+ * bitmap.
+ */
+ pass_end = NTFS_I(vol->mft_ino)->allocated_size >>
+ vol->mft_record_size_bits;
+ ll = NTFS_I(vol->mftbmp_ino)->initialized_size << 3;
+ if (pass_end > ll)
+ pass_end = ll;
+ pass = 1;
+ if (!base_ni)
+ data_pos = vol->mft_data_pos;
+ else
+ data_pos = base_ni->mft_no + 1;
+ if (data_pos < 24)
+ data_pos = 24;
+ if (data_pos >= pass_end) {
+ data_pos = 24;
+ pass = 2;
+ /* This happens on a freshly formatted volume. */
+ if (data_pos >= pass_end)
+ return -ENOSPC;
+ }
+ pass_start = data_pos;
+ ntfs_debug("Starting bitmap search: pass %u, pass_start 0x%llx, "
+ "pass_end 0x%llx, data_pos 0x%llx.", pass,
+ (long long)pass_start, (long long)pass_end,
+ (long long)data_pos);
+ /* Loop until a free mft record is found. */
+ for (; pass <= 2;) {
+ /* Cap size to pass_end. */
+ ofs = data_pos >> 3;
+ page_ofs = ofs & ~PAGE_CACHE_MASK;
+ size = PAGE_CACHE_SIZE - page_ofs;
+ ll = ((pass_end + 7) >> 3) - ofs;
+ if (size > ll)
+ size = ll;
+ size <<= 3;
/*
- * The base inode is in icache. Check if it has the extent
- * inode corresponding to this extent mft record attached.
+ * If we are still within the active pass, search the next page
+ * for a zero bit.
*/
- ni = NTFS_I(vi);
- down(&ni->extent_lock);
- if (ni->nr_extents <= 0) {
+ if (size) {
+ page = ntfs_map_page(mftbmp_mapping,
+ ofs >> PAGE_CACHE_SHIFT);
+ if (unlikely(IS_ERR(page))) {
+ ntfs_error(vol->sb, "Failed to read mft "
+ "bitmap, aborting.");
+ return PTR_ERR(page);
+ }
+ buf = (u8*)page_address(page) + page_ofs;
+ bit = data_pos & 7;
+ data_pos &= ~7ull;
+ ntfs_debug("Before inner for loop: size 0x%x, "
+ "data_pos 0x%llx, bit 0x%llx", size,
+ (long long)data_pos, (long long)bit);
+ for (; bit < size && data_pos + bit < pass_end;
+ bit &= ~7ull, bit += 8) {
+ byte = buf + (bit >> 3);
+ if (*byte == 0xff)
+ continue;
+ b = ffz((unsigned long)*byte);
+ if (b < 8 && b >= (bit & 7)) {
+ ll = data_pos + (bit & ~7ull) + b;
+ if (unlikely(ll > (1ll << 32))) {
+ ntfs_unmap_page(page);
+ return -ENOSPC;
+ }
+ *byte |= 1 << b;
+ flush_dcache_page(page);
+ set_page_dirty(page);
+ ntfs_unmap_page(page);
+ ntfs_debug("Done. (Found and "
+ "allocated mft record "
+ "0x%llx.)",
+ (long long)ll);
+ return ll;
+ }
+ }
+ ntfs_debug("After inner for loop: size 0x%x, "
+ "data_pos 0x%llx, bit 0x%llx", size,
+ (long long)data_pos, (long long)bit);
+ data_pos += size;
+ ntfs_unmap_page(page);
/*
- * The base inode has no attached extent inodes. Skip
- * this extent mft record.
+ * If the end of the pass has not been reached yet,
+ * continue searching the mft bitmap for a zero bit.
*/
- up(&ni->extent_lock);
- iput(vi);
- continue;
+ if (data_pos < pass_end)
+ continue;
}
- /* Iterate over the attached extent inodes. */
- extent_nis = ni->ext.extent_ntfs_inos;
- for (eni = NULL, j = 0; j < ni->nr_extents; ++j) {
- if (mft_no == extent_nis[j]->mft_no) {
- /*
- * Found the extent inode corresponding to this
- * extent mft record.
- */
- eni = extent_nis[j];
+ /* Do the next pass. */
+ if (++pass == 2) {
+ /*
+ * Starting the second pass, in which we scan the first
+ * part of the zone which we omitted earlier.
+ */
+ pass_end = pass_start;
+ data_pos = pass_start = 24;
+ ntfs_debug("pass %i, pass_start 0x%llx, pass_end "
+ "0x%llx.", pass, (long long)pass_start,
+ (long long)pass_end);
+ if (data_pos >= pass_end)
break;
+ }
+ }
+ /* No free mft records in currently initialized mft bitmap. */
+ ntfs_debug("Done. (No free mft records left in currently initialized "
+ "mft bitmap.)");
+ return -ENOSPC;
+}
+
+/**
+ * ntfs_mft_bitmap_extend_allocation_nolock - extend mft bitmap by a cluster
+ * @vol: volume on which to extend the mft bitmap attribute
+ *
+ * Extend the mft bitmap attribute on the ntfs volume @vol by one cluster.
+ *
+ * Note: Only changes allocated_size, i.e. does not touch initialized_size or
+ * data_size.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Locking: - Caller must hold vol->mftbmp_lock for writing.
+ * - This function takes NTFS_I(vol->mftbmp_ino)->runlist.lock for
+ * writing and releases it before returning.
+ * - This function takes vol->lcnbmp_lock for writing and releases it
+ * before returning.
+ */
+static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
+{
+ LCN lcn;
+ s64 ll;
+ struct page *page;
+ ntfs_inode *mft_ni, *mftbmp_ni;
+ runlist_element *rl, *rl2 = NULL;
+ ntfs_attr_search_ctx *ctx = NULL;
+ MFT_RECORD *mrec;
+ ATTR_RECORD *a = NULL;
+ int ret, mp_size;
+ u32 old_alen = 0;
+ u8 *b, tb;
+ struct {
+ u8 added_cluster:1;
+ u8 added_run:1;
+ u8 mp_rebuilt:1;
+ } status = { 0, 0, 0 };
+
+ ntfs_debug("Extending mft bitmap allocation.");
+ mft_ni = NTFS_I(vol->mft_ino);
+ mftbmp_ni = NTFS_I(vol->mftbmp_ino);
+ /*
+ * Determine the last lcn of the mft bitmap. The allocated size of the
+ * mft bitmap cannot be zero so we are ok to do this.
+ * ntfs_find_vcn() returns the runlist locked on success.
+ */
+ rl = ntfs_find_vcn(mftbmp_ni, (mftbmp_ni->allocated_size - 1) >>
+ vol->cluster_size_bits, TRUE);
+ if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+ ntfs_error(vol->sb, "Failed to determine last allocated "
+ "cluster of mft bitmap attribute.");
+ if (!IS_ERR(rl)) {
+ up_write(&mftbmp_ni->runlist.lock);
+ ret = -EIO;
+ } else
+ ret = PTR_ERR(rl);
+ return ret;
+ }
+ lcn = rl->lcn + rl->length;
+ ntfs_debug("Last lcn of mft bitmap attribute is 0x%llx.",
+ (long long)lcn);
+ /*
+ * Attempt to get the cluster following the last allocated cluster by
+ * hand as it may be in the MFT zone so the allocator would not give it
+ * to us.
+ */
+ ll = lcn >> 3;
+ page = ntfs_map_page(vol->lcnbmp_ino->i_mapping,
+ ll >> PAGE_CACHE_SHIFT);
+ if (IS_ERR(page)) {
+ up_write(&mftbmp_ni->runlist.lock);
+ ntfs_error(vol->sb, "Failed to read from lcn bitmap.");
+ return PTR_ERR(page);
+ }
+ b = (u8*)page_address(page) + (ll & ~PAGE_CACHE_MASK);
+ tb = 1 << (lcn & 7ull);
+ down_write(&vol->lcnbmp_lock);
+ if (*b != 0xff && !(*b & tb)) {
+ /* Next cluster is free, allocate it. */
+ *b |= tb;
+ flush_dcache_page(page);
+ set_page_dirty(page);
+ up_write(&vol->lcnbmp_lock);
+ ntfs_unmap_page(page);
+ /* Update the mft bitmap runlist. */
+ rl->length++;
+ rl[1].vcn++;
+ status.added_cluster = 1;
+ ntfs_debug("Appending one cluster to mft bitmap.");
+ } else {
+ up_write(&vol->lcnbmp_lock);
+ ntfs_unmap_page(page);
+ /* Allocate a cluster from the DATA_ZONE. */
+ rl2 = ntfs_cluster_alloc(vol, rl[1].vcn, 1, lcn, DATA_ZONE);
+ if (IS_ERR(rl2)) {
+ up_write(&mftbmp_ni->runlist.lock);
+ ntfs_error(vol->sb, "Failed to allocate a cluster for "
+ "the mft bitmap.");
+ return PTR_ERR(rl2);
+ }
+ rl = ntfs_runlists_merge(mftbmp_ni->runlist.rl, rl2);
+ if (IS_ERR(rl)) {
+ up_write(&mftbmp_ni->runlist.lock);
+ ntfs_error(vol->sb, "Failed to merge runlists for mft "
+ "bitmap.");
+ if (ntfs_cluster_free_from_rl(vol, rl2)) {
+ ntfs_error(vol->sb, "Failed to dealocate "
+ "allocated cluster.%s", es);
+ NVolSetErrors(vol);
}
+ ntfs_free(rl2);
+ return PTR_ERR(rl);
}
+ mftbmp_ni->runlist.rl = rl;
+ status.added_run = 1;
+ ntfs_debug("Adding one run to mft bitmap.");
+ /* Find the last run in the new runlist. */
+ for (; rl[1].length; rl++)
+ ;
+ }
+ /*
+ * Update the attribute record as well. Note: @rl is the last
+ * (non-terminator) runlist element of mft bitmap.
+ */
+ mrec = map_mft_record(mft_ni);
+ if (IS_ERR(mrec)) {
+ ntfs_error(vol->sb, "Failed to map mft record.");
+ ret = PTR_ERR(mrec);
+ goto undo_alloc;
+ }
+ ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
+ if (unlikely(!ctx)) {
+ ntfs_error(vol->sb, "Failed to get search context.");
+ ret = -ENOMEM;
+ goto undo_alloc;
+ }
+ ret = ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
+ mftbmp_ni->name_len, CASE_SENSITIVE, rl[1].vcn, NULL,
+ 0, ctx);
+ if (unlikely(ret)) {
+ ntfs_error(vol->sb, "Failed to find last attribute extent of "
+ "mft bitmap attribute.");
+ if (ret == -ENOENT)
+ ret = -EIO;
+ goto undo_alloc;
+ }
+ a = ctx->attr;
+ ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
+ /* Search back for the previous last allocated cluster of mft bitmap. */
+ for (rl2 = rl; rl2 > mftbmp_ni->runlist.rl; rl2--) {
+ if (ll >= rl2->vcn)
+ break;
+ }
+ BUG_ON(ll < rl2->vcn);
+ BUG_ON(ll >= rl2->vcn + rl2->length);
+ /* Get the size for the new mapping pairs array for this extent. */
+ mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll);
+ if (unlikely(mp_size <= 0)) {
+ ntfs_error(vol->sb, "Get size for mapping pairs failed for "
+ "mft bitmap attribute extent.");
+ ret = mp_size;
+ if (!ret)
+ ret = -EIO;
+ goto undo_alloc;
+ }
+ /* Expand the attribute record if necessary. */
+ old_alen = le32_to_cpu(a->length);
+ ret = ntfs_attr_record_resize(ctx->mrec, a, mp_size +
+ le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
+ if (unlikely(ret)) {
+ if (ret != -ENOSPC) {
+ ntfs_error(vol->sb, "Failed to resize attribute "
+ "record for mft bitmap attribute.");
+ goto undo_alloc;
+ }
+ // TODO: Deal with this by moving this extent to a new mft
+ // record or by starting a new extent in a new mft record or by
+ // moving other attributes out of this mft record.
+ ntfs_error(vol->sb, "Not enough space in this mft record to "
+ "accomodate extended mft bitmap attribute "
+ "extent. Cannot handle this yet.");
+ ret = -EOPNOTSUPP;
+ goto undo_alloc;
+ }
+ status.mp_rebuilt = 1;
+ /* Generate the mapping pairs array directly into the attr record. */
+ ret = ntfs_mapping_pairs_build(vol, (u8*)a +
+ le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
+ mp_size, rl2, ll, NULL);
+ if (unlikely(ret)) {
+ ntfs_error(vol->sb, "Failed to build mapping pairs array for "
+ "mft bitmap attribute.");
+ goto undo_alloc;
+ }
+ /* Update the highest_vcn. */
+ a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 1);
+ /*
+ * We now have extended the mft bitmap allocated_size by one cluster.
+ * Reflect this in the ntfs_inode structure and the attribute record.
+ */
+ if (a->data.non_resident.lowest_vcn) {
/*
- * If the extent inode was not attached to the base inode, skip
- * this extent mft record.
+ * We are not in the first attribute extent, switch to it, but
+ * first ensure the changes will make it to disk later.
*/
- if (!eni) {
- up(&ni->extent_lock);
- iput(vi);
- continue;
+ flush_dcache_mft_record_page(ctx->ntfs_ino);
+ mark_mft_record_dirty(ctx->ntfs_ino);
+ ntfs_attr_reinit_search_ctx(ctx);
+ ret = ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
+ mftbmp_ni->name_len, CASE_SENSITIVE, 0, NULL,
+ 0, ctx);
+ if (unlikely(ret)) {
+ ntfs_error(vol->sb, "Failed to find first attribute "
+ "extent of mft bitmap attribute.");
+ goto restore_undo_alloc;
}
+ a = ctx->attr;
+ }
+ mftbmp_ni->allocated_size += vol->cluster_size;
+ a->data.non_resident.allocated_size =
+ cpu_to_sle64(mftbmp_ni->allocated_size);
+ /* Ensure the changes make it to disk. */
+ flush_dcache_mft_record_page(ctx->ntfs_ino);
+ mark_mft_record_dirty(ctx->ntfs_ino);
+ ntfs_attr_put_search_ctx(ctx);
+ unmap_mft_record(mft_ni);
+ up_write(&mftbmp_ni->runlist.lock);
+ ntfs_debug("Done.");
+ return 0;
+restore_undo_alloc:
+ ntfs_attr_reinit_search_ctx(ctx);
+ if (ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
+ mftbmp_ni->name_len, CASE_SENSITIVE, rl[1].vcn, NULL,
+ 0, ctx)) {
+ ntfs_error(vol->sb, "Failed to find last attribute extent of "
+ "mft bitmap attribute.%s", es);
+ mftbmp_ni->allocated_size += vol->cluster_size;
+ ntfs_attr_put_search_ctx(ctx);
+ unmap_mft_record(mft_ni);
+ up_write(&mftbmp_ni->runlist.lock);
/*
- * Found the extent inode corrsponding to this extent mft
- * record. If it is dirty, no need to search further.
+ * The only thing that is now wrong is ->allocated_size of the
+ * base attribute extent which chkdsk should be able to fix.
*/
- if (NInoDirty(eni)) {
- up(&ni->extent_lock);
- iput(vi);
- is_dirty = TRUE;
+ NVolSetErrors(vol);
+ return ret;
+ }
+ a = ctx->attr;
+ a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 2);
+undo_alloc:
+ if (status.added_cluster) {
+ /* Truncate the last run in the runlist by one cluster. */
+ rl->length--;
+ rl[1].vcn--;
+ } else if (status.added_run) {
+ lcn = rl->lcn;
+ /* Remove the last run from the runlist. */
+ rl->lcn = rl[1].lcn;
+ rl->length = 0;
+ }
+ /* Deallocate the cluster. */
+ down_write(&vol->lcnbmp_lock);
+ if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
+ ntfs_error(vol->sb, "Failed to free allocated cluster.%s", es);
+ NVolSetErrors(vol);
+ }
+ up_write(&vol->lcnbmp_lock);
+ if (status.mp_rebuilt) {
+ if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
+ a->data.non_resident.mapping_pairs_offset),
+ old_alen - le16_to_cpu(
+ a->data.non_resident.mapping_pairs_offset),
+ rl2, ll, NULL)) {
+ ntfs_error(vol->sb, "Failed to restore mapping pairs "
+ "array.%s", es);
+ NVolSetErrors(vol);
+ }
+ if (ntfs_attr_record_resize(ctx->mrec, a, old_alen)) {
+ ntfs_error(vol->sb, "Failed to restore attribute "
+ "record.%s", es);
+ NVolSetErrors(vol);
+ }
+ flush_dcache_mft_record_page(ctx->ntfs_ino);
+ mark_mft_record_dirty(ctx->ntfs_ino);
+ }
+ if (ctx)
+ ntfs_attr_put_search_ctx(ctx);
+ if (!IS_ERR(mrec))
+ unmap_mft_record(mft_ni);
+ up_write(&mftbmp_ni->runlist.lock);
+ return ret;
+}
+
+/**
+ * ntfs_mft_bitmap_extend_initialized_nolock - extend mftbmp initialized data
+ * @vol: volume on which to extend the mft bitmap attribute
+ *
+ * Extend the initialized portion of the mft bitmap attribute on the ntfs
+ * volume @vol by 8 bytes.
+ *
+ * Note: Only changes initialized_size and data_size, i.e. requires that
+ * allocated_size is big enough to fit the new initialized_size.
+ *
+ * Return 0 on success and -error on error.
+ *
+ * Locking: Caller must hold vol->mftbmp_lock for writing.
+ */
+static int ntfs_mft_bitmap_extend_initialized_nolock(ntfs_volume *vol)
+{
+ s64 old_data_size, old_initialized_size;
+ struct inode *mftbmp_vi;
+ ntfs_inode *mft_ni, *mftbmp_ni;
+ ntfs_attr_search_ctx *ctx;
+ MFT_RECORD *mrec;
+ ATTR_RECORD *a;
+ int ret;
+
+ ntfs_debug("Extending mft bitmap initiailized (and data) size.");
+ mft_ni = NTFS_I(vol->mft_ino);
+ mftbmp_vi = vol->mftbmp_ino;
+ mftbmp_ni = NTFS_I(mftbmp_vi);
+ /* Get the attribute record. */
+ mrec = map_mft_record(mft_ni);
+ if (IS_ERR(mrec)) {
+ ntfs_error(vol->sb, "Failed to map mft record.");
+ return PTR_ERR(mrec);
+ }
+ ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
+ if (unlikely(!ctx)) {
+ ntfs_error(vol->sb, "Failed to get search context.");
+ ret = -ENOMEM;
+ goto unm_err_out;
+ }
+ ret = ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
+ mftbmp_ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx);
+ if (unlikely(ret)) {
+ ntfs_error(vol->sb, "Failed to find first attribute extent of "
+ "mft bitmap attribute.");
+ if (ret == -ENOENT)
+ ret = -EIO;
+ goto put_err_out;
+ }
+ a = ctx->attr;
+ old_data_size = mftbmp_vi->i_size;
+ old_initialized_size = mftbmp_ni->initialized_size;
+ /*
+ * We can simply update the initialized_size before filling the space
+ * with zeroes because the caller is holding the mft bitmap lock for
+ * writing which ensures that no one else is trying to access the data.
+ */
+ mftbmp_ni->initialized_size += 8;
+ a->data.non_resident.initialized_size =
+ cpu_to_sle64(mftbmp_ni->initialized_size);
+ if (mftbmp_ni->initialized_size > mftbmp_vi->i_size) {
+ mftbmp_vi->i_size = mftbmp_ni->initialized_size;
+ a->data.non_resident.data_size =
+ cpu_to_sle64(mftbmp_vi->i_size);
+ }
+ /* Ensure the changes make it to disk. */
+ flush_dcache_mft_record_page(ctx->ntfs_ino);
+ mark_mft_record_dirty(ctx->ntfs_ino);
+ ntfs_attr_put_search_ctx(ctx);
+ unmap_mft_record(mft_ni);
+ /* Initialize the mft bitmap attribute value with zeroes. */
+ ret = ntfs_attr_set(mftbmp_ni, old_initialized_size, 8, 0);
+ if (likely(!ret)) {
+ ntfs_debug("Done. (Wrote eight initialized bytes to mft "
+ "bitmap.");
+ return 0;
+ }
+ ntfs_error(vol->sb, "Failed to write to mft bitmap.");
+ /* Try to recover from the error. */
+ mrec = map_mft_record(mft_ni);
+ if (IS_ERR(mrec)) {
+ ntfs_error(vol->sb, "Failed to map mft record.%s", es);
+ NVolSetErrors(vol);
+ return ret;
+ }
+ ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
+ if (unlikely(!ctx)) {
+ ntfs_error(vol->sb, "Failed to get search context.%s", es);
+ NVolSetErrors(vol);
+ goto unm_err_out;
+ }
+ if (ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
+ mftbmp_ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx)) {
+ ntfs_error(vol->sb, "Failed to find first attribute extent of "
+ "mft bitmap attribute.%s", es);
+ NVolSetErrors(vol);
+put_err_out:
+ ntfs_attr_put_search_ctx(ctx);
+unm_err_out:
+ unmap_mft_record(mft_ni);
+ goto err_out;
+ }
+ a = ctx->attr;
+ mftbmp_ni->initialized_size = old_initialized_size;
+ a->data.non_resident.initialized_size =
+ cpu_to_sle64(old_initialized_size);
+ if (mftbmp_vi->i_size != old_data_size) {
+ mftbmp_vi->i_size = old_data_size;
+ a->data.non_resident.data_size = cpu_to_sle64(old_data_size);
+ }
+ flush_dcache_mft_record_page(ctx->ntfs_ino);
+ mark_mft_record_dirty(ctx->ntfs_ino);
+ ntfs_attr_put_search_ctx(ctx);
+ unmap_mft_record(mft_ni);
+ ntfs_debug("Restored status of mftbmp: allocated_size 0x%llx, "
+ "data_size 0x%llx, initialized_size 0x%llx.",
+ (long long)mftbmp_ni->allocated_size,
+ (long long)mftbmp_vi->i_size,
+ (long long)mftbmp_ni->initialized_size);
+err_out:
+ return ret;
+}
+
+/**
+ * ntfs_mft_data_extend_allocation_nolock - extend mft data attribute
+ * @vol: volume on which to extend the mft data attribute
+ *
+ * Extend the mft data attribute on the ntfs volume @vol by 16 mft records
+ * worth of clusters or if not enough space for this by one mft record worth
+ * of clusters.
+ *
+ * Note: Only changes allocated_size, i.e. does not touch initialized_size or
+ * data_size.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Locking: - Caller must hold vol->mftbmp_lock for writing.
+ * - This function takes NTFS_I(vol->mft_ino)->runlist.lock for
+ * writing and releases it before returning.
+ * - This function calls functions which take vol->lcnbmp_lock for
+ * writing and release it before returning.
+ */
+static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
+{
+ LCN lcn;
+ VCN old_last_vcn;
+ s64 min_nr, nr, ll = 0;
+ ntfs_inode *mft_ni;
+ runlist_element *rl, *rl2;
+ ntfs_attr_search_ctx *ctx = NULL;
+ MFT_RECORD *mrec;
+ ATTR_RECORD *a = NULL;
+ int ret, mp_size;
+ u32 old_alen = 0;
+ BOOL mp_rebuilt = FALSE;
+
+ ntfs_debug("Extending mft data allocation.");
+ mft_ni = NTFS_I(vol->mft_ino);
+ /*
+ * Determine the preferred allocation location, i.e. the last lcn of
+ * the mft data attribute. The allocated size of the mft data
+ * attribute cannot be zero so we are ok to do this.
+ * ntfs_find_vcn() returns the runlist locked on success.
+ */
+ rl = ntfs_find_vcn(mft_ni, (mft_ni->allocated_size - 1) >>
+ vol->cluster_size_bits, TRUE);
+ if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+ ntfs_error(vol->sb, "Failed to determine last allocated "
+ "cluster of mft data attribute.");
+ if (!IS_ERR(rl)) {
+ up_write(&mft_ni->runlist.lock);
+ ret = -EIO;
+ } else
+ ret = PTR_ERR(rl);
+ return ret;
+ }
+ lcn = rl->lcn + rl->length;
+ ntfs_debug("Last lcn of mft data attribute is 0x%llx.",
+ (long long)lcn);
+ /* Minimum allocation is one mft record worth of clusters. */
+ min_nr = vol->mft_record_size >> vol->cluster_size_bits;
+ if (!min_nr)
+ min_nr = 1;
+ /* Want to allocate 16 mft records worth of clusters. */
+ nr = vol->mft_record_size << 4 >> vol->cluster_size_bits;
+ if (!nr)
+ nr = min_nr;
+ /* Ensure we do not go above 2^32-1 mft records. */
+ if (unlikely((mft_ni->allocated_size +
+ (nr << vol->cluster_size_bits)) >>
+ vol->mft_record_size_bits >= (1ll << 32))) {
+ nr = min_nr;
+ if (unlikely((mft_ni->allocated_size +
+ (nr << vol->cluster_size_bits)) >>
+ vol->mft_record_size_bits >= (1ll << 32))) {
+ ntfs_warning(vol->sb, "Cannot allocate mft record "
+ "because the maximum number of inodes "
+ "(2^32) has already been reached.");
+ up_write(&mft_ni->runlist.lock);
+ return -ENOSPC;
+ }
+ }
+ ntfs_debug("Trying mft data allocation with %s cluster count %lli.",
+ nr > min_nr ? "default" : "minimal", (long long)nr);
+ old_last_vcn = rl[1].vcn;
+ do {
+ rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE);
+ if (likely(!IS_ERR(rl2)))
break;
+ if (PTR_ERR(rl2) != -ENOSPC || nr == min_nr) {
+ ntfs_error(vol->sb, "Failed to allocate the minimal "
+ "number of clusters (%lli) for the "
+ "mft data attribute.", (long long)nr);
+ up_write(&mft_ni->runlist.lock);
+ return PTR_ERR(rl2);
}
- /* The extent inode is not dirty, so do the next record. */
- up(&ni->extent_lock);
- iput(vi);
+ /*
+ * There is not enough space to do the allocation, but there
+ * might be enough space to do a minimal allocation so try that
+ * before failing.
+ */
+ nr = min_nr;
+ ntfs_debug("Retrying mft data allocation with minimal cluster "
+ "count %lli.", (long long)nr);
+ } while (1);
+ rl = ntfs_runlists_merge(mft_ni->runlist.rl, rl2);
+ if (IS_ERR(rl)) {
+ up_write(&mft_ni->runlist.lock);
+ ntfs_error(vol->sb, "Failed to merge runlists for mft data "
+ "attribute.");
+ if (ntfs_cluster_free_from_rl(vol, rl2)) {
+ ntfs_error(vol->sb, "Failed to dealocate clusters "
+ "from the mft data attribute.%s", es);
+ NVolSetErrors(vol);
+ }
+ ntfs_free(rl2);
+ return PTR_ERR(rl);
}
- kunmap(page);
- /* If a dirty mft record was found, redirty the page. */
- if (is_dirty) {
- ntfs_debug("Inode 0x%lx is dirty. Redirtying the page "
- "starting at inode 0x%lx.", mft_no,
- page->index << (PAGE_CACHE_SHIFT -
- vol->mft_record_size_bits));
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- } else {
+ mft_ni->runlist.rl = rl;
+ ntfs_debug("Allocated %lli clusters.", nr);
+ /* Find the last run in the new runlist. */
+ for (; rl[1].length; rl++)
+ ;
+ /* Update the attribute record as well. */
+ mrec = map_mft_record(mft_ni);
+ if (IS_ERR(mrec)) {
+ ntfs_error(vol->sb, "Failed to map mft record.");
+ ret = PTR_ERR(mrec);
+ goto undo_alloc;
+ }
+ ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
+ if (unlikely(!ctx)) {
+ ntfs_error(vol->sb, "Failed to get search context.");
+ ret = -ENOMEM;
+ goto undo_alloc;
+ }
+ ret = ntfs_attr_lookup(mft_ni->type, mft_ni->name, mft_ni->name_len,
+ CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx);
+ if (unlikely(ret)) {
+ ntfs_error(vol->sb, "Failed to find last attribute extent of "
+ "mft data attribute.");
+ if (ret == -ENOENT)
+ ret = -EIO;
+ goto undo_alloc;
+ }
+ a = ctx->attr;
+ ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
+ /* Search back for the previous last allocated cluster of mft bitmap. */
+ for (rl2 = rl; rl2 > mft_ni->runlist.rl; rl2--) {
+ if (ll >= rl2->vcn)
+ break;
+ }
+ BUG_ON(ll < rl2->vcn);
+ BUG_ON(ll >= rl2->vcn + rl2->length);
+ /* Get the size for the new mapping pairs array for this extent. */
+ mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll);
+ if (unlikely(mp_size <= 0)) {
+ ntfs_error(vol->sb, "Get size for mapping pairs failed for "
+ "mft data attribute extent.");
+ ret = mp_size;
+ if (!ret)
+ ret = -EIO;
+ goto undo_alloc;
+ }
+ /* Expand the attribute record if necessary. */
+ old_alen = le32_to_cpu(a->length);
+ ret = ntfs_attr_record_resize(ctx->mrec, a, mp_size +
+ le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
+ if (unlikely(ret)) {
+ if (ret != -ENOSPC) {
+ ntfs_error(vol->sb, "Failed to resize attribute "
+ "record for mft data attribute.");
+ goto undo_alloc;
+ }
+ // TODO: Deal with this by moving this extent to a new mft
+ // record or by starting a new extent in a new mft record or by
+ // moving other attributes out of this mft record.
+ // Note: Use the special reserved mft records and ensure that
+ // this extent is not required to find the mft record in
+ // question.
+ ntfs_error(vol->sb, "Not enough space in this mft record to "
+ "accomodate extended mft data attribute "
+ "extent. Cannot handle this yet.");
+ ret = -EOPNOTSUPP;
+ goto undo_alloc;
+ }
+ mp_rebuilt = TRUE;
+ /* Generate the mapping pairs array directly into the attr record. */
+ ret = ntfs_mapping_pairs_build(vol, (u8*)a +
+ le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
+ mp_size, rl2, ll, NULL);
+ if (unlikely(ret)) {
+ ntfs_error(vol->sb, "Failed to build mapping pairs array of "
+ "mft data attribute.");
+ goto undo_alloc;
+ }
+ /* Update the highest_vcn. */
+ a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 1);
+ /*
+ * We now have extended the mft data allocated_size by nr clusters.
+ * Reflect this in the ntfs_inode structure and the attribute record.
+ * @rl is the last (non-terminator) runlist element of mft data
+ * attribute.
+ */
+ if (a->data.non_resident.lowest_vcn) {
+ /*
+ * We are not in the first attribute extent, switch to it, but
+ * first ensure the changes will make it to disk later.
+ */
+ flush_dcache_mft_record_page(ctx->ntfs_ino);
+ mark_mft_record_dirty(ctx->ntfs_ino);
+ ntfs_attr_reinit_search_ctx(ctx);
+ ret = ntfs_attr_lookup(mft_ni->type, mft_ni->name,
+ mft_ni->name_len, CASE_SENSITIVE, 0, NULL, 0,
+ ctx);
+ if (unlikely(ret)) {
+ ntfs_error(vol->sb, "Failed to find first attribute "
+ "extent of mft data attribute.");
+ goto restore_undo_alloc;
+ }
+ a = ctx->attr;
+ }
+ mft_ni->allocated_size += nr << vol->cluster_size_bits;
+ a->data.non_resident.allocated_size =
+ cpu_to_sle64(mft_ni->allocated_size);
+ /* Ensure the changes make it to disk. */
+ flush_dcache_mft_record_page(ctx->ntfs_ino);
+ mark_mft_record_dirty(ctx->ntfs_ino);
+ ntfs_attr_put_search_ctx(ctx);
+ unmap_mft_record(mft_ni);
+ up_write(&mft_ni->runlist.lock);
+ ntfs_debug("Done.");
+ return 0;
+restore_undo_alloc:
+ ntfs_attr_reinit_search_ctx(ctx);
+ if (ntfs_attr_lookup(mft_ni->type, mft_ni->name, mft_ni->name_len,
+ CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx)) {
+ ntfs_error(vol->sb, "Failed to find last attribute extent of "
+ "mft data attribute.%s", es);
+ mft_ni->allocated_size += nr << vol->cluster_size_bits;
+ ntfs_attr_put_search_ctx(ctx);
+ unmap_mft_record(mft_ni);
+ up_write(&mft_ni->runlist.lock);
+ /*
+ * The only thing that is now wrong is ->allocated_size of the
+ * base attribute extent which chkdsk should be able to fix.
+ */
+ NVolSetErrors(vol);
+ return ret;
+ }
+ a = ctx->attr;
+ a->data.non_resident.highest_vcn = cpu_to_sle64(old_last_vcn - 1);
+undo_alloc:
+ if (ntfs_cluster_free(vol->mft_ino, old_last_vcn, -1) < 0) {
+ ntfs_error(vol->sb, "Failed to free clusters from mft data "
+ "attribute.%s", es);
+ NVolSetErrors(vol);
+ }
+ if (ntfs_rl_truncate_nolock(vol, &mft_ni->runlist, old_last_vcn)) {
+ ntfs_error(vol->sb, "Failed to truncate mft data attribute "
+ "runlist.%s", es);
+ NVolSetErrors(vol);
+ }
+ if (mp_rebuilt) {
+ if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
+ a->data.non_resident.mapping_pairs_offset),
+ old_alen - le16_to_cpu(
+ a->data.non_resident.mapping_pairs_offset),
+ rl2, ll, NULL)) {
+ ntfs_error(vol->sb, "Failed to restore mapping pairs "
+ "array.%s", es);
+ NVolSetErrors(vol);
+ }
+ if (ntfs_attr_record_resize(ctx->mrec, a, old_alen)) {
+ ntfs_error(vol->sb, "Failed to restore attribute "
+ "record.%s", es);
+ NVolSetErrors(vol);
+ }
+ flush_dcache_mft_record_page(ctx->ntfs_ino);
+ mark_mft_record_dirty(ctx->ntfs_ino);
+ }
+ if (ctx)
+ ntfs_attr_put_search_ctx(ctx);
+ if (!IS_ERR(mrec))
+ unmap_mft_record(mft_ni);
+ up_write(&mft_ni->runlist.lock);
+ return ret;
+}
+
+/**
+ * ntfs_mft_record_layout - layout an mft record into a memory buffer
+ * @vol: volume to which the mft record will belong
+ * @mft_no: mft reference specifying the mft record number
+ * @m: destination buffer of size >= @vol->mft_record_size bytes
+ *
+ * Layout an empty, unused mft record with the mft record number @mft_no into
+ * the buffer @m. The volume @vol is needed because the mft record structure
+ * was modified in NTFS 3.1 so we need to know which volume version this mft
+ * record will be used on.
+ *
+ * Return 0 on success and -errno on error.
+ */
+static int ntfs_mft_record_layout(const ntfs_volume *vol, const s64 mft_no,
+ MFT_RECORD *m)
+{
+ ATTR_RECORD *a;
+
+ ntfs_debug("Entering for mft record 0x%llx.", (long long)mft_no);
+ if (mft_no >= (1ll << 32)) {
+ ntfs_error(vol->sb, "Mft record number 0x%llx exceeds "
+ "maximum of 2^32.", (long long)mft_no);
+ return -ERANGE;
+ }
+ /* Start by clearing the whole mft record to give us a clean slate. */
+ memset(m, 0, vol->mft_record_size);
+ /* Aligned to 2-byte boundary. */
+ if (vol->major_ver < 3 || (vol->major_ver == 3 && !vol->minor_ver))
+ m->usa_ofs = cpu_to_le16((sizeof(MFT_RECORD_OLD) + 1) & ~1);
+ else {
+ m->usa_ofs = cpu_to_le16((sizeof(MFT_RECORD) + 1) & ~1);
/*
- * Keep the VM happy. This must be done otherwise the
- * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
- * the page is clean.
+ * Set the NTFS 3.1+ specific fields while we know that the
+ * volume version is 3.1+.
*/
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
+ m->reserved = 0;
+ m->mft_record_number = cpu_to_le32((u32)mft_no);
+ }
+ m->magic = magic_FILE;
+ if (vol->mft_record_size >= NTFS_BLOCK_SIZE)
+ m->usa_count = cpu_to_le16(vol->mft_record_size /
+ NTFS_BLOCK_SIZE + 1);
+ else {
+ m->usa_count = cpu_to_le16(1);
+ ntfs_warning(vol->sb, "Sector size is bigger than mft record "
+ "size. Setting usa_count to 1. If chkdsk "
+ "reports this as corruption, please email "
+ "linux-ntfs-dev@lists.sourceforge.net stating "
+ "that you saw this message and that the "
+ "modified file system created was corrupt. "
+ "Thank you.");
+ }
+ /* Set the update sequence number to 1. */
+ *(le16*)((u8*)m + le16_to_cpu(m->usa_ofs)) = cpu_to_le16(1);
+ m->lsn = 0;
+ m->sequence_number = cpu_to_le16(1);
+ m->link_count = 0;
+ /*
+ * Place the attributes straight after the update sequence array,
+ * aligned to 8-byte boundary.
+ */
+ m->attrs_offset = cpu_to_le16((le16_to_cpu(m->usa_ofs) +
+ (le16_to_cpu(m->usa_count) << 1) + 7) & ~7);
+ m->flags = 0;
+ /*
+ * Using attrs_offset plus eight bytes (for the termination attribute).
+ * attrs_offset is already aligned to 8-byte boundary, so no need to
+ * align again.
+ */
+ m->bytes_in_use = cpu_to_le32(le16_to_cpu(m->attrs_offset) + 8);
+ m->bytes_allocated = cpu_to_le32(vol->mft_record_size);
+ m->base_mft_record = 0;
+ m->next_attr_instance = 0;
+ /* Add the termination attribute. */
+ a = (ATTR_RECORD*)((u8*)m + le16_to_cpu(m->attrs_offset));
+ a->type = AT_END;
+ a->length = 0;
+ ntfs_debug("Done.");
+ return 0;
+}
+
+/**
+ * ntfs_mft_record_format - format an mft record on an ntfs volume
+ * @vol: volume on which to format the mft record
+ * @mft_no: mft record number to format
+ *
+ * Format the mft record @mft_no in $MFT/$DATA, i.e. lay out an empty, unused
+ * mft record into the appropriate place of the mft data attribute. This is
+ * used when extending the mft data attribute.
+ *
+ * Return 0 on success and -errno on error.
+ */
+static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no)
+{
+ struct inode *mft_vi = vol->mft_ino;
+ struct page *page;
+ MFT_RECORD *m;
+ pgoff_t index, end_index;
+ unsigned int ofs;
+ int err;
+
+ ntfs_debug("Entering for mft record 0x%llx.", (long long)mft_no);
+ /*
+ * The index into the page cache and the offset within the page cache
+ * page of the wanted mft record.
+ */
+ index = mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
+ ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+ /* The maximum valid index into the page cache for $MFT's data. */
+ end_index = mft_vi->i_size >> PAGE_CACHE_SHIFT;
+ if (unlikely(index >= end_index)) {
+ if (unlikely(index > end_index || ofs + vol->mft_record_size >=
+ (mft_vi->i_size & ~PAGE_CACHE_MASK))) {
+ ntfs_error(vol->sb, "Tried to format non-existing mft "
+ "record 0x%llx.", (long long)mft_no);
+ return -ENOENT;
+ }
+ }
+ /* Read, map, and pin the page containing the mft record. */
+ page = ntfs_map_page(mft_vi->i_mapping, index);
+ if (unlikely(IS_ERR(page))) {
+ ntfs_error(vol->sb, "Failed to map page containing mft record "
+ "to format 0x%llx.", (long long)mft_no);
+ return PTR_ERR(page);
+ }
+ lock_page(page);
+ BUG_ON(!PageUptodate(page));
+ ClearPageUptodate(page);
+ m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
+ err = ntfs_mft_record_layout(vol, mft_no, m);
+ if (unlikely(err)) {
+ ntfs_error(vol->sb, "Failed to layout mft record 0x%llx.",
+ (long long)mft_no);
+ SetPageUptodate(page);
unlock_page(page);
- end_page_writeback(page);
+ ntfs_unmap_page(page);
+ return err;
}
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ /*
+ * Make sure the mft record is written out to disk. We could use
+ * ilookup5() to check if an inode is in icache and so on but this is
+ * unnecessary as ntfs_writepage() will write the dirty record anyway.
+ */
+ mark_ntfs_record_dirty(page, ofs);
+ ntfs_unmap_page(page);
ntfs_debug("Done.");
return 0;
}
+/**
+ * ntfs_mft_record_alloc - allocate an mft record on an ntfs volume
+ * @vol: [IN] volume on which to allocate the mft record
+ * @mode: [IN] mode if want a file or directory, i.e. base inode or 0
+ * @base_ni: [IN] open base inode if allocating an extent mft record or NULL
+ * @mrec: [OUT] on successful return this is the mapped mft record
+ *
+ * Allocate an mft record in $MFT/$DATA of an open ntfs volume @vol.
+ *
+ * If @base_ni is NULL make the mft record a base mft record, i.e. a file or
+ * directory inode, and allocate it at the default allocator position. In
+ * this case @mode is the file mode as given to us by the caller. We in
+ * particular use @mode to distinguish whether a file or a directory is being
+ * created (S_IFDIR(mode) and S_IFREG(mode), respectively).
+ *
+ * If @base_ni is not NULL make the allocated mft record an extent record,
+ * allocate it starting at the mft record after the base mft record and attach
+ * the allocated and opened ntfs inode to the base inode @base_ni. In this
+ * case @mode must be 0 as it is meaningless for extent inodes.
+ *
+ * You need to check the return value with IS_ERR(). If false, the function
+ * was successful and the return value is the now opened ntfs inode of the
+ * allocated mft record. *@mrec is then set to the allocated, mapped, pinned,
+ * and locked mft record. If IS_ERR() is true, the function failed and the
+ * error code is obtained from PTR_ERR(return value). *@mrec is undefined in
+ * this case.
+ *
+ * Allocation strategy:
+ *
+ * To find a free mft record, we scan the mft bitmap for a zero bit. To
+ * optimize this we start scanning at the place specified by @base_ni or if
+ * @base_ni is NULL we start where we last stopped and we perform wrap around
+ * when we reach the end. Note, we do not try to allocate mft records below
+ * number 24 because numbers 0 to 15 are the defined system files anyway and 16
+ * to 24 are special in that they are used for storing extension mft records
+ * for the $DATA attribute of $MFT. This is required to avoid the possibility
+ * of creating a runlist with a circular dependency which once written to disk
+ * can never be read in again. Windows will only use records 16 to 24 for
+ * normal files if the volume is completely out of space. We never use them
+ * which means that when the volume is really out of space we cannot create any
+ * more files while Windows can still create up to 8 small files. We can start
+ * doing this at some later time, it does not matter much for now.
+ *
+ * When scanning the mft bitmap, we only search up to the last allocated mft
+ * record. If there are no free records left in the range 24 to number of
+ * allocated mft records, then we extend the $MFT/$DATA attribute in order to
+ * create free mft records. We extend the allocated size of $MFT/$DATA by 16
+ * records at a time or one cluster, if cluster size is above 16kiB. If there
+ * is not sufficient space to do this, we try to extend by a single mft record
+ * or one cluster, if cluster size is above the mft record size.
+ *
+ * No matter how many mft records we allocate, we initialize only the first
+ * allocated mft record, incrementing mft data size and initialized size
+ * accordingly, open an ntfs_inode for it and return it to the caller, unless
+ * there are less than 24 mft records, in which case we allocate and initialize
+ * mft records until we reach record 24 which we consider as the first free mft
+ * record for use by normal files.
+ *
+ * If during any stage we overflow the initialized data in the mft bitmap, we
+ * extend the initialized size (and data size) by 8 bytes, allocating another
+ * cluster if required. The bitmap data size has to be at least equal to the
+ * number of mft records in the mft, but it can be bigger, in which case the
+ * superfluous bits are padded with zeroes.
+ *
+ * Thus, when we return successfully (IS_ERR() is false), we will have:
+ * - initialized / extended the mft bitmap if necessary,
+ * - initialized / extended the mft data if necessary,
+ * - set the bit corresponding to the mft record being allocated in the
+ * mft bitmap,
+ * - opened an ntfs_inode for the allocated mft record, and we will have
+ * - returned the ntfs_inode as well as the allocated mapped, pinned, and
+ * locked mft record.
+ *
+ * On error, the volume will be left in a consistent state and no record will
+ * be allocated. If rolling back a partial operation fails, we may leave some
+ * inconsistent metadata in which case we set NVolErrors() so the volume is
+ * left dirty when unmounted.
+ *
+ * Note, this function cannot make use of most of the normal functions, like
+ * for example for attribute resizing, etc, because when the run list overflows
+ * the base mft record and an attribute list is used, it is very important that
+ * the extension mft records used to store the $DATA attribute of $MFT can be
+ * reached without having to read the information contained inside them, as
+ * this would make it impossible to find them in the first place after the
+ * volume is unmounted. $MFT/$BITMAP probably does not need to follow this
+ * rule because the bitmap is not essential for finding the mft records, but on
+ * the other hand, handling the bitmap in this special way would make life
+ * easier because otherwise there might be circular invocations of functions
+ * when reading the bitmap.
+ */
+ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode,
+		ntfs_inode *base_ni, MFT_RECORD **mrec)
+{
+	s64 ll, bit, old_data_initialized, old_data_size;
+	struct inode *vi;
+	struct page *page;
+	ntfs_inode *mft_ni, *mftbmp_ni, *ni;
+	ntfs_attr_search_ctx *ctx;
+	MFT_RECORD *m;
+	ATTR_RECORD *a;
+	pgoff_t index;
+	unsigned int ofs;
+	int err;
+	le16 seq_no, usn;
+	BOOL record_formatted = FALSE;
+
+	if (base_ni) {
+		ntfs_debug("Entering (allocating an extent mft record for "
+				"base mft record 0x%llx).",
+				(long long)base_ni->mft_no);
+		/* @mode and @base_ni are mutually exclusive. */
+		BUG_ON(mode);
+	} else
+		ntfs_debug("Entering (allocating a base mft record).");
+	if (mode) {
+		/* @mode and @base_ni are mutually exclusive. */
+		BUG_ON(base_ni);
+		/* We only support creation of normal files and directories. */
+		if (!S_ISREG(mode) && !S_ISDIR(mode))
+			return ERR_PTR(-EOPNOTSUPP);
+	}
+	BUG_ON(!mrec);
+	mft_ni = NTFS_I(vol->mft_ino);
+	mftbmp_ni = NTFS_I(vol->mftbmp_ino);
+	down_write(&vol->mftbmp_lock);
+	bit = ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(vol, base_ni);
+	if (bit >= 0) {
+		ntfs_debug("Found and allocated free record (#1), bit 0x%llx.",
+				(long long)bit);
+		goto have_alloc_rec;
+	}
+	if (bit != -ENOSPC) {
+		up_write(&vol->mftbmp_lock);
+		return ERR_PTR(bit);
+	}
+	/*
+	 * No free mft records left. If the mft bitmap already covers more
+	 * than the currently used mft records, the next records are all free,
+	 * so we can simply allocate the first unused mft record.
+	 * Note: We also have to make sure that the mft bitmap at least covers
+	 * the first 24 mft records as they are special and whilst they may not
+	 * be in use, we do not allocate from them.
+	 */
+	ll = mft_ni->initialized_size >> vol->mft_record_size_bits;
+	if (mftbmp_ni->initialized_size << 3 > ll &&
+			mftbmp_ni->initialized_size > 3) {
+		bit = ll;
+		if (bit < 24)
+			bit = 24;
+		/* Mft record numbers are limited to 32 bits on disk. */
+		if (unlikely(bit >= (1ll << 32)))
+			goto max_err_out;
+		ntfs_debug("Found free record (#2), bit 0x%llx.",
+				(long long)bit);
+		goto found_free_rec;
+	}
+	/*
+	 * The mft bitmap needs to be expanded until it covers the first unused
+	 * mft record that we can allocate.
+	 * Note: The smallest mft record we allocate is mft record 24.
+	 */
+	bit = mftbmp_ni->initialized_size << 3;
+	if (unlikely(bit >= (1ll << 32)))
+		goto max_err_out;
+	ntfs_debug("Status of mftbmp before extension: allocated_size 0x%llx, "
+			"data_size 0x%llx, initialized_size 0x%llx.",
+			(long long)mftbmp_ni->allocated_size,
+			(long long)vol->mftbmp_ino->i_size,
+			(long long)mftbmp_ni->initialized_size);
+	if (mftbmp_ni->initialized_size + 8 > mftbmp_ni->allocated_size) {
+		/* Need to extend bitmap by one more cluster. */
+		ntfs_debug("mftbmp: initialized_size + 8 > allocated_size.");
+		err = ntfs_mft_bitmap_extend_allocation_nolock(vol);
+		if (unlikely(err)) {
+			up_write(&vol->mftbmp_lock);
+			goto err_out;
+		}
+		ntfs_debug("Status of mftbmp after allocation extension: "
+				"allocated_size 0x%llx, data_size 0x%llx, "
+				"initialized_size 0x%llx.",
+				(long long)mftbmp_ni->allocated_size,
+				(long long)vol->mftbmp_ino->i_size,
+				(long long)mftbmp_ni->initialized_size);
+	}
+	/*
+	 * We now have sufficient allocated space, extend the initialized_size
+	 * as well as the data_size if necessary and fill the new space with
+	 * zeroes.
+	 */
+	err = ntfs_mft_bitmap_extend_initialized_nolock(vol);
+	if (unlikely(err)) {
+		up_write(&vol->mftbmp_lock);
+		goto err_out;
+	}
+	ntfs_debug("Status of mftbmp after initialized extention: "
+			"allocated_size 0x%llx, data_size 0x%llx, "
+			"initialized_size 0x%llx.",
+			(long long)mftbmp_ni->allocated_size,
+			(long long)vol->mftbmp_ino->i_size,
+			(long long)mftbmp_ni->initialized_size);
+	ntfs_debug("Found free record (#3), bit 0x%llx.", (long long)bit);
+found_free_rec:
+	/* @bit is the found free mft record, allocate it in the mft bitmap. */
+	ntfs_debug("At found_free_rec.");
+	err = ntfs_bitmap_set_bit(vol->mftbmp_ino, bit);
+	if (unlikely(err)) {
+		ntfs_error(vol->sb, "Failed to allocate bit in mft bitmap.");
+		up_write(&vol->mftbmp_lock);
+		goto err_out;
+	}
+	ntfs_debug("Set bit 0x%llx in mft bitmap.", (long long)bit);
+have_alloc_rec:
+	/*
+	 * The mft bitmap is now uptodate. Deal with mft data attribute now.
+	 * Note, we keep hold of the mft bitmap lock for writing until all
+	 * modifications to the mft data attribute are complete, too, as they
+	 * will impact decisions for mft bitmap and mft record allocation done
+	 * by a parallel allocation and if the lock is not maintained a
+	 * parallel allocation could allocate the same mft record as this one.
+	 */
+	ll = (bit + 1) << vol->mft_record_size_bits;
+	if (ll <= mft_ni->initialized_size) {
+		ntfs_debug("Allocated mft record already initialized.");
+		goto mft_rec_already_initialized;
+	}
+	ntfs_debug("Initializing allocated mft record.");
+	/*
+	 * The mft record is outside the initialized data. Extend the mft data
+	 * attribute until it covers the allocated record. The loop is only
+	 * actually traversed more than once when a freshly formatted volume is
+	 * first written to so it optimizes away nicely in the common case.
+	 */
+	ntfs_debug("Status of mft data before extension: "
+			"allocated_size 0x%llx, data_size 0x%llx, "
+			"initialized_size 0x%llx.",
+			(long long)mft_ni->allocated_size,
+			(long long)vol->mft_ino->i_size,
+			(long long)mft_ni->initialized_size);
+	while (ll > mft_ni->allocated_size) {
+		err = ntfs_mft_data_extend_allocation_nolock(vol);
+		if (unlikely(err)) {
+			ntfs_error(vol->sb, "Failed to extend mft data "
+					"allocation.");
+			goto undo_mftbmp_alloc_nolock;
+		}
+		ntfs_debug("Status of mft data after allocation extension: "
+				"allocated_size 0x%llx, data_size 0x%llx, "
+				"initialized_size 0x%llx.",
+				(long long)mft_ni->allocated_size,
+				(long long)vol->mft_ino->i_size,
+				(long long)mft_ni->initialized_size);
+	}
+	/*
+	 * Extend mft data initialized size (and data size of course) to reach
+	 * the allocated mft record, formatting the mft records along the way.
+	 * Note: We only modify the ntfs_inode structure as that is all that is
+	 * needed by ntfs_mft_record_format(). We will update the attribute
+	 * record itself in one fell swoop later on.
+	 */
+	old_data_initialized = mft_ni->initialized_size;
+	old_data_size = vol->mft_ino->i_size;
+	while (ll > mft_ni->initialized_size) {
+		s64 new_initialized_size, mft_no;
+
+		new_initialized_size = mft_ni->initialized_size +
+				vol->mft_record_size;
+		mft_no = mft_ni->initialized_size >> vol->mft_record_size_bits;
+		if (new_initialized_size > vol->mft_ino->i_size)
+			vol->mft_ino->i_size = new_initialized_size;
+		ntfs_debug("Initializing mft record 0x%llx.",
+				(long long)mft_no);
+		err = ntfs_mft_record_format(vol, mft_no);
+		if (unlikely(err)) {
+			ntfs_error(vol->sb, "Failed to format mft record.");
+			goto undo_data_init;
+		}
+		mft_ni->initialized_size = new_initialized_size;
+	}
+	record_formatted = TRUE;
+	/* Update the mft data attribute record to reflect the new sizes. */
+	m = map_mft_record(mft_ni);
+	if (IS_ERR(m)) {
+		ntfs_error(vol->sb, "Failed to map mft record.");
+		err = PTR_ERR(m);
+		goto undo_data_init;
+	}
+	ctx = ntfs_attr_get_search_ctx(mft_ni, m);
+	if (unlikely(!ctx)) {
+		ntfs_error(vol->sb, "Failed to get search context.");
+		err = -ENOMEM;
+		unmap_mft_record(mft_ni);
+		goto undo_data_init;
+	}
+	err = ntfs_attr_lookup(mft_ni->type, mft_ni->name, mft_ni->name_len,
+			CASE_SENSITIVE, 0, NULL, 0, ctx);
+	if (unlikely(err)) {
+		ntfs_error(vol->sb, "Failed to find first attribute extent of "
+				"mft data attribute.");
+		ntfs_attr_put_search_ctx(ctx);
+		unmap_mft_record(mft_ni);
+		goto undo_data_init;
+	}
+	a = ctx->attr;
+	a->data.non_resident.initialized_size =
+			cpu_to_sle64(mft_ni->initialized_size);
+	a->data.non_resident.data_size = cpu_to_sle64(vol->mft_ino->i_size);
+	/* Ensure the changes make it to disk. */
+	flush_dcache_mft_record_page(ctx->ntfs_ino);
+	mark_mft_record_dirty(ctx->ntfs_ino);
+	ntfs_attr_put_search_ctx(ctx);
+	unmap_mft_record(mft_ni);
+	ntfs_debug("Status of mft data after mft record initialization: "
+			"allocated_size 0x%llx, data_size 0x%llx, "
+			"initialized_size 0x%llx.",
+			(long long)mft_ni->allocated_size,
+			(long long)vol->mft_ino->i_size,
+			(long long)mft_ni->initialized_size);
+	BUG_ON(vol->mft_ino->i_size > mft_ni->allocated_size);
+	BUG_ON(mft_ni->initialized_size > vol->mft_ino->i_size);
+mft_rec_already_initialized:
+	/*
+	 * We can finally drop the mft bitmap lock as the mft data attribute
+	 * has been fully updated. The only disparity left is that the
+	 * allocated mft record still needs to be marked as in use to match the
+	 * set bit in the mft bitmap but this is actually not a problem since
+	 * this mft record is not referenced from anywhere yet and the fact
+	 * that it is allocated in the mft bitmap means that no-one will try to
+	 * allocate it either.
+	 */
+	up_write(&vol->mftbmp_lock);
+	/*
+	 * We now have allocated and initialized the mft record. Calculate the
+	 * index of and the offset within the page cache page the record is in.
+	 */
+	index = bit << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
+	ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+	/* Read, map, and pin the page containing the mft record. */
+	page = ntfs_map_page(vol->mft_ino->i_mapping, index);
+	if (unlikely(IS_ERR(page))) {
+		ntfs_error(vol->sb, "Failed to map page containing allocated "
+				"mft record 0x%llx.", (long long)bit);
+		err = PTR_ERR(page);
+		goto undo_mftbmp_alloc;
+	}
+	lock_page(page);
+	BUG_ON(!PageUptodate(page));
+	ClearPageUptodate(page);
+	m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
+	/* If we just formatted the mft record no need to do it again. */
+	if (!record_formatted) {
+		/* Sanity check that the mft record is really not in use. */
+		if (ntfs_is_file_record(m->magic) &&
+				(m->flags & MFT_RECORD_IN_USE)) {
+			ntfs_error(vol->sb, "Mft record 0x%llx was marked "
+					"free in mft bitmap but is marked "
+					"used itself. Corrupt filesystem. "
+					"Unmount and run chkdsk.",
+					(long long)bit);
+			err = -EIO;
+			SetPageUptodate(page);
+			unlock_page(page);
+			ntfs_unmap_page(page);
+			NVolSetErrors(vol);
+			goto undo_mftbmp_alloc;
+		}
+		/*
+		 * We need to (re-)format the mft record, preserving the
+		 * sequence number if it is not zero as well as the update
+		 * sequence number if it is not zero or -1 (0xffff). This
+		 * means we do not need to care whether or not something went
+		 * wrong with the previous mft record.
+		 */
+		seq_no = m->sequence_number;
+		usn = *(le16*)((u8*)m + le16_to_cpu(m->usa_ofs));
+		err = ntfs_mft_record_layout(vol, bit, m);
+		if (unlikely(err)) {
+			ntfs_error(vol->sb, "Failed to layout allocated mft "
+					"record 0x%llx.", (long long)bit);
+			SetPageUptodate(page);
+			unlock_page(page);
+			ntfs_unmap_page(page);
+			goto undo_mftbmp_alloc;
+		}
+		if (seq_no)
+			m->sequence_number = seq_no;
+		if (usn && le16_to_cpu(usn) != 0xffff)
+			*(le16*)((u8*)m + le16_to_cpu(m->usa_ofs)) = usn;
+	}
+	/* Set the mft record itself in use. */
+	m->flags |= MFT_RECORD_IN_USE;
+	if (S_ISDIR(mode))
+		m->flags |= MFT_RECORD_IS_DIRECTORY;
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	if (base_ni) {
+		MFT_RECORD *extent_m;
+
+		/*
+		 * Setup the base mft record in the extent mft record. This
+		 * completes initialization of the allocated extent mft record
+		 * and we can simply use it with map_extent_mft_record().
+		 */
+		m->base_mft_record = MK_LE_MREF(base_ni->mft_no,
+				base_ni->seq_no);
+		/*
+		 * Allocate an extent inode structure for the new mft record,
+		 * attach it to the base inode @base_ni and map, pin, and lock
+		 * its, i.e. the allocated, mft record. Map into a separate
+		 * variable so that on failure we still have the valid,
+		 * page-based mft record pointer in @m for the cleanup below;
+		 * dereferencing the returned error pointer would oops.
+		 */
+		extent_m = map_extent_mft_record(base_ni, bit, &ni);
+		if (IS_ERR(extent_m)) {
+			ntfs_error(vol->sb, "Failed to map allocated extent "
+					"mft record 0x%llx.", (long long)bit);
+			err = PTR_ERR(extent_m);
+			/* Set the mft record itself not in use. */
+			m->flags &= cpu_to_le16(
+					~le16_to_cpu(MFT_RECORD_IN_USE));
+			flush_dcache_page(page);
+			/* Make sure the mft record is written out to disk. */
+			mark_ntfs_record_dirty(page, ofs);
+			unlock_page(page);
+			ntfs_unmap_page(page);
+			goto undo_mftbmp_alloc;
+		}
+		/* From here on @m is the mapped extent mft record. */
+		m = extent_m;
+		/*
+		 * Make sure the allocated mft record is written out to disk.
+		 * No need to set the inode dirty because the caller is going
+		 * to do that anyway after finishing with the new extent mft
+		 * record (e.g. at a minimum a new attribute will be added to
+		 * the mft record).
+		 */
+		mark_ntfs_record_dirty(page, ofs);
+		unlock_page(page);
+		/*
+		 * Need to unmap the page since map_extent_mft_record() mapped
+		 * it as well so we have it mapped twice at the moment.
+		 */
+		ntfs_unmap_page(page);
+	} else {
+		/*
+		 * Allocate a new VFS inode and set it up. NOTE: @vi->i_nlink
+		 * is set to 1 but the mft record->link_count is 0. The caller
+		 * needs to bear this in mind.
+		 */
+		vi = new_inode(vol->sb);
+		if (unlikely(!vi)) {
+			err = -ENOMEM;
+			/* Set the mft record itself not in use. */
+			m->flags &= cpu_to_le16(
+					~le16_to_cpu(MFT_RECORD_IN_USE));
+			flush_dcache_page(page);
+			/* Make sure the mft record is written out to disk. */
+			mark_ntfs_record_dirty(page, ofs);
+			unlock_page(page);
+			ntfs_unmap_page(page);
+			goto undo_mftbmp_alloc;
+		}
+		vi->i_ino = bit;
+		/*
+		 * This is the optimal IO size (for stat), not the fs block
+		 * size.
+		 */
+		vi->i_blksize = PAGE_CACHE_SIZE;
+		/*
+		 * This is for checking whether an inode has changed w.r.t. a
+		 * file so that the file can be updated if necessary (compare
+		 * with f_version).
+		 */
+		vi->i_version = 1;
+
+		/* The owner and group come from the ntfs volume. */
+		vi->i_uid = vol->uid;
+		vi->i_gid = vol->gid;
+
+		/* Initialize the ntfs specific part of @vi. */
+		ntfs_init_big_inode(vi);
+		ni = NTFS_I(vi);
+		/*
+		 * Set the appropriate mode, attribute type, and name. For
+		 * directories, also setup the index values to the defaults.
+		 */
+		if (S_ISDIR(mode)) {
+			vi->i_mode = S_IFDIR | S_IRWXUGO;
+			vi->i_mode &= ~vol->dmask;
+
+			NInoSetMstProtected(ni);
+			ni->type = AT_INDEX_ALLOCATION;
+			ni->name = I30;
+			ni->name_len = 4;
+
+			ni->itype.index.block_size = 4096;
+			ni->itype.index.block_size_bits = generic_ffs(4096) - 1;
+			ni->itype.index.collation_rule = COLLATION_FILE_NAME;
+			if (vol->cluster_size <= ni->itype.index.block_size) {
+				ni->itype.index.vcn_size = vol->cluster_size;
+				ni->itype.index.vcn_size_bits =
+						vol->cluster_size_bits;
+			} else {
+				ni->itype.index.vcn_size = vol->sector_size;
+				ni->itype.index.vcn_size_bits =
+						vol->sector_size_bits;
+			}
+		} else {
+			vi->i_mode = S_IFREG | S_IRWXUGO;
+			vi->i_mode &= ~vol->fmask;
+
+			ni->type = AT_DATA;
+			ni->name = NULL;
+			ni->name_len = 0;
+		}
+		if (IS_RDONLY(vi))
+			vi->i_mode &= ~S_IWUGO;
+
+		/* Set the inode times to the current time. */
+		vi->i_atime = vi->i_mtime = vi->i_ctime = current_kernel_time();
+		/*
+		 * Set the file size to 0, the ntfs inode sizes are set to 0 by
+		 * the call to ntfs_init_big_inode() below.
+		 */
+		vi->i_size = 0;
+		vi->i_blocks = 0;
+
+		/* Set the sequence number. */
+		vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
+		/*
+		 * Manually map, pin, and lock the mft record as we already
+		 * have its page mapped and it is very easy to do.
+		 */
+		atomic_inc(&ni->count);
+		down(&ni->mrec_lock);
+		ni->page = page;
+		ni->page_ofs = ofs;
+		/*
+		 * Make sure the allocated mft record is written out to disk.
+		 * NOTE: We do not set the ntfs inode dirty because this would
+		 * fail in ntfs_write_inode() because the inode does not have a
+		 * standard information attribute yet. Also, there is no need
+		 * to set the inode dirty because the caller is going to do
+		 * that anyway after finishing with the new mft record (e.g. at
+		 * a minimum some new attributes will be added to the mft
+		 * record).
+		 */
+		mark_ntfs_record_dirty(page, ofs);
+		unlock_page(page);
+
+		/* Add the inode to the inode hash for the superblock. */
+		insert_inode_hash(vi);
+
+		/* Update the default mft allocation position. */
+		vol->mft_data_pos = bit + 1;
+	}
+	/*
+	 * Return the opened, allocated inode of the allocated mft record as
+	 * well as the mapped, pinned, and locked mft record.
+	 */
+	ntfs_debug("Returning opened, allocated %sinode 0x%llx.",
+			base_ni ? "extent " : "", (long long)bit);
+	*mrec = m;
+	return ni;
+undo_data_init:
+	mft_ni->initialized_size = old_data_initialized;
+	vol->mft_ino->i_size = old_data_size;
+	goto undo_mftbmp_alloc_nolock;
+undo_mftbmp_alloc:
+	down_write(&vol->mftbmp_lock);
+undo_mftbmp_alloc_nolock:
+	if (ntfs_bitmap_clear_bit(vol->mftbmp_ino, bit)) {
+		ntfs_error(vol->sb, "Failed to clear bit in mft bitmap.%s", es);
+		NVolSetErrors(vol);
+	}
+	up_write(&vol->mftbmp_lock);
+err_out:
+	return ERR_PTR(err);
+max_err_out:
+	ntfs_warning(vol->sb, "Cannot allocate mft record because the maximum "
+			"number of inodes (2^32) has already been reached.");
+	up_write(&vol->mftbmp_lock);
+	return ERR_PTR(-ENOSPC);
+}
+
+/**
+ * ntfs_extent_mft_record_free - free an extent mft record on an ntfs volume
+ * @ni:		ntfs inode of the mapped extent mft record to free
+ * @m:		mapped extent mft record of the ntfs inode @ni
+ *
+ * Free the mapped extent mft record @m of the extent ntfs inode @ni.
+ *
+ * Note that this function unmaps the mft record and closes and destroys @ni
+ * internally and hence you cannot use either @ni or @m any more after this
+ * function returns success.
+ *
+ * On success return 0 and on error return -errno. @ni and @m are still valid
+ * in this case and have not been freed.
+ *
+ * For some errors an error message is displayed and the success code 0 is
+ * returned and the volume is then left dirty on umount. This makes sense in
+ * case we could not rollback the changes that were already done since the
+ * caller no longer wants to reference this mft record so it does not matter to
+ * the caller if something is wrong with it as long as it is properly detached
+ * from the base inode.
+ */
+int ntfs_extent_mft_record_free(ntfs_inode *ni, MFT_RECORD *m)
+{
+	unsigned long mft_no = ni->mft_no;
+	ntfs_volume *vol = ni->vol;
+	ntfs_inode *base_ni;
+	ntfs_inode **extent_nis;
+	int i, err;
+	le16 old_seq_no;
+	u16 seq_no;
+
+	/* Only real (non-attribute) extent inodes may be freed here. */
+	BUG_ON(NInoAttr(ni));
+	/* nr_extents == -1 marks @ni as an extent inode with a base inode. */
+	BUG_ON(ni->nr_extents != -1);
+
+	down(&ni->extent_lock);
+	base_ni = ni->ext.base_ntfs_ino;
+	up(&ni->extent_lock);
+
+	BUG_ON(base_ni->nr_extents <= 0);
+
+	ntfs_debug("Entering for extent inode 0x%lx, base inode 0x%lx.\n",
+			mft_no, base_ni->mft_no);
+
+	down(&base_ni->extent_lock);
+
+	/* Make sure we are holding the only reference to the extent inode. */
+	if (atomic_read(&ni->count) > 2) {
+		/*
+		 * NOTE(review): the message names the extent inode but prints
+		 * base_ni->mft_no; @mft_no looks intended here -- confirm.
+		 */
+		ntfs_error(vol->sb, "Tried to free busy extent inode 0x%lx, "
+				"not freeing.", base_ni->mft_no);
+		up(&base_ni->extent_lock);
+		return -EBUSY;
+	}
+
+	/* Dissociate the ntfs inode from the base inode. */
+	extent_nis = base_ni->ext.extent_ntfs_inos;
+	err = -ENOENT;
+	for (i = 0; i < base_ni->nr_extents; i++) {
+		if (ni != extent_nis[i])
+			continue;
+		/* Found it: close the gap left by removing entry @i. */
+		extent_nis += i;
+		base_ni->nr_extents--;
+		memmove(extent_nis, extent_nis + 1, (base_ni->nr_extents - i) *
+				sizeof(ntfs_inode*));
+		err = 0;
+		break;
+	}
+
+	up(&base_ni->extent_lock);
+
+	if (unlikely(err)) {
+		/*
+		 * An extent inode whose base inode does not know about it
+		 * indicates in-memory corruption; there is nothing sane to
+		 * roll back to, so die loudly.
+		 */
+		ntfs_error(vol->sb, "Extent inode 0x%lx is not attached to "
+				"its base inode 0x%lx.", mft_no,
+				base_ni->mft_no);
+		BUG();
+	}
+
+	/*
+	 * The extent inode is no longer attached to the base inode so no one
+	 * can get a reference to it any more.
+	 */
+
+	/* Mark the mft record as not in use. */
+	m->flags &= const_cpu_to_le16(~const_le16_to_cpu(MFT_RECORD_IN_USE));
+
+	/* Increment the sequence number, skipping zero, if it is not zero. */
+	old_seq_no = m->sequence_number;
+	seq_no = le16_to_cpu(old_seq_no);
+	if (seq_no == 0xffff)
+		seq_no = 1;
+	else if (seq_no)
+		seq_no++;
+	m->sequence_number = cpu_to_le16(seq_no);
+
+	/*
+	 * Set the ntfs inode dirty and write it out. We do not need to worry
+	 * about the base inode here since whatever caused the extent mft
+	 * record to be freed is guaranteed to do it already.
+	 */
+	NInoSetDirty(ni);
+	err = write_mft_record(ni, m, 0);
+	if (unlikely(err)) {
+		ntfs_error(vol->sb, "Failed to write mft record 0x%lx, not "
+				"freeing.", mft_no);
+		goto rollback;
+	}
+rollback_error:
+	/*
+	 * Fallen into from the success path; also jumped to when the rollback
+	 * itself fails, in which case the volume has been marked dirty and we
+	 * return 0 anyway (see the function comment above).
+	 */
+	/* Unmap and throw away the now freed extent inode. */
+	unmap_extent_mft_record(ni);
+	ntfs_clear_extent_inode(ni);
+
+	/* Clear the bit in the $MFT/$BITMAP corresponding to this record. */
+	down_write(&vol->mftbmp_lock);
+	err = ntfs_bitmap_clear_bit(vol->mftbmp_ino, mft_no);
+	up_write(&vol->mftbmp_lock);
+	if (unlikely(err)) {
+		/*
+		 * The extent inode is gone but we failed to deallocate it in
+		 * the mft bitmap. Just emit a warning and leave the volume
+		 * dirty on umount.
+		 */
+		ntfs_error(vol->sb, "Failed to clear bit in mft bitmap.%s", es);
+		NVolSetErrors(vol);
+	}
+	return 0;
+rollback:
+	/* Rollback what we did... */
+	down(&base_ni->extent_lock);
+	extent_nis = base_ni->ext.extent_ntfs_inos;
+	/* The extent array grows in chunks of 4; grow it if now full. */
+	if (!(base_ni->nr_extents & 3)) {
+		int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode*);
+
+		extent_nis = (ntfs_inode**)kmalloc(new_size, GFP_NOFS);
+		if (unlikely(!extent_nis)) {
+			ntfs_error(vol->sb, "Failed to allocate internal "
+					"buffer during rollback.%s", es);
+			up(&base_ni->extent_lock);
+			NVolSetErrors(vol);
+			goto rollback_error;
+		}
+		if (base_ni->nr_extents) {
+			BUG_ON(!base_ni->ext.extent_ntfs_inos);
+			/* Copy the old (nr_extents-sized) array contents. */
+			memcpy(extent_nis, base_ni->ext.extent_ntfs_inos,
+					new_size - 4 * sizeof(ntfs_inode*));
+			kfree(base_ni->ext.extent_ntfs_inos);
+		}
+		base_ni->ext.extent_ntfs_inos = extent_nis;
+	}
+	/* Restore the on-disk record state and re-attach @ni to its base. */
+	m->flags |= MFT_RECORD_IN_USE;
+	m->sequence_number = old_seq_no;
+	extent_nis[base_ni->nr_extents++] = ni;
+	up(&base_ni->extent_lock);
+	mark_mft_record_dirty(ni);
+	return err;
+}
#endif /* NTFS_RW */
diff --git a/fs/ntfs/mft.h b/fs/ntfs/mft.h
index 4fd9b5ec6e0c..407de2cef1d6 100644
--- a/fs/ntfs/mft.h
+++ b/fs/ntfs/mft.h
@@ -24,11 +24,11 @@
#define _LINUX_NTFS_MFT_H
#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
#include "inode.h"
-extern int format_mft_record(ntfs_inode *ni, MFT_RECORD *m);
-
extern MFT_RECORD *map_mft_record(ntfs_inode *ni);
extern void unmap_mft_record(ntfs_inode *ni);
@@ -76,6 +76,9 @@ static inline void mark_mft_record_dirty(ntfs_inode *ni)
__mark_mft_record_dirty(ni);
}
+extern int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
+ MFT_RECORD *m, int sync);
+
extern int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync);
/**
@@ -111,6 +114,14 @@ static inline int write_mft_record(ntfs_inode *ni, MFT_RECORD *m, int sync)
return err;
}
+extern BOOL ntfs_may_write_mft_record(ntfs_volume *vol,
+ const unsigned long mft_no, const MFT_RECORD *m,
+ ntfs_inode **locked_ni);
+
+extern ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode,
+ ntfs_inode *base_ni, MFT_RECORD **mrec);
+extern int ntfs_extent_mft_record_free(ntfs_inode *ni, MFT_RECORD *m);
+
#endif /* NTFS_RW */
#endif /* _LINUX_NTFS_MFT_H */
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 70b503a85f4d..ac5997bec5cd 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -23,8 +23,11 @@
#include <linux/dcache.h>
#include <linux/security.h>
-#include "ntfs.h"
+#include "attrib.h"
+#include "debug.h"
#include "dir.h"
+#include "mft.h"
+#include "ntfs.h"
/**
* ntfs_lookup - find the inode represented by a dentry in a directory inode
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index c3fda17158e6..fb71b9fff857 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -29,21 +29,12 @@
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
-#include <linux/buffer_head.h>
#include <linux/nls.h>
-#include <linux/pagemap.h>
#include <linux/smp.h>
-#include <asm/atomic.h>
#include "types.h"
-#include "debug.h"
-#include "malloc.h"
-#include "endian.h"
#include "volume.h"
-#include "inode.h"
#include "layout.h"
-#include "attrib.h"
-#include "mft.h"
typedef enum {
NTFS_BLOCK_SIZE = 512,
@@ -65,7 +56,6 @@ extern kmem_cache_t *ntfs_index_ctx_cache;
extern struct super_operations ntfs_sops;
extern struct address_space_operations ntfs_aops;
extern struct address_space_operations ntfs_mst_aops;
-extern struct address_space_operations ntfs_mft_aops;
extern struct file_operations ntfs_file_ops;
extern struct inode_operations ntfs_file_inode_ops;
@@ -87,72 +77,12 @@ static inline ntfs_volume *NTFS_SB(struct super_block *sb)
return sb->s_fs_info;
}
-/**
- * ntfs_unmap_page - release a page that was mapped using ntfs_map_page()
- * @page: the page to release
- *
- * Unpin, unmap and release a page that was obtained from ntfs_map_page().
- */
-static inline void ntfs_unmap_page(struct page *page)
-{
- kunmap(page);
- page_cache_release(page);
-}
-
-/**
- * ntfs_map_page - map a page into accessible memory, reading it if necessary
- * @mapping: address space for which to obtain the page
- * @index: index into the page cache for @mapping of the page to map
- *
- * Read a page from the page cache of the address space @mapping at position
- * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes.
- *
- * If the page is not in memory it is loaded from disk first using the readpage
- * method defined in the address space operations of @mapping and the page is
- * added to the page cache of @mapping in the process.
- *
- * If the page is in high memory it is mapped into memory directly addressible
- * by the kernel.
- *
- * Finally the page count is incremented, thus pinning the page into place.
- *
- * The above means that page_address(page) can be used on all pages obtained
- * with ntfs_map_page() to get the kernel virtual address of the page.
- *
- * When finished with the page, the caller has to call ntfs_unmap_page() to
- * unpin, unmap and release the page.
- *
- * Note this does not grant exclusive access. If such is desired, the caller
- * must provide it independently of the ntfs_{un}map_page() calls by using
- * a {rw_}semaphore or other means of serialization. A spin lock cannot be
- * used as ntfs_map_page() can block.
- *
- * The unlocked and uptodate page is returned on success or an encoded error
- * on failure. Caller has to test for error using the IS_ERR() macro on the
- * return value. If that evaluates to TRUE, the negative error code can be
- * obtained using PTR_ERR() on the return value of ntfs_map_page().
- */
-static inline struct page *ntfs_map_page(struct address_space *mapping,
- unsigned long index)
-{
- struct page *page = read_cache_page(mapping, index,
- (filler_t*)mapping->a_ops->readpage, NULL);
-
- if (!IS_ERR(page)) {
- wait_on_page_locked(page);
- kmap(page);
- if (PageUptodate(page) && !PageError(page))
- return page;
- ntfs_unmap_page(page);
- return ERR_PTR(-EIO);
- }
- return page;
-}
-
/* Declarations of functions and global variables. */
/* From fs/ntfs/compress.c */
extern int ntfs_read_compressed_block(struct page *page);
+extern int allocate_compression_buffers(void);
+extern void free_compression_buffers(void);
/* From fs/ntfs/super.c */
#define default_upcase_len 0x10000
@@ -166,10 +96,6 @@ typedef struct {
} option_t;
extern const option_t on_errors_arr[];
-/* From fs/ntfs/compress.c */
-extern int allocate_compression_buffers(void);
-extern void free_compression_buffers(void);
-
/* From fs/ntfs/mst.c */
extern int post_read_mst_fixup(NTFS_RECORD *b, const u32 size);
extern int pre_write_mst_fixup(NTFS_RECORD *b, const u32 size);
diff --git a/fs/ntfs/quota.c b/fs/ntfs/quota.c
index b72a85dd446f..8764ebd8d063 100644
--- a/fs/ntfs/quota.c
+++ b/fs/ntfs/quota.c
@@ -22,9 +22,10 @@
#ifdef NTFS_RW
-#include "ntfs.h"
#include "index.h"
#include "quota.h"
+#include "debug.h"
+#include "ntfs.h"
/**
* ntfs_mark_quotas_out_of_date - mark the quotas out of date on an ntfs volume
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
new file mode 100644
index 000000000000..50bfb6b5b3e0
--- /dev/null
+++ b/fs/ntfs/runlist.c
@@ -0,0 +1,1462 @@
+/**
+ * runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "debug.h"
+#include "dir.h"
+#include "endian.h"
+#include "malloc.h"
+#include "ntfs.h"
+
+/**
+ * ntfs_rl_mm - runlist memmove
+ *
+ * It is up to the caller to serialize access to the runlist @base.
+ */
+static inline void ntfs_rl_mm(runlist_element *base, int dst, int src,
+ int size)
+{
+ if (likely((dst != src) && (size > 0)))
+ memmove(base + dst, base + src, size * sizeof (*base));
+}
+
+/**
+ * ntfs_rl_mc - runlist memory copy
+ *
+ * It is up to the caller to serialize access to the runlists @dstbase and
+ * @srcbase.
+ */
+static inline void ntfs_rl_mc(runlist_element *dstbase, int dst,
+ runlist_element *srcbase, int src, int size)
+{
+ if (likely(size > 0))
+ memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase));
+}
+
+/**
+ * ntfs_rl_realloc - Reallocate memory for runlists
+ * @rl: original runlist
+ * @old_size: number of runlist elements in the original runlist @rl
+ * @new_size: number of runlist elements we need space for
+ *
+ * As the runlists grow, more memory will be required. To prevent the
+ * kernel having to allocate and reallocate large numbers of small bits of
+ * memory, this function returns an entire page of memory.
+ *
+ * It is up to the caller to serialize access to the runlist @rl.
+ *
+ * N.B. If the new allocation doesn't require a different number of pages in
+ * memory, the function will return the original pointer.
+ *
+ * On success, return a pointer to the newly allocated, or recycled, memory.
+ * On error, return -errno. The following error codes are defined:
+ * -ENOMEM - Not enough memory to allocate runlist array.
+ * -EINVAL - Invalid parameters were passed in.
+ */
+static inline runlist_element *ntfs_rl_realloc(runlist_element *rl,
+ int old_size, int new_size)
+{
+ runlist_element *new_rl;
+
+ old_size = PAGE_ALIGN(old_size * sizeof(*rl));
+ new_size = PAGE_ALIGN(new_size * sizeof(*rl));
+ if (old_size == new_size)
+ return rl;
+
+ new_rl = ntfs_malloc_nofs(new_size);
+ if (unlikely(!new_rl))
+ return ERR_PTR(-ENOMEM);
+
+ if (likely(rl != NULL)) {
+ if (unlikely(old_size > new_size))
+ old_size = new_size;
+ memcpy(new_rl, rl, old_size);
+ ntfs_free(rl);
+ }
+ return new_rl;
+}
+
+/**
+ * ntfs_are_rl_mergeable - test if two runlists can be joined together
+ * @dst: original runlist
+ * @src: new runlist to test for mergeability with @dst
+ *
+ * Test if two runlists can be joined together. For this, their VCNs and LCNs
+ * must be adjacent.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ *
+ * Return: TRUE Success, the runlists can be merged.
+ * FALSE Failure, the runlists cannot be merged.
+ */
+static inline BOOL ntfs_are_rl_mergeable(runlist_element *dst,
+ runlist_element *src)
+{
+ BUG_ON(!dst);
+ BUG_ON(!src);
+
+ if ((dst->lcn < 0) || (src->lcn < 0)) /* Are we merging holes? */
+ return FALSE;
+ if ((dst->lcn + dst->length) != src->lcn) /* Are the runs contiguous? */
+ return FALSE;
+ if ((dst->vcn + dst->length) != src->vcn) /* Are the runs misaligned? */
+ return FALSE;
+
+ return TRUE;
+}
+
+/**
+ * __ntfs_rl_merge - merge two runlists without testing if they can be merged
+ * @dst: original, destination runlist
+ * @src: new runlist to merge with @dst
+ *
+ * Merge the two runlists, writing into the destination runlist @dst. The
+ * caller must make sure the runlists can be merged or this will corrupt the
+ * destination runlist.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ */
+static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src)
+{
+ dst->length += src->length;
+}
+
+/**
+ * ntfs_rl_merge - test if two runlists can be joined together and merge them
+ * @dst: original, destination runlist
+ * @src: new runlist to merge with @dst
+ *
+ * Test if two runlists can be joined together. For this, their VCNs and LCNs
+ * must be adjacent. If they can be merged, perform the merge, writing into
+ * the destination runlist @dst.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ *
+ * Return: TRUE Success, the runlists have been merged.
+ * FALSE Failure, the runlists cannot be merged and have not been
+ * modified.
+ */
+static inline BOOL ntfs_rl_merge(runlist_element *dst, runlist_element *src)
+{
+ BOOL merge = ntfs_are_rl_mergeable(dst, src);
+
+ if (merge)
+ __ntfs_rl_merge(dst, src);
+ return merge;
+}
+
+/**
+ * ntfs_rl_append - append a runlist after a given element
+ * @dst: original runlist to be worked on
+ * @dsize: number of elements in @dst (including end marker)
+ * @src: runlist to be inserted into @dst
+ * @ssize: number of elements in @src (excluding end marker)
+ * @loc: append the new runlist @src after this element in @dst
+ *
+ * Append the runlist @src after element @loc in @dst. Merge the right end of
+ * the new runlist, if necessary. Adjust the size of the hole before the
+ * appended runlist.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ *
+ * On success, return a pointer to the new, combined, runlist. Note, both
+ * runlists @dst and @src are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned runlist
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both runlists are left unmodified. The following
+ * error codes are defined:
+ * -ENOMEM - Not enough memory to allocate runlist array.
+ * -EINVAL - Invalid parameters were passed in.
+ */
+static inline runlist_element *ntfs_rl_append(runlist_element *dst,
+ int dsize, runlist_element *src, int ssize, int loc)
+{
+ BOOL right;
+ int magic;
+
+ BUG_ON(!dst);
+ BUG_ON(!src);
+
+ /* First, check if the right hand end needs merging. */
+ right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
+
+ /* Space required: @dst size + @src size, less one if we merged. */
+ dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right);
+ if (IS_ERR(dst))
+ return dst;
+ /*
+ * We are guaranteed to succeed from here so can start modifying the
+ * original runlists.
+ */
+
+ /* First, merge the right hand end, if necessary. */
+ if (right)
+ __ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
+
+ magic = loc + ssize;
+
+ /* Move the tail of @dst out of the way, then copy in @src. */
+ ntfs_rl_mm(dst, magic + 1, loc + 1 + right, dsize - loc - 1 - right);
+ ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
+
+ /* Adjust the size of the preceding hole. */
+ dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
+
+ /* We may have changed the length of the file, so fix the end marker */
+ if (dst[magic + 1].lcn == LCN_ENOENT)
+ dst[magic + 1].vcn = dst[magic].vcn + dst[magic].length;
+
+ return dst;
+}
+
+/**
+ * ntfs_rl_insert - insert a runlist into another
+ * @dst: original runlist to be worked on
+ * @dsize: number of elements in @dst (including end marker)
+ * @src: new runlist to be inserted
+ * @ssize: number of elements in @src (excluding end marker)
+ * @loc: insert the new runlist @src before this element in @dst
+ *
+ * Insert the runlist @src before element @loc in the runlist @dst. Merge the
+ * left end of the new runlist, if necessary. Adjust the size of the hole
+ * after the inserted runlist.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ *
+ * On success, return a pointer to the new, combined, runlist. Note, both
+ * runlists @dst and @src are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned runlist
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both runlists are left unmodified. The following
+ * error codes are defined:
+ * -ENOMEM - Not enough memory to allocate runlist array.
+ * -EINVAL - Invalid parameters were passed in.
+ */
+static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
+ int dsize, runlist_element *src, int ssize, int loc)
+{
+ BOOL left = FALSE;
+ BOOL disc = FALSE; /* Discontinuity */
+ BOOL hole = FALSE; /* Following a hole */
+ int magic;
+
+ BUG_ON(!dst);
+ BUG_ON(!src);
+
+ /* disc => Discontinuity between the end of @dst and the start of @src.
+ * This means we might need to insert a hole.
+ * hole => @dst ends with a hole or an unmapped region which we can
+ * extend to match the discontinuity. */
+ if (loc == 0)
+ disc = (src[0].vcn > 0);
+ else {
+ s64 merged_length;
+
+ left = ntfs_are_rl_mergeable(dst + loc - 1, src);
+
+ merged_length = dst[loc - 1].length;
+ if (left)
+ merged_length += src->length;
+
+ disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
+ if (disc)
+ hole = (dst[loc - 1].lcn == LCN_HOLE);
+ }
+
+ /* Space required: @dst size + @src size, less one if we merged, plus
+ * one if there was a discontinuity, less one for a trailing hole. */
+ dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc - hole);
+ if (IS_ERR(dst))
+ return dst;
+ /*
+ * We are guaranteed to succeed from here so can start modifying the
+ * original runlist.
+ */
+
+ if (left)
+ __ntfs_rl_merge(dst + loc - 1, src);
+
+ magic = loc + ssize - left + disc - hole;
+
+ /* Move the tail of @dst out of the way, then copy in @src. */
+ ntfs_rl_mm(dst, magic, loc, dsize - loc);
+ ntfs_rl_mc(dst, loc + disc - hole, src, left, ssize - left);
+
+ /* Adjust the VCN of the last run ... */
+ if (dst[magic].lcn <= LCN_HOLE)
+ dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
+ /* ... and the length. */
+ if (dst[magic].lcn == LCN_HOLE || dst[magic].lcn == LCN_RL_NOT_MAPPED)
+ dst[magic].length = dst[magic + 1].vcn - dst[magic].vcn;
+
+ /* Writing beyond the end of the file and there's a discontinuity. */
+ if (disc) {
+ if (hole)
+ dst[loc - 1].length = dst[loc].vcn - dst[loc - 1].vcn;
+ else {
+ if (loc > 0) {
+ dst[loc].vcn = dst[loc - 1].vcn +
+ dst[loc - 1].length;
+ dst[loc].length = dst[loc + 1].vcn -
+ dst[loc].vcn;
+ } else {
+ dst[loc].vcn = 0;
+ dst[loc].length = dst[loc + 1].vcn;
+ }
+ dst[loc].lcn = LCN_RL_NOT_MAPPED;
+ }
+
+ magic += hole;
+
+ if (dst[magic].lcn == LCN_ENOENT)
+ dst[magic].vcn = dst[magic - 1].vcn +
+ dst[magic - 1].length;
+ }
+ return dst;
+}
+
+/**
+ * ntfs_rl_replace - overwrite a runlist element with another runlist
+ * @dst: original runlist to be worked on
+ * @dsize: number of elements in @dst (including end marker)
+ * @src: new runlist to be inserted
+ * @ssize: number of elements in @src (excluding end marker)
+ * @loc: index in runlist @dst to overwrite with @src
+ *
+ * Replace the runlist element @dst at @loc with @src. Merge the left and
+ * right ends of the inserted runlist, if necessary.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ *
+ * On success, return a pointer to the new, combined, runlist. Note, both
+ * runlists @dst and @src are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned runlist
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both runlists are left unmodified. The following
+ * error codes are defined:
+ * -ENOMEM - Not enough memory to allocate runlist array.
+ * -EINVAL - Invalid parameters were passed in.
+ */
+static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
+ int dsize, runlist_element *src, int ssize, int loc)
+{
+ BOOL left = FALSE;
+ BOOL right;
+ int magic;
+
+ BUG_ON(!dst);
+ BUG_ON(!src);
+
+ /* First, merge the left and right ends, if necessary. */
+ right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
+ if (loc > 0)
+ left = ntfs_are_rl_mergeable(dst + loc - 1, src);
+
+ /* Allocate some space. We'll need less if the left, right, or both
+ * ends were merged. */
+ dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right);
+ if (IS_ERR(dst))
+ return dst;
+ /*
+ * We are guaranteed to succeed from here so can start modifying the
+ * original runlists.
+ */
+ if (right)
+ __ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
+ if (left)
+ __ntfs_rl_merge(dst + loc - 1, src);
+
+ /* FIXME: What does this mean? (AIA) */
+ magic = loc + ssize - left;
+
+ /* Move the tail of @dst out of the way, then copy in @src. */
+ ntfs_rl_mm(dst, magic, loc + right + 1, dsize - loc - right - 1);
+ ntfs_rl_mc(dst, loc, src, left, ssize - left);
+
+ /* We may have changed the length of the file, so fix the end marker */
+ if (dst[magic].lcn == LCN_ENOENT)
+ dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
+ return dst;
+}
+
+/**
+ * ntfs_rl_split - insert a runlist into the centre of a hole
+ * @dst: original runlist to be worked on
+ * @dsize: number of elements in @dst (including end marker)
+ * @src: new runlist to be inserted
+ * @ssize: number of elements in @src (excluding end marker)
+ * @loc: index in runlist @dst at which to split and insert @src
+ *
+ * Split the runlist @dst at @loc into two and insert @src in between the two
+ * fragments. No merging of runlists is necessary. Adjust the size of the
+ * holes either side.
+ *
+ * It is up to the caller to serialize access to the runlists @dst and @src.
+ *
+ * On success, return a pointer to the new, combined, runlist. Note, both
+ * runlists @dst and @src are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned runlist
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both runlists are left unmodified. The following
+ * error codes are defined:
+ * -ENOMEM - Not enough memory to allocate runlist array.
+ * -EINVAL - Invalid parameters were passed in.
+ */
+static inline runlist_element *ntfs_rl_split(runlist_element *dst, int dsize,
+ runlist_element *src, int ssize, int loc)
+{
+ BUG_ON(!dst);
+ BUG_ON(!src);
+
+ /* Space required: @dst size + @src size + one new hole. */
+ dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1);
+ if (IS_ERR(dst))
+ return dst;
+ /*
+ * We are guaranteed to succeed from here so can start modifying the
+ * original runlists.
+ */
+
+ /* Move the tail of @dst out of the way, then copy in @src. */
+ ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc);
+ ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
+
+	/* Adjust the size of the holes on either side of @src. */
+ dst[loc].length = dst[loc+1].vcn - dst[loc].vcn;
+ dst[loc+ssize+1].vcn = dst[loc+ssize].vcn + dst[loc+ssize].length;
+ dst[loc+ssize+1].length = dst[loc+ssize+2].vcn - dst[loc+ssize+1].vcn;
+
+ return dst;
+}
+
+/**
+ * ntfs_runlists_merge - merge two runlists into one
+ * @drl: original runlist to be worked on
+ * @srl: new runlist to be merged into @drl
+ *
+ * First we sanity check the two runlists @srl and @drl to make sure that they
+ * are sensible and can be merged. The runlist @srl must be either after the
+ * runlist @drl or completely within a hole (or unmapped region) in @drl.
+ *
+ * It is up to the caller to serialize access to the runlists @drl and @srl.
+ *
+ * Merging of runlists is necessary in two cases:
+ * 1. When attribute lists are used and a further extent is being mapped.
+ * 2. When new clusters are allocated to fill a hole or extend a file.
+ *
+ * There are four possible ways @srl can be merged. It can:
+ * - be inserted at the beginning of a hole,
+ * - split the hole in two and be inserted between the two fragments,
+ * - be appended at the end of a hole, or it can
+ * - replace the whole hole.
+ * It can also be appended to the end of the runlist, which is just a variant
+ * of the insert case.
+ *
+ * On success, return a pointer to the new, combined, runlist. Note, both
+ * runlists @drl and @srl are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned runlist
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both runlists are left unmodified. The following
+ * error codes are defined:
+ * -ENOMEM - Not enough memory to allocate runlist array.
+ * -EINVAL - Invalid parameters were passed in.
+ * -ERANGE - The runlists overlap and cannot be merged.
+ */
+runlist_element *ntfs_runlists_merge(runlist_element *drl,
+ runlist_element *srl)
+{
+ int di, si; /* Current index into @[ds]rl. */
+ int sstart; /* First index with lcn > LCN_RL_NOT_MAPPED. */
+ int dins; /* Index into @drl at which to insert @srl. */
+ int dend, send; /* Last index into @[ds]rl. */
+ int dfinal, sfinal; /* The last index into @[ds]rl with
+ lcn >= LCN_HOLE. */
+ int marker = 0;
+ VCN marker_vcn = 0;
+
+#ifdef DEBUG
+ ntfs_debug("dst:");
+ ntfs_debug_dump_runlist(drl);
+ ntfs_debug("src:");
+ ntfs_debug_dump_runlist(srl);
+#endif
+
+ /* Check for silly calling... */
+ if (unlikely(!srl))
+ return drl;
+ if (IS_ERR(srl) || IS_ERR(drl))
+ return ERR_PTR(-EINVAL);
+
+ /* Check for the case where the first mapping is being done now. */
+ if (unlikely(!drl)) {
+ drl = srl;
+ /* Complete the source runlist if necessary. */
+ if (unlikely(drl[0].vcn)) {
+ /* Scan to the end of the source runlist. */
+ for (dend = 0; likely(drl[dend].length); dend++)
+ ;
+ drl = ntfs_rl_realloc(drl, dend, dend + 1);
+ if (IS_ERR(drl))
+ return drl;
+ /* Insert start element at the front of the runlist. */
+ ntfs_rl_mm(drl, 1, 0, dend);
+ drl[0].vcn = 0;
+ drl[0].lcn = LCN_RL_NOT_MAPPED;
+ drl[0].length = drl[1].vcn;
+ }
+ goto finished;
+ }
+
+ si = di = 0;
+
+ /* Skip any unmapped start element(s) in the source runlist. */
+ while (srl[si].length && srl[si].lcn < LCN_HOLE)
+ si++;
+
+ /* Can't have an entirely unmapped source runlist. */
+ BUG_ON(!srl[si].length);
+
+ /* Record the starting points. */
+ sstart = si;
+
+ /*
+ * Skip forward in @drl until we reach the position where @srl needs to
+ * be inserted. If we reach the end of @drl, @srl just needs to be
+ * appended to @drl.
+ */
+ for (; drl[di].length; di++) {
+ if (drl[di].vcn + drl[di].length > srl[sstart].vcn)
+ break;
+ }
+ dins = di;
+
+ /* Sanity check for illegal overlaps. */
+ if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) &&
+ (srl[si].lcn >= 0)) {
+ ntfs_error(NULL, "Run lists overlap. Cannot merge!");
+ return ERR_PTR(-ERANGE);
+ }
+
+ /* Scan to the end of both runlists in order to know their sizes. */
+ for (send = si; srl[send].length; send++)
+ ;
+ for (dend = di; drl[dend].length; dend++)
+ ;
+
+ if (srl[send].lcn == LCN_ENOENT)
+ marker_vcn = srl[marker = send].vcn;
+
+ /* Scan to the last element with lcn >= LCN_HOLE. */
+ for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE; sfinal--)
+ ;
+ for (dfinal = dend; dfinal >= 0 && drl[dfinal].lcn < LCN_HOLE; dfinal--)
+ ;
+
+ {
+ BOOL start;
+ BOOL finish;
+ int ds = dend + 1; /* Number of elements in drl & srl */
+ int ss = sfinal - sstart + 1;
+
+ start = ((drl[dins].lcn < LCN_RL_NOT_MAPPED) || /* End of file */
+ (drl[dins].vcn == srl[sstart].vcn)); /* Start of hole */
+ finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) && /* End of file */
+ ((drl[dins].vcn + drl[dins].length) <= /* End of hole */
+ (srl[send - 1].vcn + srl[send - 1].length)));
+
+ /* Or we'll lose an end marker */
+ if (start && finish && (drl[dins].length == 0))
+ ss++;
+ if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
+ finish = FALSE;
+#if 0
+ ntfs_debug("dfinal = %i, dend = %i", dfinal, dend);
+ ntfs_debug("sstart = %i, sfinal = %i, send = %i", sstart, sfinal, send);
+ ntfs_debug("start = %i, finish = %i", start, finish);
+ ntfs_debug("ds = %i, ss = %i, dins = %i", ds, ss, dins);
+#endif
+ if (start) {
+ if (finish)
+ drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins);
+ else
+ drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins);
+ } else {
+ if (finish)
+ drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins);
+ else
+ drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins);
+ }
+ if (IS_ERR(drl)) {
+ ntfs_error(NULL, "Merge failed.");
+ return drl;
+ }
+ ntfs_free(srl);
+ if (marker) {
+ ntfs_debug("Triggering marker code.");
+ for (ds = dend; drl[ds].length; ds++)
+ ;
+ /* We only need to care if @srl ended after @drl. */
+ if (drl[ds].vcn <= marker_vcn) {
+ int slots = 0;
+
+ if (drl[ds].vcn == marker_vcn) {
+ ntfs_debug("Old marker = 0x%llx, replacing "
+ "with LCN_ENOENT.",
+ (unsigned long long)
+ drl[ds].lcn);
+ drl[ds].lcn = LCN_ENOENT;
+ goto finished;
+ }
+ /*
+ * We need to create an unmapped runlist element in
+ * @drl or extend an existing one before adding the
+ * ENOENT terminator.
+ */
+ if (drl[ds].lcn == LCN_ENOENT) {
+ ds--;
+ slots = 1;
+ }
+ if (drl[ds].lcn != LCN_RL_NOT_MAPPED) {
+ /* Add an unmapped runlist element. */
+ if (!slots) {
+ /* FIXME/TODO: We need to have the
+ * extra memory already! (AIA) */
+ drl = ntfs_rl_realloc(drl, ds, ds + 2);
+ if (!drl)
+ goto critical_error;
+ slots = 2;
+ }
+ ds++;
+ /* Need to set vcn if it isn't set already. */
+ if (slots != 1)
+ drl[ds].vcn = drl[ds - 1].vcn +
+ drl[ds - 1].length;
+ drl[ds].lcn = LCN_RL_NOT_MAPPED;
+ /* We now used up a slot. */
+ slots--;
+ }
+ drl[ds].length = marker_vcn - drl[ds].vcn;
+ /* Finally add the ENOENT terminator. */
+ ds++;
+ if (!slots) {
+ /* FIXME/TODO: We need to have the extra
+ * memory already! (AIA) */
+ drl = ntfs_rl_realloc(drl, ds, ds + 1);
+ if (!drl)
+ goto critical_error;
+ }
+ drl[ds].vcn = marker_vcn;
+ drl[ds].lcn = LCN_ENOENT;
+ drl[ds].length = (s64)0;
+ }
+ }
+ }
+
+finished:
+ /* The merge was completed successfully. */
+ ntfs_debug("Merged runlist:");
+ ntfs_debug_dump_runlist(drl);
+ return drl;
+
+critical_error:
+ /* Critical error! We cannot afford to fail here. */
+ ntfs_error(NULL, "Critical error! Not enough memory.");
+ panic("NTFS: Cannot continue.");
+}
+
+/**
+ * ntfs_mapping_pairs_decompress - convert mapping pairs array to runlist
+ * @vol: ntfs volume on which the attribute resides
+ * @attr: attribute record whose mapping pairs array to decompress
+ * @old_rl: optional runlist in which to insert @attr's runlist
+ *
+ * It is up to the caller to serialize access to the runlist @old_rl.
+ *
+ * Decompress the attribute @attr's mapping pairs array into a runlist. On
+ * success, return the decompressed runlist.
+ *
+ * If @old_rl is not NULL, decompressed runlist is inserted into the
+ * appropriate place in @old_rl and the resultant, combined runlist is
+ * returned. The original @old_rl is deallocated.
+ *
+ * On error, return -errno. @old_rl is left unmodified in that case.
+ *
+ * The following error codes are defined:
+ * -ENOMEM - Not enough memory to allocate runlist array.
+ * -EIO - Corrupt runlist.
+ * -EINVAL - Invalid parameters were passed in.
+ * -ERANGE - The two runlists overlap.
+ *
+ * FIXME: For now we take the conceptionally simplest approach of creating the
+ * new runlist disregarding the already existing one and then splicing the
+ * two into one, if that is possible (we check for overlap and discard the new
+ * runlist if overlap present before returning ERR_PTR(-ERANGE)).
+ */
+runlist_element *ntfs_mapping_pairs_decompress(const ntfs_volume *vol,
+ const ATTR_RECORD *attr, runlist_element *old_rl)
+{
+ VCN vcn; /* Current vcn. */
+ LCN lcn; /* Current lcn. */
+ s64 deltaxcn; /* Change in [vl]cn. */
+ runlist_element *rl; /* The output runlist. */
+ u8 *buf; /* Current position in mapping pairs array. */
+ u8 *attr_end; /* End of attribute. */
+ int rlsize; /* Size of runlist buffer. */
+ u16 rlpos; /* Current runlist position in units of
+ runlist_elements. */
+ u8 b; /* Current byte offset in buf. */
+
+#ifdef DEBUG
+ /* Make sure attr exists and is non-resident. */
+ if (!attr || !attr->non_resident || sle64_to_cpu(
+ attr->data.non_resident.lowest_vcn) < (VCN)0) {
+ ntfs_error(vol->sb, "Invalid arguments.");
+ return ERR_PTR(-EINVAL);
+ }
+#endif
+ /* Start at vcn = lowest_vcn and lcn 0. */
+ vcn = sle64_to_cpu(attr->data.non_resident.lowest_vcn);
+ lcn = 0;
+ /* Get start of the mapping pairs array. */
+ buf = (u8*)attr + le16_to_cpu(
+ attr->data.non_resident.mapping_pairs_offset);
+ attr_end = (u8*)attr + le32_to_cpu(attr->length);
+ if (unlikely(buf < (u8*)attr || buf > attr_end)) {
+ ntfs_error(vol->sb, "Corrupt attribute.");
+ return ERR_PTR(-EIO);
+ }
+ /* Current position in runlist array. */
+ rlpos = 0;
+ /* Allocate first page and set current runlist size to one page. */
+ rl = ntfs_malloc_nofs(rlsize = PAGE_SIZE);
+ if (unlikely(!rl))
+ return ERR_PTR(-ENOMEM);
+ /* Insert unmapped starting element if necessary. */
+ if (vcn) {
+ rl->vcn = 0;
+ rl->lcn = LCN_RL_NOT_MAPPED;
+ rl->length = vcn;
+ rlpos++;
+ }
+ while (buf < attr_end && *buf) {
+ /*
+ * Allocate more memory if needed, including space for the
+ * not-mapped and terminator elements. ntfs_malloc_nofs()
+ * operates on whole pages only.
+ */
+ if (((rlpos + 3) * sizeof(*old_rl)) > rlsize) {
+ runlist_element *rl2;
+
+ rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE);
+ if (unlikely(!rl2)) {
+ ntfs_free(rl);
+ return ERR_PTR(-ENOMEM);
+ }
+ memcpy(rl2, rl, rlsize);
+ ntfs_free(rl);
+ rl = rl2;
+ rlsize += PAGE_SIZE;
+ }
+ /* Enter the current vcn into the current runlist element. */
+ rl[rlpos].vcn = vcn;
+ /*
+ * Get the change in vcn, i.e. the run length in clusters.
+ * Doing it this way ensures that we signextend negative values.
+ * A negative run length doesn't make any sense, but hey, I
+ * didn't make up the NTFS specs and Windows NT4 treats the run
+ * length as a signed value so that's how it is...
+ */
+ b = *buf & 0xf;
+ if (b) {
+ if (unlikely(buf + b > attr_end))
+ goto io_error;
+ for (deltaxcn = (s8)buf[b--]; b; b--)
+ deltaxcn = (deltaxcn << 8) + buf[b];
+ } else { /* The length entry is compulsory. */
+ ntfs_error(vol->sb, "Missing length entry in mapping "
+ "pairs array.");
+ deltaxcn = (s64)-1;
+ }
+ /*
+ * Assume a negative length to indicate data corruption and
+ * hence clean-up and return NULL.
+ */
+ if (unlikely(deltaxcn < 0)) {
+ ntfs_error(vol->sb, "Invalid length in mapping pairs "
+ "array.");
+ goto err_out;
+ }
+ /*
+ * Enter the current run length into the current runlist
+ * element.
+ */
+ rl[rlpos].length = deltaxcn;
+ /* Increment the current vcn by the current run length. */
+ vcn += deltaxcn;
+ /*
+ * There might be no lcn change at all, as is the case for
+ * sparse clusters on NTFS 3.0+, in which case we set the lcn
+ * to LCN_HOLE.
+ */
+ if (!(*buf & 0xf0))
+ rl[rlpos].lcn = LCN_HOLE;
+ else {
+ /* Get the lcn change which really can be negative. */
+ u8 b2 = *buf & 0xf;
+ b = b2 + ((*buf >> 4) & 0xf);
+ if (buf + b > attr_end)
+ goto io_error;
+ for (deltaxcn = (s8)buf[b--]; b > b2; b--)
+ deltaxcn = (deltaxcn << 8) + buf[b];
+ /* Change the current lcn to its new value. */
+ lcn += deltaxcn;
+#ifdef DEBUG
+ /*
+ * On NTFS 1.2-, apparently can have lcn == -1 to
+ * indicate a hole. But we haven't verified ourselves
+ * whether it is really the lcn or the deltaxcn that is
+ * -1. So if either is found give us a message so we
+ * can investigate it further!
+ */
+ if (vol->major_ver < 3) {
+ if (unlikely(deltaxcn == (LCN)-1))
+ ntfs_error(vol->sb, "lcn delta == -1");
+ if (unlikely(lcn == (LCN)-1))
+ ntfs_error(vol->sb, "lcn == -1");
+ }
+#endif
+ /* Check lcn is not below -1. */
+ if (unlikely(lcn < (LCN)-1)) {
+ ntfs_error(vol->sb, "Invalid LCN < -1 in "
+ "mapping pairs array.");
+ goto err_out;
+ }
+ /* Enter the current lcn into the runlist element. */
+ rl[rlpos].lcn = lcn;
+ }
+ /* Get to the next runlist element. */
+ rlpos++;
+ /* Increment the buffer position to the next mapping pair. */
+ buf += (*buf & 0xf) + ((*buf >> 4) & 0xf) + 1;
+ }
+ if (unlikely(buf >= attr_end))
+ goto io_error;
+ /*
+ * If there is a highest_vcn specified, it must be equal to the final
+ * vcn in the runlist - 1, or something has gone badly wrong.
+ */
+ deltaxcn = sle64_to_cpu(attr->data.non_resident.highest_vcn);
+ if (unlikely(deltaxcn && vcn - 1 != deltaxcn)) {
+mpa_err:
+ ntfs_error(vol->sb, "Corrupt mapping pairs array in "
+ "non-resident attribute.");
+ goto err_out;
+ }
+ /* Setup not mapped runlist element if this is the base extent. */
+ if (!attr->data.non_resident.lowest_vcn) {
+ VCN max_cluster;
+
+ max_cluster = (sle64_to_cpu(
+ attr->data.non_resident.allocated_size) +
+ vol->cluster_size - 1) >>
+ vol->cluster_size_bits;
+ /*
+ * If there is a difference between the highest_vcn and the
+ * highest cluster, the runlist is either corrupt or, more
+ * likely, there are more extents following this one.
+ */
+ if (deltaxcn < --max_cluster) {
+ ntfs_debug("More extents to follow; deltaxcn = 0x%llx, "
+ "max_cluster = 0x%llx",
+ (unsigned long long)deltaxcn,
+ (unsigned long long)max_cluster);
+ rl[rlpos].vcn = vcn;
+ vcn += rl[rlpos].length = max_cluster - deltaxcn;
+ rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
+ rlpos++;
+ } else if (unlikely(deltaxcn > max_cluster)) {
+ ntfs_error(vol->sb, "Corrupt attribute. deltaxcn = "
+ "0x%llx, max_cluster = 0x%llx",
+ (unsigned long long)deltaxcn,
+ (unsigned long long)max_cluster);
+ goto mpa_err;
+ }
+ rl[rlpos].lcn = LCN_ENOENT;
+ } else /* Not the base extent. There may be more extents to follow. */
+ rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
+
+ /* Setup terminating runlist element. */
+ rl[rlpos].vcn = vcn;
+ rl[rlpos].length = (s64)0;
+ /* If no existing runlist was specified, we are done. */
+ if (!old_rl) {
+ ntfs_debug("Mapping pairs array successfully decompressed:");
+ ntfs_debug_dump_runlist(rl);
+ return rl;
+ }
+ /* Now combine the new and old runlists checking for overlaps. */
+ old_rl = ntfs_runlists_merge(old_rl, rl);
+ if (likely(!IS_ERR(old_rl)))
+ return old_rl;
+ ntfs_free(rl);
+ ntfs_error(vol->sb, "Failed to merge runlists.");
+ return old_rl;
+io_error:
+ ntfs_error(vol->sb, "Corrupt attribute.");
+err_out:
+ ntfs_free(rl);
+ return ERR_PTR(-EIO);
+}
+
+/**
+ * ntfs_rl_vcn_to_lcn - convert a vcn into a lcn given a runlist
+ * @rl: runlist to use for conversion
+ * @vcn: vcn to convert
+ *
+ * Convert the virtual cluster number @vcn of an attribute into a logical
+ * cluster number (lcn) of a device using the runlist @rl to map vcns to their
+ * corresponding lcns.
+ *
+ * It is up to the caller to serialize access to the runlist @rl.
+ *
+ * Since lcns must be >= 0, we use negative return values with special meaning:
+ *
+ * Return value Meaning / Description
+ * ==================================================
+ * -1 = LCN_HOLE Hole / not allocated on disk.
+ * -2 = LCN_RL_NOT_MAPPED This is part of the runlist which has not been
+ * inserted into the runlist yet.
+ * -3 = LCN_ENOENT There is no such vcn in the attribute.
+ *
+ * Locking: - The caller must have locked the runlist (for reading or writing).
+ * - This function does not touch the lock.
+ */
+LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn)
+{
+ int i;
+
+ BUG_ON(vcn < 0);
+ /*
+ * If rl is NULL, assume that we have found an unmapped runlist. The
+ * caller can then attempt to map it and fail appropriately if
+ * necessary.
+ */
+ if (unlikely(!rl))
+ return LCN_RL_NOT_MAPPED;
+
+ /* Catch out of lower bounds vcn. */
+ if (unlikely(vcn < rl[0].vcn))
+ return LCN_ENOENT;
+
+ for (i = 0; likely(rl[i].length); i++) {
+ if (unlikely(vcn < rl[i+1].vcn)) {
+ if (likely(rl[i].lcn >= (LCN)0))
+ return rl[i].lcn + (vcn - rl[i].vcn);
+ return rl[i].lcn;
+ }
+ }
+ /*
+ * The terminator element is setup to the correct value, i.e. one of
+ * LCN_HOLE, LCN_RL_NOT_MAPPED, or LCN_ENOENT.
+ */
+ if (likely(rl[i].lcn < (LCN)0))
+ return rl[i].lcn;
+ /* Just in case... We could replace this with BUG() some day. */
+ return LCN_ENOENT;
+}
+
+/**
+ * ntfs_get_nr_significant_bytes - get number of bytes needed to store a number
+ * @n: number for which to get the number of bytes for
+ *
+ * Return the number of bytes required to store @n unambiguously as
+ * a signed number.
+ *
+ * This is used in the context of the mapping pairs array to determine how
+ * many bytes will be needed in the array to store a given logical cluster
+ * number (lcn) or a specific run length.
+ *
+ * Return the number of bytes required. This function cannot fail.
+ */
+static inline int ntfs_get_nr_significant_bytes(const s64 n)
+{
+ s64 l = n;
+ int i;
+ s8 j;
+
+ i = 0;
+ do {
+ l >>= 8;
+ i++;
+ } while (l != 0 && l != -1);
+ j = (n >> 8 * (i - 1)) & 0xff;
+ /* If the sign bit is wrong, we need an extra byte. */
+ if ((n < 0 && j >= 0) || (n > 0 && j < 0))
+ i++;
+ return i;
+}
+
+/**
+ * ntfs_get_size_for_mapping_pairs - get bytes needed for mapping pairs array
+ * @vol: ntfs volume (needed for the ntfs version)
+ * @rl: locked runlist to determine the size of the mapping pairs of
+ * @start_vcn: vcn at which to start the mapping pairs array
+ *
+ * Walk the locked runlist @rl and calculate the size in bytes of the mapping
+ * pairs array corresponding to the runlist @rl, starting at vcn @start_vcn.
+ * This for example allows us to allocate a buffer of the right size when
+ * building the mapping pairs array.
+ *
+ * If @rl is NULL, just return 1 (for the single terminator byte).
+ *
+ * Return the calculated size in bytes on success. On error, return -errno.
+ * The following error codes are defined:
+ * -EINVAL - Run list contains unmapped elements. Make sure to only pass
+ * fully mapped runlists to this function.
+ * -EIO - The runlist is corrupt.
+ *
+ * Locking: @rl must be locked on entry (either for reading or writing), it
+ * remains locked throughout, and is left locked upon return.
+ */
+int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol,
+ const runlist_element *rl, const VCN start_vcn)
+{
+ LCN prev_lcn;
+ int rls;
+
+ BUG_ON(start_vcn < 0);
+ if (!rl) {
+ BUG_ON(start_vcn);
+ return 1;
+ }
+ /* Skip to runlist element containing @start_vcn. */
+ while (rl->length && start_vcn >= rl[1].vcn)
+ rl++;
+ if ((!rl->length && start_vcn > rl->vcn) || start_vcn < rl->vcn)
+ return -EINVAL;
+ prev_lcn = 0;
+ /* Always need the terminating zero byte. */
+ rls = 1;
+ /* Do the first partial run if present. */
+ if (start_vcn > rl->vcn) {
+ s64 delta;
+
+ /* We know rl->length != 0 already. */
+ if (rl->length < 0 || rl->lcn < LCN_HOLE)
+ goto err_out;
+ delta = start_vcn - rl->vcn;
+ /* Header byte + length. */
+ rls += 1 + ntfs_get_nr_significant_bytes(rl->length - delta);
+ /*
+ * If the logical cluster number (lcn) denotes a hole and we
+ * are on NTFS 3.0+, we don't store it at all, i.e. we need
+ * zero space. On earlier NTFS versions we just store the lcn.
+ * Note: this assumes that on NTFS 1.2-, holes are stored with
+ * an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
+ */
+ if (rl->lcn >= 0 || vol->major_ver < 3) {
+ prev_lcn = rl->lcn;
+ if (rl->lcn >= 0)
+ prev_lcn += delta;
+ /* Change in lcn. */
+ rls += ntfs_get_nr_significant_bytes(prev_lcn);
+ }
+ /* Go to next runlist element. */
+ rl++;
+ }
+ /* Do the full runs. */
+ for (; rl->length; rl++) {
+ if (rl->length < 0 || rl->lcn < LCN_HOLE)
+ goto err_out;
+ /* Header byte + length. */
+ rls += 1 + ntfs_get_nr_significant_bytes(rl->length);
+ /*
+ * If the logical cluster number (lcn) denotes a hole and we
+ * are on NTFS 3.0+, we don't store it at all, i.e. we need
+ * zero space. On earlier NTFS versions we just store the lcn.
+ * Note: this assumes that on NTFS 1.2-, holes are stored with
+ * an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
+ */
+ if (rl->lcn >= 0 || vol->major_ver < 3) {
+ /* Change in lcn. */
+ rls += ntfs_get_nr_significant_bytes(rl->lcn -
+ prev_lcn);
+ prev_lcn = rl->lcn;
+ }
+ }
+ return rls;
+err_out:
+ if (rl->lcn == LCN_RL_NOT_MAPPED)
+ rls = -EINVAL;
+ else
+ rls = -EIO;
+ return rls;
+}
+
+/**
+ * ntfs_write_significant_bytes - write the significant bytes of a number
+ * @dst: destination buffer to write to
+ * @dst_max: pointer to last byte of destination buffer for bounds checking
+ * @n: number whose significant bytes to write
+ *
+ * Store in @dst, the minimum bytes of the number @n which are required to
+ * identify @n unambiguously as a signed number, taking care not to exceed
+ * @dst_max, the maximum position within @dst to which we are allowed to
+ * write.
+ *
+ * This is used when building the mapping pairs array of a runlist to compress
+ * a given logical cluster number (lcn) or a specific run length to the minimum
+ * size possible.
+ *
+ * Return the number of bytes written on success. On error, i.e. the
+ * destination buffer @dst is too small, return -ENOSPC.
+ */
+static inline int ntfs_write_significant_bytes(s8 *dst, const s8 *dst_max,
+ const s64 n)
+{
+ s64 l = n;
+ int i;
+ s8 j;
+
+ i = 0;
+ do {
+ if (dst > dst_max)
+ goto err_out;
+ *dst++ = l & 0xffll;
+ l >>= 8;
+ i++;
+ } while (l != 0 && l != -1);
+ j = (n >> 8 * (i - 1)) & 0xff;
+ /* If the sign bit is wrong, we need an extra byte. */
+ if (n < 0 && j >= 0) {
+ if (dst > dst_max)
+ goto err_out;
+ i++;
+ *dst = (s8)-1;
+ } else if (n > 0 && j < 0) {
+ if (dst > dst_max)
+ goto err_out;
+ i++;
+ *dst = (s8)0;
+ }
+ return i;
+err_out:
+ return -ENOSPC;
+}
+
+/**
+ * ntfs_mapping_pairs_build - build the mapping pairs array from a runlist
+ * @vol: ntfs volume (needed for the ntfs version)
+ * @dst: destination buffer to which to write the mapping pairs array
+ * @dst_len: size of destination buffer @dst in bytes
+ * @rl: locked runlist for which to build the mapping pairs array
+ * @start_vcn: vcn at which to start the mapping pairs array
+ * @stop_vcn: first vcn outside destination buffer on success or -ENOSPC
+ *
+ * Create the mapping pairs array from the locked runlist @rl, starting at vcn
+ * @start_vcn and save the array in @dst. @dst_len is the size of @dst in
+ * bytes and it should be at least equal to the value obtained by calling
+ * ntfs_get_size_for_mapping_pairs().
+ *
+ * If @rl is NULL, just write a single terminator byte to @dst.
+ *
+ * On success or -ENOSPC error, if @stop_vcn is not NULL, *@stop_vcn is set to
+ * the first vcn outside the destination buffer. Note that on error, @dst has
+ * been filled with all the mapping pairs that will fit, thus it can be treated
+ * as partial success, in that a new attribute extent needs to be created or
+ * the next extent has to be used and the mapping pairs build has to be
+ * continued with @start_vcn set to *@stop_vcn.
+ *
+ * Return 0 on success and -errno on error. The following error codes are
+ * defined:
+ * -EINVAL - Run list contains unmapped elements. Make sure to only pass
+ * fully mapped runlists to this function.
+ * -EIO - The runlist is corrupt.
+ * -ENOSPC - The destination buffer is too small.
+ *
+ * Locking: @rl must be locked on entry (either for reading or writing), it
+ * remains locked throughout, and is left locked upon return.
+ */
+int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst,
+ const int dst_len, const runlist_element *rl,
+ const VCN start_vcn, VCN *const stop_vcn)
+{
+ LCN prev_lcn;
+ s8 *dst_max, *dst_next;
+ int err = -ENOSPC;
+ s8 len_len, lcn_len;
+
+ BUG_ON(start_vcn < 0);
+ BUG_ON(dst_len < 1);
+ if (!rl) {
+ BUG_ON(start_vcn);
+ if (stop_vcn)
+ *stop_vcn = 0;
+ /* Terminator byte. */
+ *dst = 0;
+ return 0;
+ }
+ /* Skip to runlist element containing @start_vcn. */
+ while (rl->length && start_vcn >= rl[1].vcn)
+ rl++;
+ if ((!rl->length && start_vcn > rl->vcn) || start_vcn < rl->vcn)
+ return -EINVAL;
+ /*
+ * @dst_max is used for bounds checking in
+ * ntfs_write_significant_bytes().
+ */
+ dst_max = dst + dst_len - 1;
+ prev_lcn = 0;
+ /* Do the first partial run if present. */
+ if (start_vcn > rl->vcn) {
+ s64 delta;
+
+ /* We know rl->length != 0 already. */
+ if (rl->length < 0 || rl->lcn < LCN_HOLE)
+ goto err_out;
+ delta = start_vcn - rl->vcn;
+ /* Write length. */
+ len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
+ rl->length - delta);
+ if (len_len < 0)
+ goto size_err;
+ /*
+ * If the logical cluster number (lcn) denotes a hole and we
+ * are on NTFS 3.0+, we don't store it at all, i.e. we need
+ * zero space. On earlier NTFS versions we just write the lcn
+ * change. FIXME: Do we need to write the lcn change or just
+ * the lcn in that case? Not sure as I have never seen this
+ * case on NT4. - We assume that we just need to write the lcn
+ * change until someone tells us otherwise... (AIA)
+ */
+ if (rl->lcn >= 0 || vol->major_ver < 3) {
+ prev_lcn = rl->lcn;
+ if (rl->lcn >= 0)
+ prev_lcn += delta;
+ /* Write change in lcn. */
+ lcn_len = ntfs_write_significant_bytes(dst + 1 +
+ len_len, dst_max, prev_lcn);
+ if (lcn_len < 0)
+ goto size_err;
+ } else
+ lcn_len = 0;
+ dst_next = dst + len_len + lcn_len + 1;
+ if (dst_next > dst_max)
+ goto size_err;
+ /* Update header byte. */
+ *dst = lcn_len << 4 | len_len;
+ /* Position at next mapping pairs array element. */
+ dst = dst_next;
+ /* Go to next runlist element. */
+ rl++;
+ }
+ /* Do the full runs. */
+ for (; rl->length; rl++) {
+ if (rl->length < 0 || rl->lcn < LCN_HOLE)
+ goto err_out;
+ /* Write length. */
+ len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
+ rl->length);
+ if (len_len < 0)
+ goto size_err;
+ /*
+ * If the logical cluster number (lcn) denotes a hole and we
+ * are on NTFS 3.0+, we don't store it at all, i.e. we need
+ * zero space. On earlier NTFS versions we just write the lcn
+ * change. FIXME: Do we need to write the lcn change or just
+ * the lcn in that case? Not sure as I have never seen this
+ * case on NT4. - We assume that we just need to write the lcn
+ * change until someone tells us otherwise... (AIA)
+ */
+ if (rl->lcn >= 0 || vol->major_ver < 3) {
+ /* Write change in lcn. */
+ lcn_len = ntfs_write_significant_bytes(dst + 1 +
+ len_len, dst_max, rl->lcn - prev_lcn);
+ if (lcn_len < 0)
+ goto size_err;
+ prev_lcn = rl->lcn;
+ } else
+ lcn_len = 0;
+ dst_next = dst + len_len + lcn_len + 1;
+ if (dst_next > dst_max)
+ goto size_err;
+ /* Update header byte. */
+ *dst = lcn_len << 4 | len_len;
+ /* Position at next mapping pairs array element. */
+ dst = dst_next;
+ }
+ /* Success. */
+ err = 0;
+size_err:
+ /* Set stop vcn. */
+ if (stop_vcn)
+ *stop_vcn = rl->vcn;
+ /* Add terminator byte. */
+ *dst = 0;
+ return err;
+err_out:
+ if (rl->lcn == LCN_RL_NOT_MAPPED)
+ err = -EINVAL;
+ else
+ err = -EIO;
+ return err;
+}
+
+/**
+ * ntfs_rl_truncate_nolock - truncate a runlist starting at a specified vcn
+ * @runlist: runlist to truncate
+ * @new_length: the new length of the runlist in VCNs
+ *
+ * Truncate the runlist described by @runlist as well as the memory buffer
+ * holding the runlist elements to a length of @new_length VCNs.
+ *
+ * If @new_length lies within the runlist, the runlist elements with VCNs of
+ * @new_length and above are discarded.
+ *
+ * If @new_length lies beyond the runlist, a sparse runlist element is added to
+ * the end of the runlist @runlist or if the last runlist element is a sparse
+ * one already, this is extended.
+ *
+ * Return 0 on success and -errno on error.
+ *
+ * Locking: The caller must hold @runlist->lock for writing.
+ */
+int ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist,
+ const s64 new_length)
+{
+ runlist_element *rl;
+ int old_size;
+
+ ntfs_debug("Entering for new_length 0x%llx.", (long long)new_length);
+ BUG_ON(!runlist);
+ BUG_ON(new_length < 0);
+ rl = runlist->rl;
+ if (unlikely(!rl)) {
+ /*
+ * Create a runlist consisting of a sparse runlist element of
+ * length @new_length followed by a terminator runlist element.
+ */
+ rl = ntfs_malloc_nofs(PAGE_SIZE);
+ if (unlikely(!rl)) {
+ ntfs_error(vol->sb, "Not enough memory to allocate "
+ "runlist element buffer.");
+ return -ENOMEM;
+ }
+ runlist->rl = rl;
+ rl[1].length = rl->vcn = 0;
+ rl->lcn = LCN_HOLE;
+ rl[1].vcn = rl->length = new_length;
+ rl[1].lcn = LCN_ENOENT;
+ return 0;
+ }
+ BUG_ON(new_length < rl->vcn);
+ /* Find @new_length in the runlist. */
+ while (likely(rl->length && new_length >= rl[1].vcn))
+ rl++;
+ /*
+ * If not at the end of the runlist we need to shrink it.
+ * If at the end of the runlist we need to expand it.
+ */
+ if (rl->length) {
+ runlist_element *trl;
+ BOOL is_end;
+
+ ntfs_debug("Shrinking runlist.");
+ /* Determine the runlist size. */
+ trl = rl + 1;
+ while (likely(trl->length))
+ trl++;
+ old_size = trl - runlist->rl + 1;
+ /* Truncate the run. */
+ rl->length = new_length - rl->vcn;
+ /*
+ * If a run was partially truncated, make the following runlist
+ * element a terminator.
+ */
+ is_end = FALSE;
+ if (rl->length) {
+ rl++;
+ if (!rl->length)
+ is_end = TRUE;
+ rl->vcn = new_length;
+ rl->length = 0;
+ }
+ rl->lcn = LCN_ENOENT;
+ /* Reallocate memory if necessary. */
+ if (!is_end) {
+ int new_size = rl - runlist->rl + 1;
+ rl = ntfs_rl_realloc(runlist->rl, old_size, new_size);
+ if (IS_ERR(rl))
+ ntfs_warning(vol->sb, "Failed to shrink "
+ "runlist buffer. This just "
+ "wastes a bit of memory "
+ "temporarily so we ignore it "
+ "and return success.");
+ else
+ runlist->rl = rl;
+ }
+ } else if (likely(/* !rl->length && */ new_length > rl->vcn)) {
+ ntfs_debug("Expanding runlist.");
+ /*
+ * If there is a previous runlist element and it is a sparse
+ * one, extend it. Otherwise need to add a new, sparse runlist
+ * element.
+ */
+ if ((rl > runlist->rl) && ((rl - 1)->lcn == LCN_HOLE))
+ (rl - 1)->length = new_length - (rl - 1)->vcn;
+ else {
+ /* Determine the runlist size. */
+ old_size = rl - runlist->rl + 1;
+ /* Reallocate memory if necessary. */
+ rl = ntfs_rl_realloc(runlist->rl, old_size,
+ old_size + 1);
+ if (IS_ERR(rl)) {
+ ntfs_error(vol->sb, "Failed to expand runlist "
+ "buffer, aborting.");
+ return PTR_ERR(rl);
+ }
+ runlist->rl = rl;
+ /*
+ * Set @rl to the same runlist element in the new
+ * runlist as before in the old runlist.
+ */
+ rl += old_size - 1;
+ /* Add a new, sparse runlist element. */
+ rl->lcn = LCN_HOLE;
+ rl->length = new_length - rl->vcn;
+ /* Add a new terminator runlist element. */
+ rl++;
+ rl->length = 0;
+ }
+ rl->vcn = new_length;
+ rl->lcn = LCN_ENOENT;
+ } else /* if (unlikely(!rl->length && new_length == rl->vcn)) */ {
+ /* Runlist already has same size as requested. */
+ rl->lcn = LCN_ENOENT;
+ }
+ ntfs_debug("Done.");
+ return 0;
+}
diff --git a/fs/ntfs/runlist.h b/fs/ntfs/runlist.h
new file mode 100644
index 000000000000..7107fde59df9
--- /dev/null
+++ b/fs/ntfs/runlist.h
@@ -0,0 +1,89 @@
+/*
+ * runlist.h - Defines for runlist handling in NTFS Linux kernel driver.
+ * Part of the Linux-NTFS project.
+ *
+ * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
+ *
+ * This program/include file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program/include file is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS
+ * distribution in the file COPYING); if not, write to the Free Software
+ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_NTFS_RUNLIST_H
+#define _LINUX_NTFS_RUNLIST_H
+
+#include "types.h"
+#include "layout.h"
+#include "volume.h"
+
+/**
+ * runlist_element - in memory vcn to lcn mapping array element
+ * @vcn: starting vcn of the current array element
+ * @lcn: starting lcn of the current array element
+ * @length: length in clusters of the current array element
+ *
+ * The last vcn (in fact the last vcn + 1) is reached when length == 0.
+ *
+ * When lcn == -1 this means that the count vcns starting at vcn are not
+ * physically allocated (i.e. this is a hole / data is sparse).
+ */
+typedef struct { /* In memory vcn to lcn mapping structure element. */
+ VCN vcn; /* vcn = Starting virtual cluster number. */
+ LCN lcn; /* lcn = Starting logical cluster number. */
+ s64 length; /* Run length in clusters. */
+} runlist_element;
+
+/**
+ * runlist - in memory vcn to lcn mapping array including a read/write lock
+ * @rl: pointer to an array of runlist elements
+ * @lock: read/write spinlock for serializing access to @rl
+ *
+ */
+typedef struct {
+ runlist_element *rl;
+ struct rw_semaphore lock;
+} runlist;
+
+static inline void ntfs_init_runlist(runlist *rl)
+{
+ rl->rl = NULL;
+ init_rwsem(&rl->lock);
+}
+
+typedef enum {
+ LCN_HOLE = -1, /* Keep this as highest value or die! */
+ LCN_RL_NOT_MAPPED = -2,
+ LCN_ENOENT = -3,
+} LCN_SPECIAL_VALUES;
+
+extern runlist_element *ntfs_runlists_merge(runlist_element *drl,
+ runlist_element *srl);
+
+extern runlist_element *ntfs_mapping_pairs_decompress(const ntfs_volume *vol,
+ const ATTR_RECORD *attr, runlist_element *old_rl);
+
+extern LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn);
+
+extern int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol,
+ const runlist_element *rl, const VCN start_vcn);
+
+extern int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst,
+ const int dst_len, const runlist_element *rl,
+ const VCN start_vcn, VCN *const stop_vcn);
+
+extern int ntfs_rl_truncate_nolock(const ntfs_volume *vol,
+ runlist *const runlist, const s64 new_length);
+
+#endif /* _LINUX_NTFS_RUNLIST_H */
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 2c93c59186c7..d915ee20873f 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -31,12 +31,15 @@
#include <linux/moduleparam.h>
#include <linux/smp_lock.h>
-#include "ntfs.h"
#include "sysctl.h"
#include "logfile.h"
#include "quota.h"
#include "dir.h"
+#include "debug.h"
#include "index.h"
+#include "aops.h"
+#include "malloc.h"
+#include "ntfs.h"
/* Number of mounted file systems which have compression enabled. */
static unsigned long ntfs_nr_compression_users;
@@ -827,12 +830,12 @@ static BOOL parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
}
/**
- * setup_lcn_allocator - initialize the cluster allocator
- * @vol: volume structure for which to setup the lcn allocator
+ * ntfs_setup_allocators - initialize the cluster and mft allocators
+ * @vol: volume structure for which to setup the allocators
*
- * Setup the cluster (lcn) allocator to the starting values.
+ * Setup the cluster (lcn) and mft allocators to the starting values.
*/
-static void setup_lcn_allocator(ntfs_volume *vol)
+static void ntfs_setup_allocators(ntfs_volume *vol)
{
#ifdef NTFS_RW
LCN mft_zone_size, mft_lcn;
@@ -902,6 +905,11 @@ static void setup_lcn_allocator(ntfs_volume *vol)
vol->data2_zone_pos = 0;
ntfs_debug("vol->data2_zone_pos = 0x%llx",
(unsigned long long)vol->data2_zone_pos);
+
+ /* Set the mft data allocation position to mft record 24. */
+ vol->mft_data_pos = 24;
+ ntfs_debug("vol->mft_data_pos = 0x%llx",
+ (unsigned long long)vol->mft_data_pos);
#endif /* NTFS_RW */
}
@@ -938,8 +946,8 @@ static BOOL load_and_init_mft_mirror(ntfs_volume *vol)
/* No VFS initiated operations allowed for $MFTMirr. */
tmp_ino->i_op = &ntfs_empty_inode_ops;
tmp_ino->i_fop = &ntfs_empty_file_ops;
- /* Put back our special address space operations. */
- tmp_ino->i_mapping->a_ops = &ntfs_mft_aops;
+ /* Put in our special address space operations. */
+ tmp_ino->i_mapping->a_ops = &ntfs_mst_aops;
tmp_ni = NTFS_I(tmp_ino);
/* The $MFTMirr, like the $MFT is multi sector transfer protected. */
NInoSetMstProtected(tmp_ni);
@@ -2334,8 +2342,8 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
*/
result = parse_ntfs_boot_sector(vol, (NTFS_BOOT_SECTOR*)bh->b_data);
- /* Initialize the cluster allocator. */
- setup_lcn_allocator(vol);
+ /* Initialize the cluster and mft allocators. */
+ ntfs_setup_allocators(vol);
brelse(bh);
diff --git a/fs/ntfs/types.h b/fs/ntfs/types.h
index a98731ece42e..08a55aa53d4e 100644
--- a/fs/ntfs/types.h
+++ b/fs/ntfs/types.h
@@ -53,34 +53,6 @@ typedef sle64 leLCN;
typedef s64 LSN;
typedef sle64 leLSN;
-/**
- * runlist_element - in memory vcn to lcn mapping array element
- * @vcn: starting vcn of the current array element
- * @lcn: starting lcn of the current array element
- * @length: length in clusters of the current array element
- *
- * The last vcn (in fact the last vcn + 1) is reached when length == 0.
- *
- * When lcn == -1 this means that the count vcns starting at vcn are not
- * physically allocated (i.e. this is a hole / data is sparse).
- */
-typedef struct { /* In memory vcn to lcn mapping structure element. */
- VCN vcn; /* vcn = Starting virtual cluster number. */
- LCN lcn; /* lcn = Starting logical cluster number. */
- s64 length; /* Run length in clusters. */
-} runlist_element;
-
-/**
- * runlist - in memory vcn to lcn mapping array including a read/write lock
- * @rl: pointer to an array of runlist elements
- * @lock: read/write spinlock for serializing access to @rl
- *
- */
-typedef struct {
- runlist_element *rl;
- struct rw_semaphore lock;
-} runlist;
-
typedef enum {
FALSE = 0,
TRUE = 1
diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c
index ec7405a80b4c..560b0ea255b0 100644
--- a/fs/ntfs/unistr.c
+++ b/fs/ntfs/unistr.c
@@ -19,6 +19,8 @@
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include "types.h"
+#include "debug.h"
#include "ntfs.h"
/*
diff --git a/fs/ntfs/upcase.c b/fs/ntfs/upcase.c
index 276ed97982d3..879cdf1d5bd3 100644
--- a/fs/ntfs/upcase.c
+++ b/fs/ntfs/upcase.c
@@ -24,6 +24,7 @@
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include "malloc.h"
#include "ntfs.h"
ntfschar *generate_default_upcase(void)
diff --git a/fs/ntfs/volume.h b/fs/ntfs/volume.h
index 9775fa65895e..4b97fa8635a8 100644
--- a/fs/ntfs/volume.h
+++ b/fs/ntfs/volume.h
@@ -24,6 +24,8 @@
#ifndef _LINUX_NTFS_VOLUME_H
#define _LINUX_NTFS_VOLUME_H
+#include <linux/rwsem.h>
+
#include "types.h"
#include "layout.h"
@@ -81,6 +83,8 @@ typedef struct {
#ifdef NTFS_RW
/* Variables used by the cluster and mft allocators. */
+ s64 mft_data_pos; /* Mft record number at which to
+ allocate the next mft record. */
LCN mft_zone_start; /* First cluster of the mft zone. */
LCN mft_zone_end; /* First cluster beyond the mft zone. */
LCN mft_zone_pos; /* Current position in the mft zone. */
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index c802d5a2f16a..97fbb86195ef 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -29,7 +29,6 @@ EXPORT_SYMBOL(posix_acl_equiv_mode);
EXPORT_SYMBOL(posix_acl_from_mode);
EXPORT_SYMBOL(posix_acl_create_masq);
EXPORT_SYMBOL(posix_acl_chmod_masq);
-EXPORT_SYMBOL(posix_acl_masq_nfs_mode);
EXPORT_SYMBOL(posix_acl_permission);
/*
@@ -380,44 +379,3 @@ posix_acl_chmod_masq(struct posix_acl *acl, mode_t mode)
return 0;
}
-
-/*
- * Adjust the mode parameter so that NFSv2 grants nobody permissions
- * that may not be granted by the ACL. This is necessary because NFSv2
- * may compute access permissions on the client side, and may serve cached
- * data whenever it assumes access would be granted. Since ACLs may also
- * be used to deny access to specific users, the minimal permissions
- * for secure operation over NFSv2 are very restrictive. Permissions
- * granted to users via Access Control Lists will not be effective over
- * NFSv2.
- *
- * Privilege escalation can only happen for read operations, as writes are
- * always carried out on the NFS server, where the proper access checks are
- * implemented.
- */
-int
-posix_acl_masq_nfs_mode(struct posix_acl *acl, mode_t *mode_p)
-{
- struct posix_acl_entry *pa, *pe; int min_perm = S_IRWXO;
-
- FOREACH_ACL_ENTRY(pa, acl, pe) {
- switch(pa->e_tag) {
- case ACL_USER_OBJ:
- break;
-
- case ACL_USER:
- case ACL_GROUP_OBJ:
- case ACL_GROUP:
- case ACL_MASK:
- case ACL_OTHER:
- min_perm &= pa->e_perm;
- break;
-
- default:
- return -EIO;
- }
- }
- *mode_p = (*mode_p & ~(S_IRWXG|S_IRWXO)) | (min_perm << 3) | min_perm;
-
- return 0;
-}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 987240398c4a..91060b9921cc 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -473,7 +473,7 @@ out:
static int proc_permission(struct inode *inode, int mask, struct nameidata *nd)
{
- if (vfs_permission(inode, mask) != 0)
+ if (generic_permission(inode, mask, NULL) != 0)
return -EACCES;
return proc_check_root(inode);
}
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 6151f0592f28..76779d4e5a75 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -149,9 +149,6 @@ struct proc_dir_entry proc_root = {
.parent = &proc_root,
};
-#ifdef CONFIG_SYSCTL
-EXPORT_SYMBOL(proc_sys_root);
-#endif
EXPORT_SYMBOL(proc_symlink);
EXPORT_SYMBOL(proc_mkdir);
EXPORT_SYMBOL(create_proc_entry);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 7998fa56282a..01b379310276 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -9,7 +9,7 @@ char *task_mem(struct mm_struct *mm, char *buffer)
unsigned long data, text, lib;
data = mm->total_vm - mm->shared_vm - mm->stack_vm;
- text = (mm->end_code - mm->start_code) >> 10;
+ text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
buffer += sprintf(buffer,
"VmSize:\t%8lu kB\n"
@@ -18,12 +18,14 @@ char *task_mem(struct mm_struct *mm, char *buffer)
"VmData:\t%8lu kB\n"
"VmStk:\t%8lu kB\n"
"VmExe:\t%8lu kB\n"
- "VmLib:\t%8lu kB\n",
+ "VmLib:\t%8lu kB\n"
+ "VmPTE:\t%8lu kB\n",
(mm->total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
mm->locked_vm << (PAGE_SHIFT-10),
mm->rss << (PAGE_SHIFT-10),
data << (PAGE_SHIFT-10),
- mm->stack_vm << (PAGE_SHIFT-10), text, lib);
+ mm->stack_vm << (PAGE_SHIFT-10), text, lib,
+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
return buffer;
}
@@ -36,7 +38,8 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
int *data, int *resident)
{
*shared = mm->shared_vm;
- *text = (mm->end_code - mm->start_code) >> PAGE_SHIFT;
+ *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
+ >> PAGE_SHIFT;
*data = mm->total_vm - mm->shared_vm - *text;
*resident = mm->rss;
return mm->total_vm;
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index cdc2a91609d7..c9f8e2feef10 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -736,7 +736,7 @@ static inline int this_blocknr_allocation_would_make_it_a_large_file(reiserfs_bl
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
static inline void displace_new_packing_locality (reiserfs_blocknr_hint_t *hint)
{
- struct key * key = &hint->key;
+ struct reiserfs_key * key = &hint->key;
hint->th->displace_new_blocks = 0;
hint->search_start = hint->beg + keyed_hash((char*)(&key->k_objectid),4) % (hint->end - hint->beg);
@@ -777,7 +777,7 @@ static inline int old_way (reiserfs_blocknr_hint_t * hint)
static inline void hundredth_slices (reiserfs_blocknr_hint_t * hint)
{
- struct key * key = &hint->key;
+ struct reiserfs_key * key = &hint->key;
b_blocknr_t slice_start;
slice_start = (keyed_hash((char*)(&key->k_dir_id),4) % 100) * (hint->end / 100);
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index c222a92b5a94..268474f46830 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -12,7 +12,7 @@
#include <linux/buffer_head.h>
#include <asm/uaccess.h>
-extern struct key MIN_KEY;
+extern struct reiserfs_key MIN_KEY;
static int reiserfs_readdir (struct file *, void *, filldir_t);
int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, int datasync) ;
@@ -46,7 +46,7 @@ static int reiserfs_readdir (struct file * filp, void * dirent, filldir_t filldi
INITIALIZE_PATH (path_to_entry);
struct buffer_head * bh;
int item_num, entry_num;
- const struct key * rkey;
+ const struct reiserfs_key * rkey;
struct item_head * ih, tmp_ih;
int search_res;
char * local_buf;
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index f038aa7075b2..814561b10324 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -163,7 +163,7 @@ static void create_virtual_node (struct tree_balance * tb, int h)
/* set right merge flag we take right delimiting key and check whether it is a mergeable item */
if (tb->CFR[0]) {
- struct key * key;
+ struct reiserfs_key * key;
key = B_N_PDELIM_KEY (tb->CFR[0], tb->rkey[0]);
if (op_is_left_mergeable (key, Sh->b_size) && (vn->vn_mode != M_DELETE ||
@@ -1140,7 +1140,7 @@ static inline int can_node_be_removed (int mode, int lfree, int sfree, int rfree
struct buffer_head * Sh = PATH_H_PBUFFER (tb->tb_path, h);
int levbytes = tb->insert_size[h];
struct item_head * ih;
- struct key * r_key = NULL;
+ struct reiserfs_key * r_key = NULL;
ih = B_N_PITEM_HEAD (Sh, 0);
if ( tb->CFR[h] )
diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
index 5b9dee2ac2ae..d1033e3f89ea 100644
--- a/fs/reiserfs/ibalance.c
+++ b/fs/reiserfs/ibalance.c
@@ -133,7 +133,7 @@ static void internal_insert_childs (struct buffer_info * cur_bi,
struct buffer_head * cur = cur_bi->bi_bh;
struct block_head * blkh;
int nr;
- struct key * ih;
+ struct reiserfs_key * ih;
struct disk_child new_dc[2];
struct disk_child * dc;
int i;
@@ -209,7 +209,7 @@ static void internal_delete_pointers_items (
struct buffer_head * cur = cur_bi->bi_bh;
int nr;
struct block_head * blkh;
- struct key * key;
+ struct reiserfs_key * key;
struct disk_child * dc;
RFALSE( cur == NULL, "buffer is 0");
@@ -300,7 +300,7 @@ static void internal_copy_pointers_items (
int nr_dest, nr_src;
int dest_order, src_order;
struct block_head * blkh;
- struct key * key;
+ struct reiserfs_key * key;
struct disk_child * dc;
nr_src = B_NR_ITEMS (src);
@@ -409,7 +409,7 @@ static void internal_insert_key (struct buffer_info * dest_bi,
struct buffer_head * dest = dest_bi->bi_bh;
int nr;
struct block_head * blkh;
- struct key * key;
+ struct reiserfs_key * key;
RFALSE( dest == NULL || src == NULL,
"source(%p) or dest(%p) buffer is 0", src, dest);
diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
index c315edbb2187..57779620a626 100644
--- a/fs/reiserfs/item_ops.c
+++ b/fs/reiserfs/item_ops.c
@@ -26,7 +26,7 @@ static void sd_decrement_key (struct cpu_key * key)
set_cpu_key_k_offset(key, (loff_t)(-1));
}
-static int sd_is_left_mergeable (struct key * key, unsigned long bsize)
+static int sd_is_left_mergeable (struct reiserfs_key * key, unsigned long bsize)
{
return 0;
}
@@ -145,7 +145,7 @@ static void direct_decrement_key (struct cpu_key * key)
}
-static int direct_is_left_mergeable (struct key * key, unsigned long bsize)
+static int direct_is_left_mergeable (struct reiserfs_key * key, unsigned long bsize)
{
int version = le_key_version (key);
return ((le_key_k_offset (version, key) & (bsize - 1)) != 1);
@@ -250,7 +250,7 @@ static void indirect_decrement_key (struct cpu_key * key)
// if it is not first item of the body, then it is mergeable
-static int indirect_is_left_mergeable (struct key * key, unsigned long bsize)
+static int indirect_is_left_mergeable (struct reiserfs_key * key, unsigned long bsize)
{
int version = le_key_version (key);
return (le_key_k_offset (version, key) != 1);
@@ -403,7 +403,7 @@ static void direntry_decrement_key (struct cpu_key * key)
}
-static int direntry_is_left_mergeable (struct key * key, unsigned long bsize)
+static int direntry_is_left_mergeable (struct reiserfs_key * key, unsigned long bsize)
{
if (le32_to_cpu (key->u.k_offset_v1.k_offset) == DOT_OFFSET)
return 0;
@@ -691,7 +691,7 @@ static void errcatch_decrement_key (struct cpu_key * key)
}
-static int errcatch_is_left_mergeable (struct key * key, unsigned long bsize)
+static int errcatch_is_left_mergeable (struct reiserfs_key * key, unsigned long bsize)
{
reiserfs_warning (NULL, "green-16003: Invalid item type observed, run fsck ASAP");
return 0;
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index e12fcdff5a69..30e19a145fa7 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -1166,7 +1166,7 @@ static int entry_points_to_object (const char * name, int len, struct reiserfs_d
/* sets key of objectid the entry has to point to */
-static void set_ino_in_dir_entry (struct reiserfs_dir_entry * de, struct key * key)
+static void set_ino_in_dir_entry (struct reiserfs_dir_entry * de, struct reiserfs_key * key)
{
/* JDM These operations are endian safe - both are le */
de->de_deh[de->de_entry_num].deh_dir_id = key->k_dir_id;
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 333ad7c001b3..5f08f356da1a 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -28,7 +28,7 @@ static char * reiserfs_cpu_offset (struct cpu_key * key)
}
-static char * le_offset (struct key * key)
+static char * le_offset (struct reiserfs_key * key)
{
int version;
@@ -57,7 +57,7 @@ static char * cpu_type (struct cpu_key * key)
}
-static char * le_type (struct key * key)
+static char * le_type (struct reiserfs_key * key)
{
int version;
@@ -76,7 +76,7 @@ static char * le_type (struct key * key)
/* %k */
-static void sprintf_le_key (char * buf, struct key * key)
+static void sprintf_le_key (char * buf, struct reiserfs_key * key)
{
if (key)
sprintf (buf, "[%d %d %s %s]", le32_to_cpu (key->k_dir_id),
@@ -213,7 +213,7 @@ prepare_error_buf( const char *fmt, va_list args )
switch (what) {
case 'k':
- sprintf_le_key (p, va_arg(args, struct key *));
+ sprintf_le_key (p, va_arg(args, struct reiserfs_key *));
break;
case 'K':
sprintf_cpu_key (p, va_arg(args, struct cpu_key *));
@@ -462,7 +462,7 @@ void print_path (struct tree_balance * tb, struct path * path)
dc_size)...*/
static int print_internal (struct buffer_head * bh, int first, int last)
{
- struct key * key;
+ struct reiserfs_key * key;
struct disk_child * dc;
int i;
int from, to;
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index eae0b197fd34..caea0377b668 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -93,7 +93,7 @@ inline void copy_item_head(struct item_head * p_v_to,
Returns: -1 if key1 < key2
0 if key1 == key2
1 if key1 > key2 */
-inline int comp_short_keys (const struct key * le_key,
+inline int comp_short_keys (const struct reiserfs_key * le_key,
const struct cpu_key * cpu_key)
{
__u32 * p_s_le_u32, * p_s_cpu_u32;
@@ -117,7 +117,7 @@ inline int comp_short_keys (const struct key * le_key,
Compare keys using all 4 key fields.
Returns: -1 if key1 < key2 0
if key1 = key2 1 if key1 > key2 */
-inline int comp_keys (const struct key * le_key, const struct cpu_key * cpu_key)
+inline int comp_keys (const struct reiserfs_key * le_key, const struct cpu_key * cpu_key)
{
int retval;
@@ -174,7 +174,7 @@ inline int comp_cpu_keys (const struct cpu_key * key1,
return 0;
}
-inline int comp_short_le_keys (const struct key * key1, const struct key * key2)
+inline int comp_short_le_keys (const struct reiserfs_key * key1, const struct reiserfs_key * key2)
{
__u32 * p_s_1_u32, * p_s_2_u32;
int n_key_length = REISERFS_SHORT_KEY_LEN;
@@ -216,7 +216,7 @@ inline void cpu_key2cpu_key (struct cpu_key * to, const struct cpu_key * from)
}
-inline void le_key2cpu_key (struct cpu_key * to, const struct key * from)
+inline void le_key2cpu_key (struct cpu_key * to, const struct reiserfs_key * from)
{
to->on_disk_key.k_dir_id = le32_to_cpu (from->k_dir_id);
to->on_disk_key.k_objectid = le32_to_cpu (from->k_objectid);
@@ -236,9 +236,9 @@ inline void le_key2cpu_key (struct cpu_key * to, const struct key * from)
// this does not say which one is bigger, it only returns 1 if keys
// are not equal, 0 otherwise
-inline int comp_le_keys (const struct key * k1, const struct key * k2)
+inline int comp_le_keys (const struct reiserfs_key * k1, const struct reiserfs_key * k2)
{
- return memcmp (k1, k2, sizeof (struct key));
+ return memcmp (k1, k2, sizeof (struct reiserfs_key));
}
/**************************************************************************
@@ -272,7 +272,7 @@ inline int bin_search (
int n_rbound, n_lbound, n_j;
for ( n_j = ((n_rbound = p_n_num - 1) + (n_lbound = 0))/2; n_lbound <= n_rbound; n_j = (n_rbound + n_lbound)/2 )
- switch( COMP_KEYS((struct key *)((char * )p_v_base + n_j * p_n_width), (struct cpu_key *)p_v_key) ) {
+ switch( COMP_KEYS((struct reiserfs_key *)((char * )p_v_base + n_j * p_n_width), (struct cpu_key *)p_v_key) ) {
case -1: n_lbound = n_j + 1; continue;
case 1: n_rbound = n_j - 1; continue;
case 0: *p_n_pos = n_j; return ITEM_FOUND; /* Key found in the array. */
@@ -291,17 +291,17 @@ extern struct tree_balance * cur_tb;
/* Minimal possible key. It is never in the tree. */
-const struct key MIN_KEY = {0, 0, {{0, 0},}};
+const struct reiserfs_key MIN_KEY = {0, 0, {{0, 0},}};
/* Maximal possible key. It is never in the tree. */
-const struct key MAX_KEY = {0xffffffff, 0xffffffff, {{0xffffffff, 0xffffffff},}};
+const struct reiserfs_key MAX_KEY = {0xffffffff, 0xffffffff, {{0xffffffff, 0xffffffff},}};
/* Get delimiting key of the buffer by looking for it in the buffers in the path, starting from the bottom
of the path, and going upwards. We must check the path's validity at each step. If the key is not in
the path, there is no delimiting key in the tree (buffer is first or last buffer in tree), and in this
case we return a special key, either MIN_KEY or MAX_KEY. */
-inline const struct key * get_lkey (
+inline const struct reiserfs_key * get_lkey (
const struct path * p_s_chk_path,
const struct super_block * p_s_sb
) {
@@ -340,7 +340,7 @@ inline const struct key * get_lkey (
/* Get delimiting key of the buffer at the path and its right neighbor. */
-inline const struct key * get_rkey (
+inline const struct reiserfs_key * get_rkey (
const struct path * p_s_chk_path,
const struct super_block * p_s_sb
) {
@@ -802,7 +802,7 @@ io_error:
{
int pos = p_s_last_element->pe_position;
int limit = B_NR_ITEMS(p_s_bh);
- struct key *le_key;
+ struct reiserfs_key *le_key;
if (p_s_search_path->reada & PATH_READA_BACK)
limit = 0;
@@ -1247,7 +1247,7 @@ void padd_item (char * item, int total_length, int length)
}
#ifdef REISERQUOTA_DEBUG
-char key2type(struct key *ih)
+char key2type(struct reiserfs_key *ih)
{
if (is_direntry_le_key(2, ih))
return 'd';
@@ -1417,7 +1417,7 @@ int reiserfs_delete_item (struct reiserfs_transaction_handle *th,
/* this deletes item which never gets split */
void reiserfs_delete_solid_item (struct reiserfs_transaction_handle *th,
struct inode *inode,
- struct key * key)
+ struct reiserfs_key * key)
{
struct tree_balance tb;
INITIALIZE_PATH (path);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 23dd560a38aa..3e7d7e0845e8 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -106,7 +106,7 @@ void reiserfs_unlockfs(struct super_block *s) {
reiserfs_allow_writes(s) ;
}
-extern const struct key MAX_KEY;
+extern const struct reiserfs_key MAX_KEY;
/* this is used to delete "save link" when there are no items of a
@@ -116,7 +116,7 @@ extern const struct key MAX_KEY;
protecting unlink is bigger that a key lf "save link" which
protects truncate), so there left no items to make truncate
completion on */
-static int remove_save_link_only (struct super_block * s, struct key * key, int oid_free)
+static int remove_save_link_only (struct super_block * s, struct reiserfs_key * key, int oid_free)
{
struct reiserfs_transaction_handle th;
int err;
@@ -140,7 +140,7 @@ static int finish_unfinished (struct super_block * s)
{
INITIALIZE_PATH (path);
struct cpu_key max_cpu_key, obj_key;
- struct key save_link_key;
+ struct reiserfs_key save_link_key;
int retval = 0;
struct item_head * ih;
struct buffer_head * bh;
@@ -335,7 +335,7 @@ void add_save_link (struct reiserfs_transaction_handle * th,
int remove_save_link (struct inode * inode, int truncate)
{
struct reiserfs_transaction_handle th;
- struct key key;
+ struct reiserfs_key key;
int err;
/* we are going to do one balancing only */
diff --git a/fs/select.c b/fs/select.c
index e0a87cb4f733..57e776cafb3b 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -268,8 +268,6 @@ int do_select(int n, fd_set_bits *fds, long *timeout)
return retval;
}
-EXPORT_SYMBOL(do_select);
-
static void *select_bits_alloc(int size)
{
return kmalloc(6 * size, GFP_KERNEL);
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 1001a64b35da..6db62f3aa397 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -368,7 +368,6 @@ parse_options(struct smb_mount_data_kernel *mnt, char *options)
&optopt, &optarg, &flags, &value)) > 0) {
VERBOSE("'%s' -> '%s'\n", optopt, optarg ? optarg : "<none>");
-
switch (c) {
case 1:
/* got a "flag" option */
@@ -383,15 +382,19 @@ parse_options(struct smb_mount_data_kernel *mnt, char *options)
break;
case 'u':
mnt->uid = value;
+ flags |= SMB_MOUNT_UID;
break;
case 'g':
mnt->gid = value;
+ flags |= SMB_MOUNT_GID;
break;
case 'f':
mnt->file_mode = (value & S_IRWXUGO) | S_IFREG;
+ flags |= SMB_MOUNT_FMODE;
break;
case 'd':
mnt->dir_mode = (value & S_IRWXUGO) | S_IFDIR;
+ flags |= SMB_MOUNT_DMODE;
break;
case 'i':
strlcpy(mnt->codepage.local_name, optarg,
@@ -429,9 +432,9 @@ smb_show_options(struct seq_file *s, struct vfsmount *m)
if (mnt->flags & opts[i].flag)
seq_printf(s, ",%s", opts[i].name);
- if (mnt->uid != 0)
+ if (mnt->flags & SMB_MOUNT_UID)
seq_printf(s, ",uid=%d", mnt->uid);
- if (mnt->gid != 0)
+ if (mnt->flags & SMB_MOUNT_GID)
seq_printf(s, ",gid=%d", mnt->gid);
if (mnt->mounted_uid != 0)
seq_printf(s, ",mounted_uid=%d", mnt->mounted_uid);
@@ -440,8 +443,10 @@ smb_show_options(struct seq_file *s, struct vfsmount *m)
* Defaults for file_mode and dir_mode are unknown to us; they
* depend on the current umask of the user doing the mount.
*/
- seq_printf(s, ",file_mode=%04o", mnt->file_mode & S_IRWXUGO);
- seq_printf(s, ",dir_mode=%04o", mnt->dir_mode & S_IRWXUGO);
+ if (mnt->flags & SMB_MOUNT_FMODE)
+ seq_printf(s, ",file_mode=%04o", mnt->file_mode & S_IRWXUGO);
+ if (mnt->flags & SMB_MOUNT_DMODE)
+ seq_printf(s, ",dir_mode=%04o", mnt->dir_mode & S_IRWXUGO);
if (strcmp(mnt->codepage.local_name, CONFIG_NLS_DEFAULT))
seq_printf(s, ",iocharset=%s", mnt->codepage.local_name);
@@ -566,8 +571,13 @@ int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
mnt->file_mode = (oldmnt->file_mode & S_IRWXUGO) | S_IFREG;
mnt->dir_mode = (oldmnt->dir_mode & S_IRWXUGO) | S_IFDIR;
- mnt->flags = (oldmnt->file_mode >> 9);
+ mnt->flags = (oldmnt->file_mode >> 9) | SMB_MOUNT_UID |
+ SMB_MOUNT_GID | SMB_MOUNT_FMODE | SMB_MOUNT_DMODE;
} else {
+ mnt->file_mode = S_IRWXU | S_IRGRP | S_IXGRP |
+ S_IROTH | S_IXOTH | S_IFREG;
+ mnt->dir_mode = S_IRWXU | S_IRGRP | S_IXGRP |
+ S_IROTH | S_IXOTH | S_IFDIR;
if (parse_options(mnt, raw_data))
goto out_bad_option;
}
@@ -599,6 +609,7 @@ int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
sb->s_root = d_alloc_root(root_inode);
if (!sb->s_root)
goto out_no_root;
+
smb_new_dentry(sb->s_root);
return 0;
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
index 418c0c36ac8b..1780d50afe2d 100644
--- a/fs/smbfs/proc.c
+++ b/fs/smbfs/proc.c
@@ -2074,7 +2074,7 @@ out:
return result;
}
-void smb_decode_unix_basic(struct smb_fattr *fattr, char *p)
+void smb_decode_unix_basic(struct smb_fattr *fattr, struct smb_sb_info *server, char *p)
{
u64 size, disk_bytes;
@@ -2111,8 +2111,17 @@ void smb_decode_unix_basic(struct smb_fattr *fattr, char *p)
fattr->f_ctime = smb_ntutc2unixutc(LVAL(p, 16));
fattr->f_atime = smb_ntutc2unixutc(LVAL(p, 24));
fattr->f_mtime = smb_ntutc2unixutc(LVAL(p, 32));
- fattr->f_uid = LVAL(p, 40);
- fattr->f_gid = LVAL(p, 48);
+
+ if (server->mnt->flags & SMB_MOUNT_UID)
+ fattr->f_uid = server->mnt->uid;
+ else
+ fattr->f_uid = LVAL(p, 40);
+
+ if (server->mnt->flags & SMB_MOUNT_GID)
+ fattr->f_gid = server->mnt->gid;
+ else
+ fattr->f_gid = LVAL(p, 48);
+
fattr->f_mode |= smb_filetype_to_mode(WVAL(p, 56));
if (S_ISBLK(fattr->f_mode) || S_ISCHR(fattr->f_mode)) {
@@ -2121,10 +2130,19 @@ void smb_decode_unix_basic(struct smb_fattr *fattr, char *p)
fattr->f_rdev = MKDEV(major & 0xffffffff, minor & 0xffffffff);
if (MAJOR(fattr->f_rdev) != (major & 0xffffffff) ||
- MINOR(fattr->f_rdev) != (minor & 0xffffffff))
+ MINOR(fattr->f_rdev) != (minor & 0xffffffff))
fattr->f_rdev = 0;
}
+
fattr->f_mode |= LVAL(p, 84);
+
+ if ( (server->mnt->flags & SMB_MOUNT_DMODE) &&
+ (S_ISDIR(fattr->f_mode)) )
+ fattr->f_mode = (server->mnt->dir_mode & (S_IRWXU | S_IRWXG | S_IRWXO)) | S_IFDIR;
+ else if ( (server->mnt->flags & SMB_MOUNT_FMODE) &&
+ !(S_ISDIR(fattr->f_mode)) )
+ fattr->f_mode = (server->mnt->file_mode & (S_IRWXU | S_IRWXG | S_IRWXO)) | S_IFREG;
+
}
/*
@@ -2210,7 +2228,7 @@ smb_decode_long_dirent(struct smb_sb_info *server, char *p, int level,
/* FIXME: should we check the length?? */
p += 8;
- smb_decode_unix_basic(fattr, p);
+ smb_decode_unix_basic(fattr, server, p);
VERBOSE("info SMB_FIND_FILE_UNIX at %p, len=%d, name=%.*s\n",
p, len, len, qname->name);
break;
@@ -2769,7 +2787,7 @@ smb_proc_getattr_unix(struct smb_sb_info *server, struct dentry *dir,
if (result < 0)
goto out_free;
- smb_decode_unix_basic(attr, req->rq_data);
+ smb_decode_unix_basic(attr, server, req->rq_data);
out_free:
smb_rput(req);
diff --git a/fs/smbfs/proto.h b/fs/smbfs/proto.h
index e8946c455d8b..50df777b6189 100644
--- a/fs/smbfs/proto.h
+++ b/fs/smbfs/proto.h
@@ -24,7 +24,7 @@ extern int smb_proc_rmdir(struct dentry *dentry);
extern int smb_proc_unlink(struct dentry *dentry);
extern int smb_proc_flush(struct smb_sb_info *server, __u16 fileid);
extern void smb_init_root_dirent(struct smb_sb_info *server, struct smb_fattr *fattr);
-extern void smb_decode_unix_basic(struct smb_fattr *fattr, char *p);
+extern void smb_decode_unix_basic(struct smb_fattr *fattr, struct smb_sb_info *server, char *p);
extern int smb_proc_getattr(struct dentry *dir, struct smb_fattr *fattr);
extern int smb_proc_setattr(struct dentry *dir, struct smb_fattr *fattr);
extern int smb_proc_setattr_unix(struct dentry *d, struct iattr *attr, unsigned int major, unsigned int minor);
diff --git a/fs/xattr.c b/fs/xattr.c
index a71900b5349a..93dee70a1dbe 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -5,6 +5,7 @@
Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
Copyright (C) 2001 SGI - Silicon Graphics, Inc <linux-xfs@oss.sgi.com>
+ Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
*/
#include <linux/fs.h>
#include <linux/slab.h>
@@ -14,6 +15,7 @@
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/syscalls.h>
+#include <linux/module.h>
#include <asm/uaccess.h>
/*
@@ -348,3 +350,131 @@ sys_fremovexattr(int fd, char __user *name)
fput(f);
return error;
}
+
+
+static const char *
+strcmp_prefix(const char *a, const char *a_prefix)
+{
+ while (*a_prefix && *a == *a_prefix) {
+ a++;
+ a_prefix++;
+ }
+ return *a_prefix ? NULL : a;
+}
+
+/*
+ * In order to implement different sets of xattr operations for each xattr
+ * prefix with the generic xattr API, a filesystem should create a
+ * null-terminated array of struct xattr_handler (one for each prefix) and
+ * hang a pointer to it off of the s_xattr field of the superblock.
+ *
+ * The generic_fooxattr() functions will use this list to dispatch xattr
+ * operations to the correct xattr_handler.
+ */
+#define for_each_xattr_handler(handlers, handler) \
+ for ((handler) = *(handlers)++; \
+ (handler) != NULL; \
+ (handler) = *(handlers)++)
+
+/*
+ * Find the xattr_handler with the matching prefix.
+ */
+static struct xattr_handler *
+xattr_resolve_name(struct xattr_handler **handlers, const char **name)
+{
+ struct xattr_handler *handler;
+
+ if (!*name)
+ return NULL;
+
+ for_each_xattr_handler(handlers, handler) {
+ const char *n = strcmp_prefix(*name, handler->prefix);
+ if (n) {
+ *name = n;
+ break;
+ }
+ }
+ return handler;
+}
+
+/*
+ * Find the handler for the prefix and dispatch its get() operation.
+ */
+ssize_t
+generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size)
+{
+ struct xattr_handler *handler;
+ struct inode *inode = dentry->d_inode;
+
+ handler = xattr_resolve_name(inode->i_sb->s_xattr, &name);
+ if (!handler)
+ return -EOPNOTSUPP;
+ return handler->get(inode, name, buffer, size);
+}
+
+/*
+ * Combine the results of the list() operation from every xattr_handler in the
+ * list.
+ */
+ssize_t
+generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+{
+ struct inode *inode = dentry->d_inode;
+ struct xattr_handler *handler, **handlers = inode->i_sb->s_xattr;
+ unsigned int size = 0;
+
+ if (!buffer) {
+ for_each_xattr_handler(handlers, handler)
+ size += handler->list(inode, NULL, 0, NULL, 0);
+ } else {
+ char *buf = buffer;
+
+ for_each_xattr_handler(handlers, handler) {
+ size = handler->list(inode, buf, buffer_size, NULL, 0);
+ if (size > buffer_size)
+ return -ERANGE;
+ buf += size;
+ buffer_size -= size;
+ }
+ size = buf - buffer;
+ }
+ return size;
+}
+
+/*
+ * Find the handler for the prefix and dispatch its set() operation.
+ */
+int
+generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
+{
+ struct xattr_handler *handler;
+ struct inode *inode = dentry->d_inode;
+
+ if (size == 0)
+ value = ""; /* empty EA, do not remove */
+ handler = xattr_resolve_name(inode->i_sb->s_xattr, &name);
+ if (!handler)
+ return -EOPNOTSUPP;
+ return handler->set(inode, name, value, size, flags);
+}
+
+/*
+ * Find the handler for the prefix and dispatch its set() operation to remove
+ * any associated extended attribute.
+ */
+int
+generic_removexattr(struct dentry *dentry, const char *name)
+{
+ struct xattr_handler *handler;
+ struct inode *inode = dentry->d_inode;
+
+ handler = xattr_resolve_name(inode->i_sb->s_xattr, &name);
+ if (!handler)
+ return -EOPNOTSUPP;
+ return handler->set(inode, name, NULL, 0, XATTR_REPLACE);
+}
+
+EXPORT_SYMBOL(generic_getxattr);
+EXPORT_SYMBOL(generic_listxattr);
+EXPORT_SYMBOL(generic_setxattr);
+EXPORT_SYMBOL(generic_removexattr);
diff --git a/include/asm-alpha/errno.h b/include/asm-alpha/errno.h
index 677dc9b2989e..a521d0100f99 100644
--- a/include/asm-alpha/errno.h
+++ b/include/asm-alpha/errno.h
@@ -110,5 +110,9 @@
#define ENOMEDIUM 129 /* No medium found */
#define EMEDIUMTYPE 130 /* Wrong medium type */
+#define ENOKEY 131 /* Required key not available */
+#define EKEYEXPIRED 132 /* Key has expired */
+#define EKEYREVOKED 133 /* Key has been revoked */
+#define EKEYREJECTED 134 /* Key was rejected by service */
#endif
diff --git a/include/asm-arm/arch-clps711x/time.h b/include/asm-arm/arch-clps711x/time.h
index 10d1038f3cfd..9cb27cd4e6ae 100644
--- a/include/asm-arm/arch-clps711x/time.h
+++ b/include/asm-arm/arch-clps711x/time.h
@@ -30,6 +30,9 @@ p720t_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
do_leds();
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
do_profile(regs);
return IRQ_HANDLED;
}
diff --git a/include/asm-arm/arch-integrator/time.h b/include/asm-arm/arch-integrator/time.h
index 2ecbfa7f1259..01729e4d0644 100644
--- a/include/asm-arm/arch-integrator/time.h
+++ b/include/asm-arm/arch-integrator/time.h
@@ -107,6 +107,9 @@ integrator_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
do_leds();
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
do_profile(regs);
return IRQ_HANDLED;
diff --git a/include/asm-arm/arch-l7200/time.h b/include/asm-arm/arch-l7200/time.h
index 31d791ac1d8c..7b98b533e63a 100644
--- a/include/asm-arm/arch-l7200/time.h
+++ b/include/asm-arm/arch-l7200/time.h
@@ -46,6 +46,9 @@ static irqreturn_t
timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
do_profile(regs);
RTC_RTCC = 0; /* Clear interrupt */
diff --git a/include/asm-arm/arch-lh7a40x/timex.h b/include/asm-arm/arch-lh7a40x/timex.h
index e9c296ec5961..fa726b670829 100644
--- a/include/asm-arm/arch-lh7a40x/timex.h
+++ b/include/asm-arm/arch-lh7a40x/timex.h
@@ -14,5 +14,4 @@
/*
#define CLOCK_TICK_RATE 3686400
-#define CLOCK_TICK_FACTOR 80
*/
diff --git a/include/asm-arm/arch-sa1100/timex.h b/include/asm-arm/arch-sa1100/timex.h
index 82aa1a3c16a0..837be9b797dd 100644
--- a/include/asm-arm/arch-sa1100/timex.h
+++ b/include/asm-arm/arch-sa1100/timex.h
@@ -10,4 +10,3 @@
* SA1100 timer
*/
#define CLOCK_TICK_RATE 3686400
-#define CLOCK_TICK_FACTOR 80
diff --git a/include/asm-generic/errno.h b/include/asm-generic/errno.h
index d20dc0fed21d..5a72bb61a0a0 100644
--- a/include/asm-generic/errno.h
+++ b/include/asm-generic/errno.h
@@ -96,5 +96,9 @@
#define ENOMEDIUM 123 /* No medium found */
#define EMEDIUMTYPE 124 /* Wrong medium type */
+#define ENOKEY 125 /* Required key not available */
+#define EKEYEXPIRED 126 /* Key has expired */
+#define EKEYREVOKED 127 /* Key has been revoked */
+#define EKEYREJECTED 128 /* Key was rejected by service */
#endif
diff --git a/include/asm-h8300/timex.h b/include/asm-h8300/timex.h
index 7a61dee5dccc..20413145fabb 100644
--- a/include/asm-h8300/timex.h
+++ b/include/asm-h8300/timex.h
@@ -7,10 +7,6 @@
#define _ASM_H8300_TIMEX_H
#define CLOCK_TICK_RATE CONFIG_CPU_CLOCK*1000/8192 /* Timer input freq. */
-#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
-#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
- (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
- << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
typedef unsigned long cycles_t;
extern short h8300_timer_count;
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h
index d962258e2a79..c5d591e4902b 100644
--- a/include/asm-i386/cpu.h
+++ b/include/asm-i386/cpu.h
@@ -4,6 +4,7 @@
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/topology.h>
+#include <linux/nodemask.h>
#include <asm/node.h>
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 9eb6c9dde893..03dd13a48a8c 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -16,6 +16,9 @@
static inline void do_timer_interrupt_hook(struct pt_regs *regs)
{
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
/*
* In the SMP case we use the local APIC timer interrupt to do the
* profiling, except when we simulate SMP mode on a uniprocessor
diff --git a/include/asm-i386/mach-summit/mach_mpparse.h b/include/asm-i386/mach-summit/mach_mpparse.h
index 1cce2b924a80..2b9e6d55bef1 100644
--- a/include/asm-i386/mach-summit/mach_mpparse.h
+++ b/include/asm-i386/mach-summit/mach_mpparse.h
@@ -22,6 +22,7 @@ static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
{
}
+extern int usb_early_handoff;
static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
char *productid)
{
@@ -31,6 +32,7 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
|| !strncmp(productid, "RUTHLESS SMP", 12))){
use_cyclone = 1; /*enable cyclone-timer*/
setup_summit();
+ usb_early_handoff = 1;
return 1;
}
return 0;
@@ -44,6 +46,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
|| !strncmp(oem_table_id, "EXA", 3))){
use_cyclone = 1; /*enable cyclone-timer*/
setup_summit();
+ usb_early_handoff = 1;
return 1;
}
return 0;
diff --git a/include/asm-i386/mach-visws/do_timer.h b/include/asm-i386/mach-visws/do_timer.h
index 17287326962e..33acd50fd9a8 100644
--- a/include/asm-i386/mach-visws/do_timer.h
+++ b/include/asm-i386/mach-visws/do_timer.h
@@ -9,6 +9,9 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
co_cpu_write(CO_CPU_STAT,co_cpu_read(CO_CPU_STAT) & ~CO_STAT_TIMEINTR);
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
/*
* In the SMP case we use the local APIC timer interrupt to do the
* profiling, except when we simulate SMP mode on a uniprocessor
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index 75c642f9af74..ae510e5d0d78 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -4,6 +4,9 @@
static inline void do_timer_interrupt_hook(struct pt_regs *regs)
{
do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
voyager_timer_interrupt(regs);
}
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index 8170e019af8d..e96814d75bae 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -33,7 +33,7 @@ extern void mp_register_lapic_address (u64 address);
extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
extern void mp_config_acpi_legacy_irqs (void);
-extern void mp_register_gsi (u32 gsi, int edge_level, int active_high_low);
+extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low);
#endif /*CONFIG_ACPI_BOOT*/
#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
diff --git a/include/asm-i386/node.h b/include/asm-i386/node.h
index c1aeb27b1354..e13c6ffa72ae 100644
--- a/include/asm-i386/node.h
+++ b/include/asm-i386/node.h
@@ -5,6 +5,7 @@
#include <linux/mmzone.h>
#include <linux/node.h>
#include <linux/topology.h>
+#include <linux/nodemask.h>
struct i386_node {
struct node node;
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 59f4a1ad3a49..8814b54c75d4 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -55,7 +55,7 @@ extern unsigned char boot_params[PARAM_SIZE];
#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
#define INITRD_START (*(unsigned long *) (PARAM+0x218))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
-#define EDID_INFO (*(struct edid_info *) (PARAM+0x440))
+#define EDID_INFO (*(struct edid_info *) (PARAM+0x140))
#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index d4bb72a58ad5..9bb0f63ff64e 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -321,7 +321,7 @@ struct alt_instr {
* If you use variable sized constraints like "m" or "g" in the
* replacement maake sure to pad to the worst case length.
*/
-#define alternative_input(oldinstr, newinstr, feature, input) \
+#define alternative_input(oldinstr, newinstr, feature, input...) \
asm volatile ("661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
" .align 4\n" \
@@ -333,7 +333,7 @@ struct alt_instr {
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" :: "i" (feature), input)
+ ".previous" :: "i" (feature), ##input)
/*
* Force strict CPU ordering.
diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h
index 6d5d757b1b40..ef53334ac043 100644
--- a/include/asm-i386/timex.h
+++ b/include/asm-i386/timex.h
@@ -15,10 +15,6 @@
# define CLOCK_TICK_RATE 1193182 /* Underlying HZ */
#endif
-#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
-#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
- (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
- << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
/*
* Standard way to access the cycle counter on i586+ CPUs.
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 2d9e45d2d5f8..e094249c8942 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -291,14 +291,19 @@
#define __NR_sys_kexec_load 283
#define __NR_waitid 284
#define __NR_sys_setaltroot 285
+#define __NR_add_key 286
+#define __NR_request_key 287
+#define __NR_keyctl 288
-#define NR_syscalls 286
-
-/* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
+#define NR_syscalls 289
+/*
+ * user-visible error numbers are in the range -1 - -128: see
+ * <asm-i386/errno.h>
+ */
#define __syscall_return(type, res) \
do { \
- if ((unsigned long)(res) >= (unsigned long)(-125)) { \
+ if ((unsigned long)(res) >= (unsigned long)(-(128 + 1))) { \
errno = -(res); \
res = -1; \
} \
diff --git a/include/asm-m68k/timex.h b/include/asm-m68k/timex.h
index 7a05262f29a1..b87f2f278f67 100644
--- a/include/asm-m68k/timex.h
+++ b/include/asm-m68k/timex.h
@@ -7,10 +7,6 @@
#define _ASMm68k_TIMEX_H
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
-#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
-#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
- (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
- << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
typedef unsigned long cycles_t;
diff --git a/include/asm-mips/errno.h b/include/asm-mips/errno.h
index 35d47a882801..2b458f9538cd 100644
--- a/include/asm-mips/errno.h
+++ b/include/asm-mips/errno.h
@@ -110,6 +110,10 @@
*/
#define ENOMEDIUM 159 /* No medium found */
#define EMEDIUMTYPE 160 /* Wrong medium type */
+#define ENOKEY 161 /* Required key not available */
+#define EKEYEXPIRED 162 /* Key has expired */
+#define EKEYREVOKED 163 /* Key has been revoked */
+#define EKEYREJECTED 164 /* Key was rejected by service */
#define EDQUOT 1133 /* Quota exceeded */
diff --git a/include/asm-parisc/errno.h b/include/asm-parisc/errno.h
index 3b0fbce3e0b0..a10f109770f1 100644
--- a/include/asm-parisc/errno.h
+++ b/include/asm-parisc/errno.h
@@ -67,6 +67,10 @@
#define EREMOTEIO 181 /* Remote I/O error */
#define ENOMEDIUM 182 /* No medium found */
#define EMEDIUMTYPE 183 /* Wrong medium type */
+#define ENOKEY 184 /* Required key not available */
+#define EKEYEXPIRED 185 /* Key has expired */
+#define EKEYREVOKED 186 /* Key has been revoked */
+#define EKEYREJECTED 187 /* Key was rejected by service */
/* We now return you to your regularly scheduled HPUX. */
diff --git a/include/asm-ppc/timex.h b/include/asm-ppc/timex.h
index 65192b74ef06..b9bffff66edd 100644
--- a/include/asm-ppc/timex.h
+++ b/include/asm-ppc/timex.h
@@ -11,10 +11,6 @@
#include <asm/cputable.h>
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
-#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
-#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
- (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
- << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
typedef unsigned long cycles_t;
diff --git a/include/asm-ppc64/timex.h b/include/asm-ppc64/timex.h
index cb7149a8f5c6..8db4da4064cd 100644
--- a/include/asm-ppc64/timex.h
+++ b/include/asm-ppc64/timex.h
@@ -12,10 +12,6 @@
#define _ASMPPC64_TIMEX_H
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
-#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
-#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
- (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
- << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
typedef unsigned long cycles_t;
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
index 226acd000639..c878d6ac97ab 100644
--- a/include/asm-s390/timex.h
+++ b/include/asm-s390/timex.h
@@ -12,10 +12,6 @@
#define _ASM_S390_TIMEX_H
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
-#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
-#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
- (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
- << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
typedef unsigned long long cycles_t;
diff --git a/include/asm-sh/timex.h b/include/asm-sh/timex.h
index 96a33d1964cb..bd2f43571fca 100644
--- a/include/asm-sh/timex.h
+++ b/include/asm-sh/timex.h
@@ -7,10 +7,6 @@
#define __ASM_SH_TIMEX_H
#define CLOCK_TICK_RATE (CONFIG_SH_PCLK_FREQ / 4) /* Underlying HZ */
-#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
-#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
- (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
- << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
typedef unsigned long long cycles_t;
diff --git a/include/asm-sparc/errno.h b/include/asm-sparc/errno.h
index 5b0194e6f78f..ee91f3b44444 100644
--- a/include/asm-sparc/errno.h
+++ b/include/asm-sparc/errno.h
@@ -101,5 +101,9 @@
#define ENOMEDIUM 125 /* No medium found */
#define EMEDIUMTYPE 126 /* Wrong medium type */
+#define ENOKEY 127 /* Required key not available */
+#define EKEYEXPIRED 128 /* Key has expired */
+#define EKEYREVOKED 129 /* Key has been revoked */
+#define EKEYREJECTED 130 /* Key was rejected by service */
#endif
diff --git a/include/asm-sparc/timex.h b/include/asm-sparc/timex.h
index 4b628788b86b..870a3b39c8f5 100644
--- a/include/asm-sparc/timex.h
+++ b/include/asm-sparc/timex.h
@@ -7,10 +7,6 @@
#define _ASMsparc_TIMEX_H
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
-#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
-#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
- (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
- << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
/* XXX Maybe do something better at some point... -DaveM */
typedef unsigned long cycles_t;
diff --git a/include/asm-sparc64/errno.h b/include/asm-sparc64/errno.h
index e7890f0e9aa4..6dc57bc985c0 100644
--- a/include/asm-sparc64/errno.h
+++ b/include/asm-sparc64/errno.h
@@ -101,5 +101,9 @@
#define ENOMEDIUM 125 /* No medium found */
#define EMEDIUMTYPE 126 /* Wrong medium type */
+#define ENOKEY 127 /* Required key not available */
+#define EKEYEXPIRED 128 /* Key has expired */
+#define EKEYREVOKED 129 /* Key has been revoked */
+#define EKEYREJECTED 130 /* Key was rejected by service */
#endif /* !(_SPARC64_ERRNO_H) */
diff --git a/include/asm-sparc64/timex.h b/include/asm-sparc64/timex.h
index 904100a07a7d..9e8d4175bcb2 100644
--- a/include/asm-sparc64/timex.h
+++ b/include/asm-sparc64/timex.h
@@ -9,10 +9,6 @@
#include <asm/timer.h>
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
-#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
-#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
- (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
- << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
/* Getting on the cycle counter on sparc64. */
typedef unsigned long cycles_t;
diff --git a/include/asm-v850/timex.h b/include/asm-v850/timex.h
index 729e2789fcf3..6279e5a0ee8e 100644
--- a/include/asm-v850/timex.h
+++ b/include/asm-v850/timex.h
@@ -7,10 +7,6 @@
#define __V850_TIMEX_H__
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
-#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
-#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
- (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
- << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
typedef unsigned long cycles_t;
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
index 9839b92d2706..cfeec4e555ab 100644
--- a/include/asm-x86_64/elf.h
+++ b/include/asm-x86_64/elf.h
@@ -143,6 +143,11 @@ typedef struct user_i387_struct elf_fpregset_t;
#ifdef __KERNEL__
extern void set_personality_64bit(void);
#define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ */
+#define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
/*
* An executable for which elf_read_implies_exec() returns TRUE will
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
index 98aa63e6bab6..89ba26806b7a 100644
--- a/include/asm-x86_64/mpspec.h
+++ b/include/asm-x86_64/mpspec.h
@@ -188,7 +188,7 @@ extern void mp_register_lapic_address (u64 address);
extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
extern void mp_config_acpi_legacy_irqs (void);
-extern void mp_register_gsi (u32 gsi, int edge_level, int active_high_low);
+extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low);
#endif /*CONFIG_X86_IO_APIC*/
#endif
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index 9962665c3a98..e3ee503a0a21 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X8664_NUMA_H
#define _ASM_X8664_NUMA_H 1
+#include <linux/nodemask.h>
+
#define MAXNODE 8
#define NODEMASK 0xff
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index c5c2b01cfcfa..b45e80c76653 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -130,18 +130,10 @@ extern __inline__ int get_order(unsigned long size)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-#define __VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define __VM_STACK_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
#define VM_DATA_DEFAULT_FLAGS \
- (test_thread_flag(TIF_IA32) ? vm_data_default_flags32 : \
- vm_data_default_flags)
+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define VM_STACK_DEFAULT_FLAGS \
- (test_thread_flag(TIF_IA32) ? vm_stack_flags32 : vm_stack_flags)
-
#define CONFIG_ARCH_GATE_AREA 1
#ifndef __ASSEMBLY__
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 6671eb65500b..55ac71fd4a60 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -123,7 +123,7 @@ struct alt_instr {
* If you use variable sized constraints like "m" or "g" in the
* replacement maake sure to pad to the worst case length.
*/
-#define alternative_input(oldinstr, newinstr, feature, input) \
+#define alternative_input(oldinstr, newinstr, feature, input...) \
asm volatile ("661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
" .align 8\n" \
@@ -135,7 +135,7 @@ struct alt_instr {
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" :: "i" (feature), input)
+ ".previous" :: "i" (feature), ##input)
/*
* Clear and set 'TS' bit respectively
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index 895294a7cd3f..5eb8ff3d6bbd 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -13,10 +13,6 @@
#include <asm/hpet.h>
#define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */
-#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
-#define FINETUNE ((((((int)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
- (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
- << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
typedef unsigned long long cycles_t;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4efe45d1af7e..b2059869cb92 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -19,8 +19,8 @@
struct request_queue;
typedef struct request_queue request_queue_t;
-struct elevator_s;
-typedef struct elevator_s elevator_t;
+struct elevator_queue;
+typedef struct elevator_queue elevator_t;
struct request_pm_state;
#define BLKDEV_MIN_RQ 4
@@ -52,6 +52,20 @@ struct as_io_context {
sector_t seek_mean;
};
+struct cfq_queue;
+struct cfq_io_context {
+ void (*dtor)(struct cfq_io_context *);
+ void (*exit)(struct cfq_io_context *);
+
+ struct io_context *ioc;
+
+ /*
+ * circular list of cfq_io_contexts belonging to a process io context
+ */
+ struct list_head list;
+ struct cfq_queue *cfqq;
+};
+
/*
* This is the per-process I/O subsystem state. It is refcounted and
* kmalloc'ed. Currently all fields are modified in process io context
@@ -67,7 +81,10 @@ struct io_context {
unsigned long last_waited; /* Time last woken after wait for request */
int nr_batch_requests; /* Number of requests left in the batch */
+ spinlock_t lock;
+
struct as_io_context *aic;
+ struct cfq_io_context *cic;
};
void put_io_context(struct io_context *ioc);
@@ -80,6 +97,7 @@ struct request_list {
int count[2];
mempool_t *rq_pool;
wait_queue_head_t wait[2];
+ wait_queue_head_t drain;
};
#define BLK_MAX_CDB 16
@@ -279,7 +297,7 @@ struct request_queue
*/
struct list_head queue_head;
struct request *last_merge;
- elevator_t elevator;
+ elevator_t *elevator;
/*
* the queue request freelist, one for reads and one for writes
@@ -342,6 +360,7 @@ struct request_queue
unsigned long nr_requests; /* Max # of requests */
unsigned int nr_congestion_on;
unsigned int nr_congestion_off;
+ unsigned int nr_batching;
unsigned short max_sectors;
unsigned short max_hw_sectors;
@@ -381,6 +400,7 @@ struct request_queue
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
#define QUEUE_FLAG_ORDERED 8 /* supports ordered writes */
+#define QUEUE_FLAG_DRAIN 9 /* draining queue for sched switch */
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
@@ -617,6 +637,8 @@ extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(request_queue_t *);
extern void __generic_unplug_device(request_queue_t *);
extern long nr_blockdev_pages(void);
+extern void blk_wait_queue_drained(request_queue_t *);
+extern void blk_finish_queue_drain(request_queue_t *);
int blk_get_queue(request_queue_t *);
request_queue_t *blk_alloc_queue(int);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 367a8a313506..47fb6a02d630 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -155,7 +155,6 @@ void invalidate_bdev(struct block_device *, int);
int sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
-void wake_up_buffer(struct buffer_head *bh);
int fsync_bdev(struct block_device *);
struct super_block *freeze_bdev(struct block_device *);
void thaw_bdev(struct block_device *, struct super_block *);
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index bcc9410761d9..1c1f5efffd64 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -499,6 +499,7 @@ struct cdrom_generic_command
#define GPMODE_VENDOR_PAGE 0x00
#define GPMODE_R_W_ERROR_PAGE 0x01
#define GPMODE_WRITE_PARMS_PAGE 0x05
+#define GPMODE_WCACHING_PAGE 0x08
#define GPMODE_AUDIO_CTL_PAGE 0x0e
#define GPMODE_POWER_PAGE 0x1a
#define GPMODE_FAULT_FAIL_PAGE 0x1c
@@ -947,6 +948,8 @@ struct cdrom_device_info {
__u8 reserved : 6; /* not used yet */
int cdda_method; /* see flags */
__u8 last_sense;
+ __u8 media_written; /* dirty flag, DVD+RW bookkeeping */
+ unsigned short mmc3_profile; /* current MMC3 profile */
int for_data;
int (*exit)(struct cdrom_device_info *);
int mrw_mode_page;
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 77f59742c407..71da7d1260cd 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -382,6 +382,8 @@ COMPATIBLE_IOCTL(CDROMREADALL)
COMPATIBLE_IOCTL(DVD_READ_STRUCT)
COMPATIBLE_IOCTL(DVD_WRITE_STRUCT)
COMPATIBLE_IOCTL(DVD_AUTH)
+/* pktcdvd */
+COMPATIBLE_IOCTL(PACKET_CTRL_CMD)
/* Big L */
ULONG_IOCTL(LOOP_SET_FD)
ULONG_IOCTL(LOOP_CHANGE_FD)
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b378e57b2743..f4bc1ac23daa 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -28,7 +28,7 @@ struct vfsmount;
* "quick string" -- eases parameter passing, but more importantly
* saves "metadata" about the string (ie length and the hash).
*
- * hash comes first so it snuggles against d_parent and d_bucket in the
+ * hash comes first so it snuggles against d_parent in the
* dentry.
*/
struct qstr {
@@ -91,7 +91,6 @@ struct dentry {
* so they all fit in a 16-byte range, with 16-byte alignment.
*/
struct dentry *d_parent; /* parent directory */
- struct hlist_head *d_bucket; /* lookup hash bucket */
struct qstr d_name;
struct list_head d_lru; /* LRU list */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 27e8183f4776..8cf0e3f290bf 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -22,9 +22,9 @@ typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, int);
typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
typedef int (elevator_init_fn) (request_queue_t *, elevator_t *);
-typedef void (elevator_exit_fn) (request_queue_t *, elevator_t *);
+typedef void (elevator_exit_fn) (elevator_t *);
-struct elevator_s
+struct elevator_ops
{
elevator_merge_fn *elevator_merge_fn;
elevator_merged_fn *elevator_merged_fn;
@@ -48,12 +48,32 @@ struct elevator_s
elevator_init_fn *elevator_init_fn;
elevator_exit_fn *elevator_exit_fn;
+};
- void *elevator_data;
+#define ELV_NAME_MAX (16)
- struct kobject kobj;
+/*
+ * identifies an elevator type, such as AS or deadline
+ */
+struct elevator_type
+{
+ struct list_head list;
+ struct elevator_ops ops;
+ struct elevator_type *elevator_type;
struct kobj_type *elevator_ktype;
- const char *elevator_name;
+ char elevator_name[ELV_NAME_MAX];
+ struct module *elevator_owner;
+};
+
+/*
+ * each queue has an elevator_queue assoicated with it
+ */
+struct elevator_queue
+{
+ struct elevator_ops *ops;
+ void *elevator_data;
+ struct kobject kobj;
+ struct elevator_type *elevator_type;
};
/*
@@ -79,28 +99,19 @@ extern int elv_set_request(request_queue_t *, struct request *, int);
extern void elv_put_request(request_queue_t *, struct request *);
/*
- * noop I/O scheduler. always merges, always inserts new request at tail
- */
-extern elevator_t elevator_noop;
-
-/*
- * deadline i/o scheduler. uses request time outs to prevent indefinite
- * starvation
+ * io scheduler registration
*/
-extern elevator_t iosched_deadline;
+extern int elv_register(struct elevator_type *);
+extern void elv_unregister(struct elevator_type *);
/*
- * anticipatory I/O scheduler
+ * io scheduler sysfs switching
*/
-extern elevator_t iosched_as;
+extern ssize_t elv_iosched_show(request_queue_t *, char *);
+extern ssize_t elv_iosched_store(request_queue_t *, const char *, size_t);
-/*
- * completely fair queueing I/O scheduler
- */
-extern elevator_t iosched_cfq;
-
-extern int elevator_init(request_queue_t *, elevator_t *);
-extern void elevator_exit(request_queue_t *);
+extern int elevator_init(request_queue_t *, char *);
+extern void elevator_exit(elevator_t *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
extern int elv_try_merge(struct request *, struct bio *);
extern int elv_try_last_merge(request_queue_t *, struct bio *);
@@ -119,4 +130,13 @@ extern int elv_try_last_merge(request_queue_t *, struct bio *);
#define ELEVATOR_INSERT_BACK 2
#define ELEVATOR_INSERT_SORT 3
+/*
+ * return values from elevator_may_queue_fn
+ */
+enum {
+ ELV_MQUEUE_MAY,
+ ELV_MQUEUE_NO,
+ ELV_MQUEUE_MUST,
+};
+
#endif
diff --git a/include/linux/fb.h b/include/linux/fb.h
index c38132231c64..a09f31d7c6ee 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -318,6 +318,7 @@ struct fb_cursor {
struct fbcurpos hot; /* cursor hot spot */
struct fb_image image; /* Cursor image */
/* all fields below are for fbcon use only */
+ int flash; /* cursor blink */
char *data; /* copy of bitmap */
};
@@ -555,6 +556,82 @@ struct fb_ops {
int (*fb_mmap)(struct fb_info *info, struct file *file, struct vm_area_struct *vma);
};
+#ifdef CONFIG_FB_TILEBLITTING
+
+#define FB_TILE_CURSOR_NONE 0
+#define FB_TILE_CURSOR_UNDERLINE 1
+#define FB_TILE_CURSOR_LOWER_THIRD 2
+#define FB_TILE_CURSOR_LOWER_HALF 3
+#define FB_TILE_CURSOR_TWO_THIRDS 4
+#define FB_TILE_CURSOR_BLOCK 5
+
+struct fb_tilemap {
+ __u32 width; /* width of each tile in pixels */
+ __u32 height; /* height of each tile in scanlines */
+ __u32 depth; /* color depth of each tile */
+ __u32 length; /* number of tiles in the map */
+ __u8 *data; /* actual tile map: a bitmap array, packed
+ to the nearest byte */
+};
+
+struct fb_tilerect {
+ __u32 sx; /* origin in the x-axis */
+ __u32 sy; /* origin in the y-axis */
+ __u32 width; /* number of tiles in the x-axis */
+ __u32 height; /* number of tiles in the y-axis */
+ __u32 index; /* what tile to use: index to tile map */
+ __u32 fg; /* foreground color */
+ __u32 bg; /* background color */
+ __u32 rop; /* raster operation */
+};
+
+struct fb_tilearea {
+ __u32 sx; /* source origin in the x-axis */
+ __u32 sy; /* source origin in the y-axis */
+ __u32 dx; /* destination origin in the x-axis */
+ __u32 dy; /* destination origin in the y-axis */
+ __u32 width; /* number of tiles in the x-axis */
+ __u32 height; /* number of tiles in the y-axis */
+};
+
+struct fb_tileblit {
+ __u32 sx; /* origin in the x-axis */
+ __u32 sy; /* origin in the y-axis */
+ __u32 width; /* number of tiles in the x-axis */
+ __u32 height; /* number of tiles in the y-axis */
+ __u32 fg; /* foreground color */
+ __u32 bg; /* background color */
+ __u32 length; /* number of tiles to draw */
+ __u32 *indices; /* array of indices to tile map */
+};
+
+struct fb_tilecursor {
+ __u32 sx; /* cursor position in the x-axis */
+ __u32 sy; /* cursor position in the y-axis */
+ __u32 mode; /* 0 = erase, 1 = draw */
+ __u32 shape; /* see FB_TILE_CURSOR_* */
+ __u32 fg; /* foreground color */
+ __u32 bg; /* background color */
+};
+
+struct fb_tile_ops {
+ /* set tile characteristics */
+ void (*fb_settile)(struct fb_info *info, struct fb_tilemap *map);
+
+ /* all dimensions from hereon are in terms of tiles */
+
+ /* move a rectangular region of tiles from one area to another*/
+ void (*fb_tilecopy)(struct fb_info *info, struct fb_tilearea *area);
+ /* fill a rectangular region with a tile */
+ void (*fb_tilefill)(struct fb_info *info, struct fb_tilerect *rect);
+ /* copy an array of tiles */
+ void (*fb_tileblit)(struct fb_info *info, struct fb_tileblit *blit);
+ /* cursor */
+ void (*fb_tilecursor)(struct fb_info *info,
+ struct fb_tilecursor *cursor);
+};
+#endif /* CONFIG_FB_TILEBLITTING */
+
/* FBINFO_* = fb_info.flags bit flags */
#define FBINFO_MODULE 0x0001 /* Low-level driver is a module */
#define FBINFO_HWACCEL_DISABLED 0x0002
@@ -586,6 +663,7 @@ struct fb_ops {
from userspace */
#define FBINFO_MISC_MODESWITCH 0x20000 /* mode switch */
#define FBINFO_MISC_MODESWITCHLATE 0x40000 /* init hardware later */
+#define FBINFO_MISC_TILEBLITTING 0x80000 /* use tile blitting */
struct fb_info {
int node;
@@ -601,6 +679,10 @@ struct fb_info {
struct fb_cmap cmap; /* Current cmap */
struct list_head modelist; /* mode list */
struct fb_ops *fbops;
+ struct device *device;
+#ifdef CONFIG_FB_TILEBLITTING
+ struct fb_tile_ops *tileops; /* Tile Blitting */
+#endif
char __iomem *screen_base; /* Virtual address */
unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
int currcon; /* Current VC. */
@@ -608,7 +690,7 @@ struct fb_info {
#define FBINFO_STATE_RUNNING 0
#define FBINFO_STATE_SUSPENDED 1
u32 state; /* Hardware state i.e suspend */
-
+ void *fbcon_par; /* fbcon use-only private area */
/* From here on everything is device dependent */
void *par;
};
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4f6fe6b575a8..6768655fd11d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -758,6 +758,7 @@ struct super_block {
int s_need_sync_fs;
atomic_t s_active;
void *s_security;
+ struct xattr_handler **s_xattr;
struct list_head s_dirty; /* dirty inodes */
struct list_head s_io; /* parked for writeback */
@@ -981,7 +982,8 @@ struct super_operations {
#define I_DIRTY_SYNC 1 /* Not dirty enough for O_DATASYNC */
#define I_DIRTY_DATASYNC 2 /* Data-related inode changes pending */
#define I_DIRTY_PAGES 4 /* Data-related inode changes pending */
-#define I_LOCK 8
+#define __I_LOCK 3
+#define I_LOCK (1 << __I_LOCK)
#define I_FREEING 16
#define I_CLEAR 32
#define I_NEW 64
@@ -1254,7 +1256,6 @@ extern struct block_device *bdget(dev_t);
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
-extern int blkdev_open(struct inode *, struct file *);
extern struct block_device *open_by_devnum(dev_t, unsigned);
extern struct file_operations def_blk_fops;
extern struct address_space_operations def_blk_aops;
@@ -1339,7 +1340,9 @@ extern sector_t bmap(struct inode *, sector_t);
extern int setattr_mask(unsigned int);
extern int notify_change(struct dentry *, struct iattr *);
extern int permission(struct inode *, int, struct nameidata *);
-extern int vfs_permission(struct inode *, int);
+extern int generic_permission(struct inode *, int,
+ int (*check_acl)(struct inode *, int));
+
extern int get_write_access(struct inode *);
extern int deny_write_access(struct file *);
static inline void put_write_access(struct inode * inode)
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index a68437640135..3481de9c5837 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -147,10 +147,10 @@ struct i2o_controller {
struct pci_dev *pdev; /* PCI device */
- int short_req:1; /* use small block sizes */
- int no_quiesce:1; /* dont quiesce before reset */
- int raptor:1; /* split bar */
- int promise:1; /* Promise controller */
+ unsigned int short_req:1; /* use small block sizes */
+ unsigned int no_quiesce:1; /* dont quiesce before reset */
+ unsigned int raptor:1; /* split bar */
+ unsigned int promise:1; /* Promise controller */
#ifdef CONFIG_MTRR
int mtrr_reg0;
@@ -180,9 +180,9 @@ struct i2o_controller {
struct i2o_dma in_queue; /* inbound message queue Host->IOP */
struct i2o_dma out_queue; /* outbound message queue IOP->Host */
- int battery:1; /* Has a battery backup */
- int io_alloc:1; /* An I/O resource was allocated */
- int mem_alloc:1; /* A memory resource was allocated */
+ unsigned int battery:1; /* Has a battery backup */
+ unsigned int io_alloc:1; /* An I/O resource was allocated */
+ unsigned int mem_alloc:1; /* A memory resource was allocated */
struct resource io_resource; /* I/O resource allocated to the IOP */
struct resource mem_resource; /* Mem resource allocated to the IOP */
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index e65b90f1962c..dfdd307872bb 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -299,6 +299,7 @@ enum jbd_state_bits {
BH_JBDDirty, /* Is dirty but journaled */
BH_State, /* Pins most journal_head state */
BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
+ BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
};
BUFFER_FNS(JBD, jbd)
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 459630f41680..d45eff83b906 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -3,10 +3,72 @@
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/seqlock.h>
-#include <asm/system.h>
+#include <linux/time.h>
+#include <linux/timex.h>
#include <asm/param.h> /* for HZ */
+#include <asm/div64.h>
+
+#ifndef div_long_long_rem
+#define div_long_long_rem(dividend,divisor,remainder) \
+({ \
+ u64 result = dividend; \
+ *remainder = do_div(result,divisor); \
+ result; \
+})
+#endif
+
+/*
+ * The following defines establish the engineering parameters of the PLL
+ * model. The HZ variable establishes the timer interrupt frequency, 100 Hz
+ * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the
+ * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the
+ * nearest power of two in order to avoid hardware multiply operations.
+ */
+#if HZ >= 12 && HZ < 24
+# define SHIFT_HZ 4
+#elif HZ >= 24 && HZ < 48
+# define SHIFT_HZ 5
+#elif HZ >= 48 && HZ < 96
+# define SHIFT_HZ 6
+#elif HZ >= 96 && HZ < 192
+# define SHIFT_HZ 7
+#elif HZ >= 192 && HZ < 384
+# define SHIFT_HZ 8
+#elif HZ >= 384 && HZ < 768
+# define SHIFT_HZ 9
+#elif HZ >= 768 && HZ < 1536
+# define SHIFT_HZ 10
+#else
+# error You lose.
+#endif
+
+/* LATCH is used in the interval timer and ftape setup. */
+#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
+
+/* Suppose we want to devide two numbers NOM and DEN: NOM/DEN, the we can
+ * improve accuracy by shifting LSH bits, hence calculating:
+ * (NOM << LSH) / DEN
+ * This however means trouble for large NOM, because (NOM << LSH) may no
+ * longer fit in 32 bits. The following way of calculating this gives us
+ * some slack, under the following conditions:
+ * - (NOM / DEN) fits in (32 - LSH) bits.
+ * - (NOM % DEN) fits in (32 - LSH) bits.
+ */
+#define SH_DIV(NOM,DEN,LSH) ( ((NOM / DEN) << LSH) \
+ + (((NOM % DEN) << LSH) + DEN / 2) / DEN)
+
+/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
+#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
+
+/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
+#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
+
+/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
+#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
+
+/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */
+/* a value TUSEC for TICK_USEC (can be set bij adjtimex) */
+#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
/*
* The 64-bit value is not volatile - you MUST NOT read it
@@ -50,4 +112,320 @@ static inline u64 get_jiffies_64(void)
((long)(a) - (long)(b) >= 0))
#define time_before_eq(a,b) time_after_eq(b,a)
+/*
+ * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * so jiffies wrap bugs show up earlier.
+ */
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+
+/*
+ * Change timeval to jiffies, trying to avoid the
+ * most obvious overflows..
+ *
+ * And some not so obvious.
+ *
+ * Note that we don't want to return MAX_LONG, because
+ * for various timeout reasons we often end up having
+ * to wait "jiffies+1" in order to guarantee that we wait
+ * at _least_ "jiffies" - so "jiffies+1" had better still
+ * be positive.
+ */
+#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)
+
+/*
+ * We want to do realistic conversions of time so we need to use the same
+ * values the update wall clock code uses as the jiffies size. This value
+ * is: TICK_NSEC (which is defined in timex.h). This
+ * is a constant and is in nanoseconds. We will use scaled math
+ * with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and
+ * NSEC_JIFFIE_SC. Note that these defines contain nothing but
+ * constants and so are computed at compile time. SHIFT_HZ (computed in
+ * timex.h) adjusts the scaling for different HZ values.
+
+ * Scaled math??? What is that?
+ *
+ * Scaled math is a way to do integer math on values that would,
+ * otherwise, either overflow, underflow, or cause undesired div
+ * instructions to appear in the execution path. In short, we "scale"
+ * up the operands so they take more bits (more precision, less
+ * underflow), do the desired operation and then "scale" the result back
+ * by the same amount. If we do the scaling by shifting we avoid the
+ * costly mpy and the dastardly div instructions.
+
+ * Suppose, for example, we want to convert from seconds to jiffies
+ * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE. The
+ * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We
+ * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we
+ * might calculate at compile time, however, the result will only have
+ * about 3-4 bits of precision (less for smaller values of HZ).
+ *
+ * So, we scale as follows:
+ * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE);
+ * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE;
+ * Then we make SCALE a power of two so:
+ * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE;
+ * Now we define:
+ * #define SEC_CONV = ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE))
+ * jiff = (sec * SEC_CONV) >> SCALE;
+ *
+ * Often the math we use will expand beyond 32-bits so we tell C how to
+ * do this and pass the 64-bit result of the mpy through the ">> SCALE"
+ * which should take the result back to 32-bits. We want this expansion
+ * to capture as much precision as possible. At the same time we don't
+ * want to overflow so we pick the SCALE to avoid this. In this file,
+ * that means using a different scale for each range of HZ values (as
+ * defined in timex.h).
+ *
+ * For those who want to know, gcc will give a 64-bit result from a "*"
+ * operator if the result is a long long AND at least one of the
+ * operands is cast to long long (usually just prior to the "*" so as
+ * not to confuse it into thinking it really has a 64-bit operand,
+ * which, by the way, it can do, but it takes more code and at least 2
+ * mpys).
+
+ * We also need to be aware that one second in nanoseconds is only a
+ * couple of bits away from overflowing a 32-bit word, so we MUST use
+ * 64-bits to get the full range time in nanoseconds.
+
+ */
+
+/*
+ * Here are the scales we will use. One for seconds, nanoseconds and
+ * microseconds.
+ *
+ * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and
+ * check if the sign bit is set. If not, we bump the shift count by 1.
+ * (Gets an extra bit of precision where we can use it.)
+ * We know it is set for HZ = 1024 and HZ = 100 not for 1000.
+ * Haven't tested others.
+
+ * Limits of cpp (for #if expressions) only long (no long long), but
+ * then we only need the most significant bit.
+ */
+
+#define SEC_JIFFIE_SC (31 - SHIFT_HZ)
+#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000)
+#undef SEC_JIFFIE_SC
+#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
+#endif
+#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
+#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
+#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
+ TICK_NSEC -1) / (u64)TICK_NSEC))
+
+#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
+ TICK_NSEC -1) / (u64)TICK_NSEC))
+#define USEC_CONVERSION \
+ ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
+ TICK_NSEC -1) / (u64)TICK_NSEC))
+/*
+ * USEC_ROUND is used in the timeval to jiffie conversion. See there
+ * for more details. It is the scaled resolution rounding value. Note
+ * that it is a 64-bit value. Since, when it is applied, we are already
+ * in jiffies (albeit scaled), it is nothing but the bits we will shift
+ * off.
+ */
+#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
+/*
+ * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
+ * into seconds. The 64-bit case will overflow if we are not careful,
+ * so use the messy SH_DIV macro to do it. Still all constants.
+ */
+#if BITS_PER_LONG < 64
+# define MAX_SEC_IN_JIFFIES \
+ (long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
+#else /* take care of overflow on 64 bits machines */
+# define MAX_SEC_IN_JIFFIES \
+ (SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
+
+#endif
+
+/*
+ * Convert jiffies to milliseconds and back.
+ *
+ * Avoid unnecessary multiplications/divisions in the
+ * two most common HZ cases:
+ */
+static inline unsigned int jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= 1000 && !(1000 % HZ)
+ return (1000 / HZ) * j;
+#elif HZ > 1000 && !(HZ % 1000)
+ return (j + (HZ / 1000) - 1)/(HZ / 1000);
+#else
+ return (j * 1000) / HZ;
+#endif
+}
+
+static inline unsigned int jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= 1000 && !(1000 % HZ)
+ return (1000000 / HZ) * j;
+#elif HZ > 1000 && !(HZ % 1000)
+ return (j*1000 + (HZ - 1000))/(HZ / 1000);
+#else
+ return (j * 1000000) / HZ;
+#endif
+}
+
+static inline unsigned long msecs_to_jiffies(const unsigned int m)
+{
+ if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= 1000 && !(1000 % HZ)
+ return (m + (1000 / HZ) - 1) / (1000 / HZ);
+#elif HZ > 1000 && !(HZ % 1000)
+ return m * (HZ / 1000);
+#else
+ return (m * HZ + 999) / 1000;
+#endif
+}
+
+/*
+ * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
+ * that a remainder subtract here would not do the right thing as the
+ * resolution values don't fall on second boundaries. I.e. the line:
+ * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ *
+ * Rather, we just shift the bits off the right.
+ *
+ * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
+ * value to a scaled second value.
+ */
+static __inline__ unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+ unsigned long sec = value->tv_sec;
+ long nsec = value->tv_nsec + TICK_NSEC - 1;
+
+ if (sec >= MAX_SEC_IN_JIFFIES){
+ sec = MAX_SEC_IN_JIFFIES;
+ nsec = 0;
+ }
+ return (((u64)sec * SEC_CONVERSION) +
+ (((u64)nsec * NSEC_CONVERSION) >>
+ (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+
+}
+
+static __inline__ void
+jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
+{
+ /*
+ * Convert jiffies to nanoseconds and separate with
+ * one divide.
+ */
+ u64 nsec = (u64)jiffies * TICK_NSEC;
+ value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
+}
+
+/* Same for "timeval"
+ *
+ * Well, almost. The problem here is that the real system resolution is
+ * in nanoseconds and the value being converted is in micro seconds.
+ * Also for some machines (those that use HZ = 1024, in particular),
+ * there is a LARGE error in the tick size in microseconds.
+
+ * The solution we use is to do the rounding AFTER we convert the
+ * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
+ * Instruction wise, this should cost only an additional add with carry
+ * instruction above the way it was done above.
+ */
+static __inline__ unsigned long
+timeval_to_jiffies(const struct timeval *value)
+{
+ unsigned long sec = value->tv_sec;
+ long usec = value->tv_usec;
+
+ if (sec >= MAX_SEC_IN_JIFFIES){
+ sec = MAX_SEC_IN_JIFFIES;
+ usec = 0;
+ }
+ return (((u64)sec * SEC_CONVERSION) +
+ (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
+ (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+}
+
+static __inline__ void
+jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
+{
+ /*
+ * Convert jiffies to nanoseconds and separate with
+ * one divide.
+ */
+ u64 nsec = (u64)jiffies * TICK_NSEC;
+ value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
+ value->tv_usec /= NSEC_PER_USEC;
+}
+
+/*
+ * Convert jiffies/jiffies_64 to clock_t and back.
+ */
+static inline clock_t jiffies_to_clock_t(long x)
+{
+#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+ return x / (HZ / USER_HZ);
+#else
+ u64 tmp = (u64)x * TICK_NSEC;
+ do_div(tmp, (NSEC_PER_SEC / USER_HZ));
+ return (long)tmp;
+#endif
+}
+
+static inline unsigned long clock_t_to_jiffies(unsigned long x)
+{
+#if (HZ % USER_HZ)==0
+ if (x >= ~0UL / (HZ / USER_HZ))
+ return ~0UL;
+ return x * (HZ / USER_HZ);
+#else
+ u64 jif;
+
+ /* Don't worry about loss of precision here .. */
+ if (x >= ~0UL / HZ * USER_HZ)
+ return ~0UL;
+
+ /* .. but do try to contain it here */
+ jif = x * (u64) HZ;
+ do_div(jif, USER_HZ);
+ return jif;
+#endif
+}
+
+static inline u64 jiffies_64_to_clock_t(u64 x)
+{
+#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+ do_div(x, HZ / USER_HZ);
+#else
+ /*
+ * There are better ways that don't overflow early,
+ * but even this doesn't overflow in hundreds of years
+ * in 64 bits, so..
+ */
+ x *= TICK_NSEC;
+ do_div(x, (NSEC_PER_SEC / USER_HZ));
+#endif
+ return x;
+}
+
+static inline u64 nsec_to_clock_t(u64 x)
+{
+#if (NSEC_PER_SEC % USER_HZ) == 0
+ do_div(x, (NSEC_PER_SEC / USER_HZ));
+#elif (USER_HZ % 512) == 0
+ x *= USER_HZ/512;
+ do_div(x, (NSEC_PER_SEC / 512));
+#else
+ /*
+ * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
+ * overflow after 64.99 years.
+ * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
+ */
+ x *= 9;
+ do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2))
+ / USER_HZ));
+#endif
+ return x;
+}
+
#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 40307a28bb4b..cf2f984e1b95 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -136,6 +136,7 @@ extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in
extern int panic_on_oops;
extern int tainted;
extern const char *print_tainted(void);
+extern void add_taint(unsigned);
/* Values used for system_state */
extern enum system_states {
@@ -150,6 +151,8 @@ extern enum system_states {
#define TAINT_FORCED_MODULE (1<<1)
#define TAINT_UNSAFE_SMP (1<<2)
#define TAINT_FORCED_RMMOD (1<<3)
+#define TAINT_MACHINE_CHECK (1<<4)
+#define TAINT_BAD_PAGE (1<<5)
extern void dump_stack(void);
diff --git a/include/linux/key-ui.h b/include/linux/key-ui.h
new file mode 100644
index 000000000000..60cc7b762e78
--- /dev/null
+++ b/include/linux/key-ui.h
@@ -0,0 +1,97 @@
+/* key-ui.h: key userspace interface stuff for use by keyfs
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_KEY_UI_H
+#define _LINUX_KEY_UI_H
+
+#include <linux/key.h>
+
+/* the key tree */
+extern struct rb_root key_serial_tree;
+extern spinlock_t key_serial_lock;
+
+/* required permissions */
+#define KEY_VIEW 0x01 /* require permission to view attributes */
+#define KEY_READ 0x02 /* require permission to read content */
+#define KEY_WRITE 0x04 /* require permission to update / modify */
+#define KEY_SEARCH 0x08 /* require permission to search (keyring) or find (key) */
+#define KEY_LINK 0x10 /* require permission to link */
+#define KEY_ALL 0x1f /* all the above permissions */
+
+/*
+ * the keyring payload contains a list of the keys to which the keyring is
+ * subscribed
+ */
+struct keyring_list {
+ unsigned maxkeys; /* max keys this list can hold */
+ unsigned nkeys; /* number of keys currently held */
+ struct key *keys[0];
+};
+
+
+/*
+ * check to see whether permission is granted to use a key in the desired way
+ */
+static inline int key_permission(const struct key *key, key_perm_t perm)
+{
+ key_perm_t kperm;
+
+ if (key->uid == current->fsuid)
+ kperm = key->perm >> 16;
+ else if (key->gid != -1 &&
+ key->perm & KEY_GRP_ALL &&
+ in_group_p(key->gid)
+ )
+ kperm = key->perm >> 8;
+ else
+ kperm = key->perm;
+
+ kperm = kperm & perm & KEY_ALL;
+
+ return kperm == perm;
+}
+
+/*
+ * check to see whether permission is granted to use a key in at least one of
+ * the desired ways
+ */
+static inline int key_any_permission(const struct key *key, key_perm_t perm)
+{
+ key_perm_t kperm;
+
+ if (key->uid == current->fsuid)
+ kperm = key->perm >> 16;
+ else if (key->gid != -1 &&
+ key->perm & KEY_GRP_ALL &&
+ in_group_p(key->gid)
+ )
+ kperm = key->perm >> 8;
+ else
+ kperm = key->perm;
+
+ kperm = kperm & perm & KEY_ALL;
+
+ return kperm != 0;
+}
+
+
+extern struct key *lookup_user_key(key_serial_t id, int create, int part,
+ key_perm_t perm);
+
+extern long join_session_keyring(const char *name);
+
+extern struct key_type *key_type_lookup(const char *type);
+extern void key_type_put(struct key_type *ktype);
+
+#define key_negative_timeout 60 /* default timeout on a negative key's existence */
+
+
+#endif /* _LINUX_KEY_UI_H */
diff --git a/include/linux/key.h b/include/linux/key.h
new file mode 100644
index 000000000000..e914be777c4a
--- /dev/null
+++ b/include/linux/key.h
@@ -0,0 +1,284 @@
+/* key.h: authentication token and access key management
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * See Documentation/keys.txt for information on keys/keyrings.
+ */
+
+#ifndef _LINUX_KEY_H
+#define _LINUX_KEY_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+#ifdef __KERNEL__
+
+/* key handle serial number */
+typedef int32_t key_serial_t;
+
+/* key handle permissions mask */
+typedef uint32_t key_perm_t;
+
+struct key;
+
+#ifdef CONFIG_KEYS
+
+#undef KEY_DEBUGGING
+
+#define KEY_USR_VIEW 0x00010000 /* user can view a key's attributes */
+#define KEY_USR_READ 0x00020000 /* user can read key payload / view keyring */
+#define KEY_USR_WRITE 0x00040000 /* user can update key payload / add link to keyring */
+#define KEY_USR_SEARCH 0x00080000 /* user can find a key in search / search a keyring */
+#define KEY_USR_LINK 0x00100000 /* user can create a link to a key/keyring */
+#define KEY_USR_ALL 0x001f0000
+
+#define KEY_GRP_VIEW 0x00000100 /* group permissions... */
+#define KEY_GRP_READ 0x00000200
+#define KEY_GRP_WRITE 0x00000400
+#define KEY_GRP_SEARCH 0x00000800
+#define KEY_GRP_LINK 0x00001000
+#define KEY_GRP_ALL 0x00001f00
+
+#define KEY_OTH_VIEW 0x00000001 /* third party permissions... */
+#define KEY_OTH_READ 0x00000002
+#define KEY_OTH_WRITE 0x00000004
+#define KEY_OTH_SEARCH 0x00000008
+#define KEY_OTH_LINK 0x00000010
+#define KEY_OTH_ALL 0x0000001f
+
+struct seq_file;
+struct user_struct;
+
+struct key_type;
+struct key_owner;
+struct keyring_list;
+struct keyring_name;
+
+/*****************************************************************************/
+/*
+ * authentication token / access credential / keyring
+ * - types of key include:
+ * - keyrings
+ * - disk encryption IDs
+ * - Kerberos TGTs and tickets
+ */
+struct key {
+ atomic_t usage; /* number of references */
+ key_serial_t serial; /* key serial number */
+ struct rb_node serial_node;
+ struct key_type *type; /* type of key */
+ rwlock_t lock; /* examination vs change lock */
+ struct rw_semaphore sem; /* change vs change sem */
+ struct key_user *user; /* owner of this key */
+ time_t expiry; /* time at which key expires (or 0) */
+ uid_t uid;
+ gid_t gid;
+ key_perm_t perm; /* access permissions */
+ unsigned short quotalen; /* length added to quota */
+ unsigned short datalen; /* payload data length */
+ unsigned short flags; /* status flags (change with lock writelocked) */
+#define KEY_FLAG_INSTANTIATED 0x00000001 /* set if key has been instantiated */
+#define KEY_FLAG_DEAD 0x00000002 /* set if key type has been deleted */
+#define KEY_FLAG_REVOKED 0x00000004 /* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA 0x00000008 /* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT 0x00000010 /* set if key is being constructed in userspace */
+#define KEY_FLAG_NEGATIVE 0x00000020 /* set if key is negative */
+
+#ifdef KEY_DEBUGGING
+ unsigned magic;
+#define KEY_DEBUG_MAGIC 0x18273645u
+#define KEY_DEBUG_MAGIC_X 0xf8e9dacbu
+#endif
+
+ /* the description string
+ * - this is used to match a key against search criteria
+ * - this should be a printable string
+ * - eg: for krb5 AFS, this might be "afs@REDHAT.COM"
+ */
+ char *description;
+
+ /* type specific data
+ * - this is used by the keyring type to index the name
+ */
+ union {
+ struct list_head link;
+ } type_data;
+
+ /* key data
+ * - this is used to hold the data actually used in cryptography or
+ * whatever
+ */
+ union {
+ unsigned long value;
+ void *data;
+ struct keyring_list *subscriptions;
+ } payload;
+};
+
+/*****************************************************************************/
+/*
+ * kernel managed key type definition
+ */
+struct key_type {
+ /* name of the type */
+ const char *name;
+
+ /* default payload length for quota precalculation (optional)
+ * - this can be used instead of calling key_payload_reserve(), that
+ * function only needs to be called if the real datalen is different
+ */
+ size_t def_datalen;
+
+ /* instantiate a key of this type
+ * - this method should call key_payload_reserve() to determine if the
+ * user's quota will hold the payload
+ */
+ int (*instantiate)(struct key *key, const void *data, size_t datalen);
+
+ /* duplicate a key of this type (optional)
+ * - the source key will be locked against change
+ * - the new description will be attached
+ * - the quota will have been adjusted automatically from
+ * source->quotalen
+ */
+ int (*duplicate)(struct key *key, const struct key *source);
+
+ /* update a key of this type (optional)
+ * - this method should call key_payload_reserve() to recalculate the
+ * quota consumption
+ * - the key must be locked against read when modifying
+ */
+ int (*update)(struct key *key, const void *data, size_t datalen);
+
+ /* match a key against a description */
+ int (*match)(const struct key *key, const void *desc);
+
+ /* clear the data from a key (optional) */
+ void (*destroy)(struct key *key);
+
+ /* describe a key */
+ void (*describe)(const struct key *key, struct seq_file *p);
+
+ /* read a key's data (optional)
+ * - permission checks will be done by the caller
+ * - the key's semaphore will be readlocked by the caller
+ * - should return the amount of data that could be read, no matter how
+ * much is copied into the buffer
+ * - shouldn't do the copy if the buffer is NULL
+ */
+ long (*read)(const struct key *key, char __user *buffer, size_t buflen);
+
+ /* internal fields */
+ struct list_head link; /* link in types list */
+};
+
+extern struct key_type key_type_keyring;
+
+extern int register_key_type(struct key_type *ktype);
+extern void unregister_key_type(struct key_type *ktype);
+
+extern struct key *key_alloc(struct key_type *type,
+ const char *desc,
+ uid_t uid, gid_t gid, key_perm_t perm,
+ int not_in_quota);
+extern int key_payload_reserve(struct key *key, size_t datalen);
+extern int key_instantiate_and_link(struct key *key,
+ const void *data,
+ size_t datalen,
+ struct key *keyring);
+extern int key_negate_and_link(struct key *key,
+ unsigned timeout,
+ struct key *keyring);
+extern void key_revoke(struct key *key);
+extern void key_put(struct key *key);
+
+static inline struct key *key_get(struct key *key)
+{
+ if (key)
+ atomic_inc(&key->usage);
+ return key;
+}
+
+extern struct key *request_key(struct key_type *type,
+ const char *description,
+ const char *callout_info);
+
+extern int key_validate(struct key *key);
+
+extern struct key *key_create_or_update(struct key *keyring,
+ const char *type,
+ const char *description,
+ const void *payload,
+ size_t plen,
+ int not_in_quota);
+
+extern int key_update(struct key *key,
+ const void *payload,
+ size_t plen);
+
+extern int key_link(struct key *keyring,
+ struct key *key);
+
+extern int key_unlink(struct key *keyring,
+ struct key *key);
+
+extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
+ int not_in_quota, struct key *dest);
+
+extern int keyring_clear(struct key *keyring);
+
+extern struct key *keyring_search(struct key *keyring,
+ struct key_type *type,
+ const char *description);
+
+extern struct key *search_process_keyrings(struct key_type *type,
+ const char *description);
+
+extern int keyring_add_key(struct key *keyring,
+ struct key *key);
+
+extern struct key *key_lookup(key_serial_t id);
+
+#define key_serial(key) ((key) ? (key)->serial : 0)
+
+/*
+ * the userspace interface
+ */
+extern struct key root_user_keyring, root_session_keyring;
+extern int alloc_uid_keyring(struct user_struct *user);
+extern void switch_uid_keyring(struct user_struct *new_user);
+extern int copy_keys(unsigned long clone_flags, struct task_struct *tsk);
+extern void exit_keys(struct task_struct *tsk);
+extern int suid_keys(struct task_struct *tsk);
+extern int exec_keys(struct task_struct *tsk);
+extern void key_fsuid_changed(struct task_struct *tsk);
+extern void key_fsgid_changed(struct task_struct *tsk);
+
+#else /* CONFIG_KEYS */
+
+#define key_validate(k) 0
+#define key_serial(k) 0
+#define key_get(k) NULL
+#define key_put(k) do { } while(0)
+#define alloc_uid_keyring(u) 0
+#define switch_uid_keyring(u) do { } while(0)
+#define copy_keys(f,t) 0
+#define exit_keys(t) do { } while(0)
+#define suid_keys(t) do { } while(0)
+#define exec_keys(t) do { } while(0)
+#define key_fsuid_changed(t) do { } while(0)
+#define key_fsgid_changed(t) do { } while(0)
+
+#endif /* CONFIG_KEYS */
+#endif /* __KERNEL__ */
+#endif /* _LINUX_KEY_H */
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
new file mode 100644
index 000000000000..381dedc370a3
--- /dev/null
+++ b/include/linux/keyctl.h
@@ -0,0 +1,39 @@
+/* keyctl.h: keyctl command IDs
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_KEYCTL_H
+#define _LINUX_KEYCTL_H
+
+/* special process keyring shortcut IDs */
+#define KEY_SPEC_THREAD_KEYRING -1 /* - key ID for thread-specific keyring */
+#define KEY_SPEC_PROCESS_KEYRING -2 /* - key ID for process-specific keyring */
+#define KEY_SPEC_SESSION_KEYRING -3 /* - key ID for session-specific keyring */
+#define KEY_SPEC_USER_KEYRING -4 /* - key ID for UID-specific keyring */
+#define KEY_SPEC_USER_SESSION_KEYRING -5 /* - key ID for UID-session keyring */
+#define KEY_SPEC_GROUP_KEYRING -6 /* - key ID for GID-specific keyring */
+
+/* keyctl commands */
+#define KEYCTL_GET_KEYRING_ID 0 /* ask for a keyring's ID */
+#define KEYCTL_JOIN_SESSION_KEYRING 1 /* join or start named session keyring */
+#define KEYCTL_UPDATE 2 /* update a key */
+#define KEYCTL_REVOKE 3 /* revoke a key */
+#define KEYCTL_CHOWN 4 /* set ownership of a key */
+#define KEYCTL_SETPERM 5 /* set perms on a key */
+#define KEYCTL_DESCRIBE 6 /* describe a key */
+#define KEYCTL_CLEAR 7 /* clear contents of a keyring */
+#define KEYCTL_LINK 8 /* link a key into a keyring */
+#define KEYCTL_UNLINK 9 /* unlink a key from a keyring */
+#define KEYCTL_SEARCH 10 /* search for a key in a keyring */
+#define KEYCTL_READ 11 /* read a key or keyring's contents */
+#define KEYCTL_INSTANTIATE 12 /* instantiate a partially constructed key */
+#define KEYCTL_NEGATE 13 /* negate a partially constructed key */
+
+#endif /* _LINUX_KEYCTL_H */
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
new file mode 100644
index 000000000000..0e6e972a9f70
--- /dev/null
+++ b/include/linux/kfifo.h
@@ -0,0 +1,157 @@
+/*
+ * A simple kernel FIFO implementation.
+ *
+ * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#ifndef _LINUX_KFIFO_H
+#define _LINUX_KFIFO_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+
+struct kfifo {
+ unsigned char *buffer; /* the buffer holding the data */
+ unsigned int size; /* the size of the allocated buffer */
+ unsigned int in; /* data is added at offset (in % size) */
+ unsigned int out; /* data is extracted from off. (out % size) */
+ spinlock_t *lock; /* protects concurrent modifications */
+};
+
+extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
+ int gfp_mask, spinlock_t *lock);
+extern struct kfifo *kfifo_alloc(unsigned int size, int gfp_mask,
+ spinlock_t *lock);
+extern void kfifo_free(struct kfifo *fifo);
+extern unsigned int __kfifo_put(struct kfifo *fifo,
+ unsigned char *buffer, unsigned int len);
+extern unsigned int __kfifo_get(struct kfifo *fifo,
+ unsigned char *buffer, unsigned int len);
+
+/*
+ * __kfifo_reset - removes the entire FIFO contents, no locking version
+ * @fifo: the fifo to be emptied.
+ */
+static inline void __kfifo_reset(struct kfifo *fifo)
+{
+ fifo->in = fifo->out = 0;
+}
+
+/*
+ * kfifo_reset - removes the entire FIFO contents
+ * @fifo: the fifo to be emptied.
+ */
+static inline void kfifo_reset(struct kfifo *fifo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(fifo->lock, flags);
+
+ __kfifo_reset(fifo);
+
+ spin_unlock_irqrestore(fifo->lock, flags);
+}
+
+/*
+ * kfifo_put - puts some data into the FIFO
+ * @fifo: the fifo to be used.
+ * @buffer: the data to be added.
+ * @len: the length of the data to be added.
+ *
+ * This function copies at most 'len' bytes from the 'buffer' into
+ * the FIFO depending on the free space, and returns the number of
+ * bytes copied.
+ */
+static inline unsigned int kfifo_put(struct kfifo *fifo,
+ unsigned char *buffer, unsigned int len)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(fifo->lock, flags);
+
+ ret = __kfifo_put(fifo, buffer, len);
+
+ spin_unlock_irqrestore(fifo->lock, flags);
+
+ return ret;
+}
+
+/*
+ * kfifo_get - gets some data from the FIFO
+ * @fifo: the fifo to be used.
+ * @buffer: where the data must be copied.
+ * @len: the size of the destination buffer.
+ *
+ * This function copies at most 'len' bytes from the FIFO into the
+ * 'buffer' and returns the number of copied bytes.
+ */
+static inline unsigned int kfifo_get(struct kfifo *fifo,
+ unsigned char *buffer, unsigned int len)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(fifo->lock, flags);
+
+ ret = __kfifo_get(fifo, buffer, len);
+
+ /*
+ * optimization: if the FIFO is empty, set the indices to 0
+ * so we don't wrap the next time
+ */
+ if (fifo->in == fifo->out)
+ fifo->in = fifo->out = 0;
+
+ spin_unlock_irqrestore(fifo->lock, flags);
+
+ return ret;
+}
+
+/*
+ * __kfifo_len - returns the number of bytes available in the FIFO, no locking version
+ * @fifo: the fifo to be used.
+ */
+static inline unsigned int __kfifo_len(struct kfifo *fifo)
+{
+ return fifo->in - fifo->out;
+}
+
+/*
+ * kfifo_len - returns the number of bytes available in the FIFO
+ * @fifo: the fifo to be used.
+ */
+static inline unsigned int kfifo_len(struct kfifo *fifo)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(fifo->lock, flags);
+
+ ret = __kfifo_len(fifo);
+
+ spin_unlock_irqrestore(fifo->lock, flags);
+
+ return ret;
+}
+
+#else
+#warning "don't include kernel headers in userspace"
+#endif /* __KERNEL__ */
+#endif
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 7738749e1285..15a806ad61ee 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -56,9 +56,7 @@ int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *,
sector_t, unsigned int[]);
void mb_cache_entry_rehash(struct mb_cache_entry *, unsigned int[]);
void mb_cache_entry_release(struct mb_cache_entry *);
-void mb_cache_entry_takeout(struct mb_cache_entry *);
void mb_cache_entry_free(struct mb_cache_entry *);
-struct mb_cache_entry *mb_cache_entry_dup(struct mb_cache_entry *);
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *,
struct block_device *,
sector_t);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7c36a10f6720..b812151cdd07 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -410,35 +410,6 @@ extern struct pglist_data contig_page_data;
#error ZONES_SHIFT > MAX_ZONES_SHIFT
#endif
-extern DECLARE_BITMAP(node_online_map, MAX_NUMNODES);
-
-#if defined(CONFIG_DISCONTIGMEM) || defined(CONFIG_NUMA)
-
-#define node_online(node) test_bit(node, node_online_map)
-#define node_set_online(node) set_bit(node, node_online_map)
-#define node_set_offline(node) clear_bit(node, node_online_map)
-static inline unsigned int num_online_nodes(void)
-{
- int i, num = 0;
-
- for(i = 0; i < MAX_NUMNODES; i++){
- if (node_online(i))
- num++;
- }
- return num;
-}
-
-#else /* !CONFIG_DISCONTIGMEM && !CONFIG_NUMA */
-
-#define node_online(node) \
- ({ BUG_ON((node) != 0); test_bit(node, node_online_map); })
-#define node_set_online(node) \
- ({ BUG_ON((node) != 0); set_bit(node, node_online_map); })
-#define node_set_offline(node) \
- ({ BUG_ON((node) != 0); clear_bit(node, node_online_map); })
-#define num_online_nodes() 1
-
-#endif /* CONFIG_DISCONTIGMEM || CONFIG_NUMA */
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index c6fa6a8ef95c..e5d2d6112f4b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -567,40 +567,20 @@ struct obsolete_modparm {
#define MODULE_PARM(var,type) \
struct obsolete_modparm __parm_##var __attribute__((section("__obsparm"))) = \
{ __stringify(var), type };
-
-static inline void __deprecated MOD_INC_USE_COUNT(struct module *module)
-{
- __unsafe(module);
-
-#if defined(CONFIG_MODULE_UNLOAD) && defined(MODULE)
- local_inc(&module->ref[get_cpu()].count);
- put_cpu();
-#else
- (void)try_module_get(module);
-#endif
-}
-
-static inline void __deprecated MOD_DEC_USE_COUNT(struct module *module)
-{
- module_put(module);
-}
-
-#define MOD_INC_USE_COUNT MOD_INC_USE_COUNT(THIS_MODULE)
-#define MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT(THIS_MODULE)
#else
#define MODULE_PARM(var,type)
-#define MOD_INC_USE_COUNT do { } while (0)
-#define MOD_DEC_USE_COUNT do { } while (0)
#endif
#define __MODULE_STRING(x) __stringify(x)
/* Use symbol_get and symbol_put instead. You'll thank me. */
#define HAVE_INTER_MODULE
-extern void inter_module_register(const char *, struct module *, const void *);
-extern void inter_module_unregister(const char *);
-extern const void *inter_module_get(const char *);
-extern const void *inter_module_get_request(const char *, const char *);
-extern void inter_module_put(const char *);
+extern void __deprecated inter_module_register(const char *,
+ struct module *, const void *);
+extern void __deprecated inter_module_unregister(const char *);
+extern const void * __deprecated inter_module_get(const char *);
+extern const void * __deprecated inter_module_get_request(const char *,
+ const char *);
+extern void __deprecated inter_module_put(const char *);
#endif /* _LINUX_MODULE_H */
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
new file mode 100644
index 000000000000..4de843d94147
--- /dev/null
+++ b/include/linux/nodemask.h
@@ -0,0 +1,326 @@
+#ifndef __LINUX_NODEMASK_H
+#define __LINUX_NODEMASK_H
+
+/*
+ * Nodemasks provide a bitmap suitable for representing the
+ * set of Nodes in a system, one bit position per Node number.
+ *
+ * See detailed comments in the file linux/bitmap.h describing the
+ * data type on which these nodemasks are based.
+ *
+ * For details of nodemask_scnprintf() and nodemask_parse(),
+ * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
+ *
+ * The available nodemask operations are:
+ *
+ * void node_set(node, mask) turn on bit 'node' in mask
+ * void node_clear(node, mask) turn off bit 'node' in mask
+ * void nodes_setall(mask) set all bits
+ * void nodes_clear(mask) clear all bits
+ * int node_isset(node, mask) true iff bit 'node' set in mask
+ * int node_test_and_set(node, mask) test and set bit 'node' in mask
+ *
+ * void nodes_and(dst, src1, src2) dst = src1 & src2 [intersection]
+ * void nodes_or(dst, src1, src2) dst = src1 | src2 [union]
+ * void nodes_xor(dst, src1, src2) dst = src1 ^ src2
+ * void nodes_andnot(dst, src1, src2) dst = src1 & ~src2
+ * void nodes_complement(dst, src) dst = ~src
+ *
+ * int nodes_equal(mask1, mask2) Does mask1 == mask2?
+ * int nodes_intersects(mask1, mask2) Do mask1 and mask2 intersect?
+ * int nodes_subset(mask1, mask2) Is mask1 a subset of mask2?
+ * int nodes_empty(mask)			Is mask empty (no bits set)?
+ * int nodes_full(mask)			Is mask full (all bits set)?
+ * int nodes_weight(mask) Hamming weight - number of set bits
+ *
+ * void nodes_shift_right(dst, src, n) Shift right
+ * void nodes_shift_left(dst, src, n) Shift left
+ *
+ * int first_node(mask) Number lowest set bit, or MAX_NUMNODES
+ * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
+ *
+ * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
+ * NODE_MASK_ALL Initializer - all bits set
+ * NODE_MASK_NONE Initializer - no bits set
+ * unsigned long *nodes_addr(mask) Array of unsigned long's in mask
+ *
+ * int nodemask_scnprintf(buf, len, mask) Format nodemask for printing
+ * int nodemask_parse(ubuf, ulen, mask) Parse ascii string as nodemask
+ *
+ * for_each_node_mask(node, mask) for-loop node over mask
+ *
+ * int num_online_nodes() Number of online Nodes
+ * int num_possible_nodes() Number of all possible Nodes
+ *
+ * int node_online(node) Is some node online?
+ * int node_possible(node) Is some node possible?
+ *
+ * int any_online_node(mask) First online node in mask
+ *
+ * node_set_online(node) set bit 'node' in node_online_map
+ * node_set_offline(node) clear bit 'node' in node_online_map
+ *
+ * for_each_node(node) for-loop node over node_possible_map
+ * for_each_online_node(node) for-loop node over node_online_map
+ *
+ * Subtlety:
+ * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway)
+ * to generate slightly worse code. So use a simple one-line #define
+ * for node_isset(), instead of wrapping an inline inside a macro, the
+ * way we do the other calls.
+ */
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/bitmap.h>
+#include <linux/numa.h>
+#include <asm/bug.h>
+
+typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
+extern nodemask_t _unused_nodemask_arg_;
+
+#define node_set(node, dst) __node_set((node), &(dst))
+static inline void __node_set(int node, volatile nodemask_t *dstp)
+{
+ set_bit(node, dstp->bits);
+}
+
+#define node_clear(node, dst) __node_clear((node), &(dst))
+static inline void __node_clear(int node, volatile nodemask_t *dstp)
+{
+ clear_bit(node, dstp->bits);
+}
+
+#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
+static inline void __nodes_setall(nodemask_t *dstp, int nbits)
+{
+ bitmap_fill(dstp->bits, nbits);
+}
+
+#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
+static inline void __nodes_clear(nodemask_t *dstp, int nbits)
+{
+ bitmap_zero(dstp->bits, nbits);
+}
+
+/* No static inline type checking - see Subtlety (1) above. */
+#define node_isset(node, nodemask) test_bit((node), (nodemask).bits)
+
+#define node_test_and_set(node, nodemask) \
+ __node_test_and_set((node), &(nodemask))
+static inline int __node_test_and_set(int node, nodemask_t *addr)
+{
+ return test_and_set_bit(node, addr->bits);
+}
+
+#define nodes_and(dst, src1, src2) \
+ __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
+static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_or(dst, src1, src2) \
+ __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
+static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_xor(dst, src1, src2) \
+ __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
+static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_andnot(dst, src1, src2) \
+ __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
+static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_complement(dst, src) \
+ __nodes_complement(&(dst), &(src), MAX_NUMNODES)
+static inline void __nodes_complement(nodemask_t *dstp,
+ const nodemask_t *srcp, int nbits)
+{
+ bitmap_complement(dstp->bits, srcp->bits, nbits);
+}
+
+#define nodes_equal(src1, src2) \
+ __nodes_equal(&(src1), &(src2), MAX_NUMNODES)
+static inline int __nodes_equal(const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ return bitmap_equal(src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_intersects(src1, src2) \
+ __nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
+static inline int __nodes_intersects(const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_subset(src1, src2) \
+ __nodes_subset(&(src1), &(src2), MAX_NUMNODES)
+static inline int __nodes_subset(const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ return bitmap_subset(src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
+static inline int __nodes_empty(const nodemask_t *srcp, int nbits)
+{
+ return bitmap_empty(srcp->bits, nbits);
+}
+
+#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
+static inline int __nodes_full(const nodemask_t *srcp, int nbits)
+{
+ return bitmap_full(srcp->bits, nbits);
+}
+
+#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
+static inline int __nodes_weight(const nodemask_t *srcp, int nbits)
+{
+ return bitmap_weight(srcp->bits, nbits);
+}
+
+#define nodes_shift_right(dst, src, n) \
+ __nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
+static inline void __nodes_shift_right(nodemask_t *dstp,
+ const nodemask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
+}
+
+#define nodes_shift_left(dst, src, n) \
+ __nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
+static inline void __nodes_shift_left(nodemask_t *dstp,
+ const nodemask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+}
+
+#define first_node(src) __first_node(&(src), MAX_NUMNODES)
+static inline int __first_node(const nodemask_t *srcp, int nbits)
+{
+ return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
+}
+
+#define next_node(n, src) __next_node((n), &(src), MAX_NUMNODES)
+static inline int __next_node(int n, const nodemask_t *srcp, int nbits)
+{
+ return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
+}
+
+#define nodemask_of_node(node) \
+({ \
+ typeof(_unused_nodemask_arg_) m; \
+ if (sizeof(m) == sizeof(unsigned long)) { \
+ m.bits[0] = 1UL<<(node); \
+ } else { \
+ nodes_clear(m); \
+ node_set((node), m); \
+ } \
+ m; \
+})
+
+#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)
+
+#if MAX_NUMNODES <= BITS_PER_LONG
+
+#define NODE_MASK_ALL \
+((nodemask_t) { { \
+ [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \
+} })
+
+#else
+
+#define NODE_MASK_ALL \
+((nodemask_t) { { \
+ [0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL, \
+ [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \
+} })
+
+#endif
+
+#define NODE_MASK_NONE \
+((nodemask_t) { { \
+ [0 ... BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL \
+} })
+
+#define nodes_addr(src) ((src).bits)
+
+#define nodemask_scnprintf(buf, len, src) \
+ __nodemask_scnprintf((buf), (len), &(src), MAX_NUMNODES)
+static inline int __nodemask_scnprintf(char *buf, int len,
+ const nodemask_t *srcp, int nbits)
+{
+ return bitmap_scnprintf(buf, len, srcp->bits, nbits);
+}
+
+#define nodemask_parse(ubuf, ulen, src) \
+ __nodemask_parse((ubuf), (ulen), &(src), MAX_NUMNODES)
+static inline int __nodemask_parse(const char __user *buf, int len,
+ nodemask_t *dstp, int nbits)
+{
+ return bitmap_parse(buf, len, dstp->bits, nbits);
+}
+
+#if MAX_NUMNODES > 1
+#define for_each_node_mask(node, mask) \
+ for ((node) = first_node(mask); \
+ (node) < MAX_NUMNODES; \
+ (node) = next_node((node), (mask)))
+#else /* MAX_NUMNODES == 1 */
+#define for_each_node_mask(node, mask) \
+ if (!nodes_empty(mask)) \
+ for ((node) = 0; (node) < 1; (node)++)
+#endif /* MAX_NUMNODES */
+
+/*
+ * The following particular system nodemasks and operations
+ * on them manage all possible and online nodes.
+ */
+
+extern nodemask_t node_online_map;
+extern nodemask_t node_possible_map;
+
+#if MAX_NUMNODES > 1
+#define num_online_nodes() nodes_weight(node_online_map)
+#define num_possible_nodes() nodes_weight(node_possible_map)
+#define node_online(node) node_isset((node), node_online_map)
+#define node_possible(node) node_isset((node), node_possible_map)
+#else
+#define num_online_nodes() 1
+#define num_possible_nodes() 1
+#define node_online(node) ((node) == 0)
+#define node_possible(node) ((node) == 0)
+#endif
+
+#define any_online_node(mask) \
+({ \
+ int node; \
+ for_each_node_mask(node, (mask)) \
+ if (node_online(node)) \
+ break; \
+ node; \
+})
+
+#define node_set_online(node) set_bit((node), node_online_map.bits)
+#define node_set_offline(node) clear_bit((node), node_online_map.bits)
+
+#define for_each_node(node) for_each_node_mask((node), node_possible_map)
+#define for_each_online_node(node) for_each_node_mask((node), node_online_map)
+
+#endif /* __LINUX_NODEMASK_H */
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index e6e43ce82b55..39cca92a8d63 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -5,14 +5,14 @@
* pages. A pagevec is a multipage container which is used for that.
*/
-#define PAGEVEC_SIZE 16
+#define PAGEVEC_SIZE 15
struct page;
struct address_space;
struct pagevec {
- unsigned nr;
- int cold;
+ unsigned short nr;
+ unsigned short cold;
struct page *pages[PAGEVEC_SIZE];
};
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
new file mode 100644
index 000000000000..4e2d2a942ecb
--- /dev/null
+++ b/include/linux/pktcdvd.h
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License. See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
+ * DVD-RW devices.
+ *
+ */
+#ifndef __PKTCDVD_H
+#define __PKTCDVD_H
+
+#include <linux/types.h>
+
+/*
+ * 1 for normal debug messages, 2 is very verbose. 0 to turn it off.
+ */
+#define PACKET_DEBUG 1
+
+#define MAX_WRITERS 8
+
+#define PKT_RB_POOL_SIZE 512
+
+/*
+ * How long we should hold a non-full packet before starting data gathering.
+ */
+#define PACKET_WAIT_TIME (HZ * 5 / 1000)
+
+/*
+ * use drive write caching -- we need deferred error handling to be
+ * able to successfully recover with this option (drive will return good
+ * status as soon as the cdb is validated).
+ */
+#if defined(CONFIG_CDROM_PKTCDVD_WCACHE)
+#define USE_WCACHING 1
+#else
+#define USE_WCACHING 0
+#endif
+
+/*
+ * No user-serviceable parts beyond this point ->
+ */
+
+/*
+ * device types
+ */
+#define PACKET_CDR 1
+#define PACKET_CDRW 2
+#define PACKET_DVDR 3
+#define PACKET_DVDRW 4
+
+/*
+ * flags
+ */
+#define PACKET_WRITABLE 1 /* pd is writable */
+#define PACKET_NWA_VALID 2 /* next writable address valid */
+#define PACKET_LRA_VALID 3 /* last recorded address valid */
+#define PACKET_MERGE_SEGS 4 /* perform segment merging to keep */
+ /* underlying cdrom device happy */
+
+/*
+ * Disc status -- from READ_DISC_INFO
+ */
+#define PACKET_DISC_EMPTY 0
+#define PACKET_DISC_INCOMPLETE 1
+#define PACKET_DISC_COMPLETE 2
+#define PACKET_DISC_OTHER 3
+
+/*
+ * write type, and corresponding data block type
+ */
+#define PACKET_MODE1 1
+#define PACKET_MODE2 2
+#define PACKET_BLOCK_MODE1 8
+#define PACKET_BLOCK_MODE2 10
+
+/*
+ * Last session/border status
+ */
+#define PACKET_SESSION_EMPTY 0
+#define PACKET_SESSION_INCOMPLETE 1
+#define PACKET_SESSION_RESERVED 2
+#define PACKET_SESSION_COMPLETE 3
+
+#define PACKET_MCN "4a656e734178626f65323030300000"
+
+#undef PACKET_USE_LS
+
+#define PKT_CTRL_CMD_SETUP 0
+#define PKT_CTRL_CMD_TEARDOWN 1
+#define PKT_CTRL_CMD_STATUS 2
+
+struct pkt_ctrl_command {
+ __u32 command; /* in: Setup, teardown, status */
+ __u32 dev_index; /* in/out: Device index */
+ __u32 dev; /* in/out: Device nr for cdrw device */
+ __u32 pkt_dev; /* in/out: Device nr for packet device */
+ __u32 num_devices; /* out: Largest device index + 1 */
+ __u32 padding; /* Not used */
+};
+
+/*
+ * packet ioctls
+ */
+#define PACKET_IOCTL_MAGIC ('X')
+#define PACKET_CTRL_CMD _IOWR(PACKET_IOCTL_MAGIC, 1, struct pkt_ctrl_command)
+
+#ifdef __KERNEL__
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/cdrom.h>
+
+struct packet_settings
+{
+ __u8 size; /* packet size in (512 byte) sectors */
+ __u8 fp; /* fixed packets */
+ __u8 link_loss; /* the rest is specified
+ * as per Mt Fuji */
+ __u8 write_type;
+ __u8 track_mode;
+ __u8 block_mode;
+};
+
+/*
+ * Very crude stats for now
+ */
+struct packet_stats
+{
+ unsigned long pkt_started;
+ unsigned long pkt_ended;
+ unsigned long secs_w;
+ unsigned long secs_rg;
+ unsigned long secs_r;
+};
+
+struct packet_cdrw
+{
+ struct list_head pkt_free_list;
+ struct list_head pkt_active_list;
+ spinlock_t active_list_lock; /* Serialize access to pkt_active_list */
+ struct task_struct *thread;
+ atomic_t pending_bios;
+};
+
+/*
+ * Switch to high speed reading after reading this many kilobytes
+ * with no interspersed writes.
+ */
+#define HI_SPEED_SWITCH 512
+
+struct packet_iosched
+{
+ atomic_t attention; /* Set to non-zero when queue processing is needed */
+ int writing; /* Non-zero when writing, zero when reading */
+ spinlock_t lock; /* Protecting read/write queue manipulations */
+ struct bio *read_queue;
+ struct bio *read_queue_tail;
+ struct bio *write_queue;
+ struct bio *write_queue_tail;
+ int high_prio_read; /* An important read request has been queued */
+ int successive_reads;
+};
+
+/*
+ * 32 buffers of 2048 bytes
+ */
+#define PACKET_MAX_SIZE 32
+#define PAGES_PER_PACKET (PACKET_MAX_SIZE * CD_FRAMESIZE / PAGE_SIZE)
+#define PACKET_MAX_SECTORS (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9)
+
+enum packet_data_state {
+ PACKET_IDLE_STATE, /* Not used at the moment */
+ PACKET_WAITING_STATE, /* Waiting for more bios to arrive, so */
+ /* we don't have to do as much */
+ /* data gathering */
+ PACKET_READ_WAIT_STATE, /* Waiting for reads to fill in holes */
+ PACKET_WRITE_WAIT_STATE, /* Waiting for the write to complete */
+ PACKET_RECOVERY_STATE, /* Recover after read/write errors */
+ PACKET_FINISHED_STATE, /* After write has finished */
+
+ PACKET_NUM_STATES /* Number of possible states */
+};
+
+/*
+ * Information needed for writing a single packet
+ */
+struct pktcdvd_device;
+
+struct packet_data
+{
+ struct list_head list;
+
+ spinlock_t lock; /* Lock protecting state transitions and */
+ /* orig_bios list */
+
+ struct bio *orig_bios; /* Original bios passed to pkt_make_request */
+ struct bio *orig_bios_tail;/* that will be handled by this packet */
+ int write_size; /* Total size of all bios in the orig_bios */
+ /* list, measured in number of frames */
+
+ struct bio *w_bio; /* The bio we will send to the real CD */
+ /* device once we have all data for the */
+ /* packet we are going to write */
+ sector_t sector; /* First sector in this packet */
+ int frames; /* Number of frames in this packet */
+
+ enum packet_data_state state; /* Current state */
+ atomic_t run_sm; /* Incremented whenever the state */
+ /* machine needs to be run */
+ long sleep_time; /* Set this to non-zero to make the state */
+ /* machine run after this many jiffies. */
+
+ atomic_t io_wait; /* Number of pending IO operations */
+ atomic_t io_errors; /* Number of read/write errors during IO */
+
+ struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
+ struct page *pages[PAGES_PER_PACKET];
+
+ int cache_valid; /* If non-zero, the data for the zone defined */
+ /* by the sector variable is completely cached */
+ /* in the pages[] vector. */
+
+ int id; /* ID number for debugging */
+ struct pktcdvd_device *pd;
+};
+
+struct pkt_rb_node {
+ struct rb_node rb_node;
+ struct bio *bio;
+};
+
+struct packet_stacked_data
+{
+ struct bio *bio; /* Original read request bio */
+ struct pktcdvd_device *pd;
+};
+#define PSD_POOL_SIZE 64
+
+struct pktcdvd_device
+{
+ struct block_device *bdev; /* dev attached */
+ dev_t pkt_dev; /* our dev */
+ char name[20];
+ struct packet_settings settings;
+ struct packet_stats stats;
+ int refcnt; /* Open count */
+ int write_speed; /* current write speed, kB/s */
+ int read_speed; /* current read speed, kB/s */
+ unsigned long offset; /* start offset */
+ __u8 mode_offset; /* 0 / 8 */
+ __u8 type;
+ unsigned long flags;
+ __u16 mmc3_profile;
+ __u32 nwa; /* next writable address */
+ __u32 lra; /* last recorded address */
+ struct packet_cdrw cdrw;
+ wait_queue_head_t wqueue;
+
+ spinlock_t lock; /* Serialize access to bio_queue */
+ struct rb_root bio_queue; /* Work queue of bios we need to handle */
+ int bio_queue_size; /* Number of nodes in bio_queue */
+ sector_t current_sector; /* Keep track of where the elevator is */
+ atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */
+ /* needs to be run. */
+ mempool_t *rb_pool; /* mempool for pkt_rb_node allocations */
+
+ struct packet_iosched iosched;
+ struct gendisk *disk;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* __PKTCDVD_H */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 7bfd2d43963e..6446e4f65e93 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -143,11 +143,6 @@ int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data);
*/
int pm_send_all(pm_request_t rqst, void *data);
-/*
- * Find a device
- */
-struct pm_dev *pm_find(pm_dev_t type, struct pm_dev *from);
-
static inline void pm_access(struct pm_dev *dev) {}
static inline void pm_dev_idle(struct pm_dev *dev) {}
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index aff9a6adb39e..fc74ef3fef36 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -79,7 +79,6 @@ extern struct posix_acl *posix_acl_from_mode(mode_t, int);
extern int posix_acl_equiv_mode(const struct posix_acl *, mode_t *);
extern int posix_acl_create_masq(struct posix_acl *, mode_t *);
extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
-extern int posix_acl_masq_nfs_mode(struct posix_acl *, mode_t *);
extern struct posix_acl *get_posix_acl(struct inode *, int);
extern int set_posix_acl(struct inode *, int, struct posix_acl *);
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 54333c98e532..edb036b43597 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -49,7 +49,6 @@
# define PR_TIMING_TIMESTAMP 1 /* Accurate timestamp based
process timing */
-
#define PR_SET_NAME 15 /* Set process name */
#endif /* _LINUX_PRCTL_H */
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 6eacc2c653f1..df62d2ac38a7 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -438,7 +438,7 @@ static inline void set_offset_v2_k_offset( struct offset_v2 *v2, loff_t offset )
/* Key of an item determines its location in the S+tree, and
is composed of 4 components */
-struct key {
+struct reiserfs_key {
__u32 k_dir_id; /* packing locality: by default parent
directory object id */
__u32 k_objectid; /* object identifier */
@@ -450,7 +450,7 @@ struct key {
struct cpu_key {
- struct key on_disk_key;
+ struct reiserfs_key on_disk_key;
int version;
int key_length; /* 3 in all cases but direct2indirect and
indirect2direct conversion */
@@ -470,7 +470,7 @@ struct cpu_key {
#define KEY_FOUND 1
#define KEY_NOT_FOUND 0
-#define KEY_SIZE (sizeof(struct key))
+#define KEY_SIZE (sizeof(struct reiserfs_key))
#define SHORT_KEY_SIZE (sizeof (__u32) + sizeof (__u32))
/* return values for search_by_key and clones */
@@ -503,7 +503,7 @@ struct item_head
{
/* Everything in the tree is found by searching for it based on
* its key.*/
- struct key ih_key;
+ struct reiserfs_key ih_key;
union {
/* The free space in the last unformatted node of an
indirect item if this is an indirect item. This
@@ -602,7 +602,7 @@ static inline __u32 type2uniqueness (int type)
// there is no way to get version of object from key, so, provide
// version to these defines
//
-static inline loff_t le_key_k_offset (int version, const struct key * key)
+static inline loff_t le_key_k_offset (int version, const struct reiserfs_key * key)
{
return (version == KEY_FORMAT_3_5) ?
le32_to_cpu( key->u.k_offset_v1.k_offset ) :
@@ -614,7 +614,7 @@ static inline loff_t le_ih_k_offset (const struct item_head * ih)
return le_key_k_offset (ih_version (ih), &(ih->ih_key));
}
-static inline loff_t le_key_k_type (int version, const struct key * key)
+static inline loff_t le_key_k_type (int version, const struct reiserfs_key * key)
{
return (version == KEY_FORMAT_3_5) ?
uniqueness2type( le32_to_cpu( key->u.k_offset_v1.k_uniqueness)) :
@@ -627,7 +627,7 @@ static inline loff_t le_ih_k_type (const struct item_head * ih)
}
-static inline void set_le_key_k_offset (int version, struct key * key, loff_t offset)
+static inline void set_le_key_k_offset (int version, struct reiserfs_key * key, loff_t offset)
{
(version == KEY_FORMAT_3_5) ?
(void)(key->u.k_offset_v1.k_offset = cpu_to_le32 (offset)) : /* jdm check */
@@ -641,7 +641,7 @@ static inline void set_le_ih_k_offset (struct item_head * ih, loff_t offset)
}
-static inline void set_le_key_k_type (int version, struct key * key, int type)
+static inline void set_le_key_k_type (int version, struct reiserfs_key * key, int type)
{
(version == KEY_FORMAT_3_5) ?
(void)(key->u.k_offset_v1.k_uniqueness = cpu_to_le32(type2uniqueness(type))):
@@ -738,7 +738,7 @@ static inline void cpu_key_k_offset_dec (struct cpu_key * key)
/* object identifier for root dir */
#define REISERFS_ROOT_OBJECTID 2
#define REISERFS_ROOT_PARENT_OBJECTID 1
-extern struct key root_key;
+extern struct reiserfs_key root_key;
@@ -760,7 +760,7 @@ struct block_head {
__u16 blk_free_space; /* Block free space in bytes. */
__u16 blk_reserved;
/* dump this in v4/planA */
- struct key blk_right_delim_key; /* kept only for compatibility */
+ struct reiserfs_key blk_right_delim_key; /* kept only for compatibility */
};
#define BLKH_SIZE (sizeof(struct block_head))
@@ -1301,7 +1301,7 @@ struct path var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,}
#define UNFM_P_SHIFT 2
// in in-core inode key is stored on le form
-#define INODE_PKEY(inode) ((struct key *)(REISERFS_I(inode)->i_key))
+#define INODE_PKEY(inode) ((struct reiserfs_key *)(REISERFS_I(inode)->i_key))
#define MAX_UL_INT 0xffffffff
#define MAX_INT 0x7ffffff
@@ -1479,7 +1479,7 @@ struct tree_balance
int fs_gen; /* saved value of `reiserfs_generation' counter
see FILESYSTEM_CHANGED() macro in reiserfs_fs.h */
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
- struct key key; /* key pointer, to pass to block allocator or
+ struct reiserfs_key key; /* key pointer, to pass to block allocator or
another low-level subsystem */
#endif
} ;
@@ -1543,7 +1543,7 @@ struct buffer_info {
struct item_operations {
int (*bytes_number) (struct item_head * ih, int block_size);
void (*decrement_key) (struct cpu_key *);
- int (*is_left_mergeable) (struct key * ih, unsigned long bsize);
+ int (*is_left_mergeable) (struct reiserfs_key * ih, unsigned long bsize);
void (*print_item) (struct item_head *, char * item);
void (*check_item) (struct item_head *, char * item);
@@ -1594,7 +1594,7 @@ extern struct item_operations * item_ops [TYPE_ANY + 1];
#define B_N_PITEM_HEAD(bh,item_num) ( (struct item_head * )((bh)->b_data + BLKH_SIZE) + (item_num) )
/* get key */
-#define B_N_PDELIM_KEY(bh,item_num) ( (struct key * )((bh)->b_data + BLKH_SIZE) + (item_num) )
+#define B_N_PDELIM_KEY(bh,item_num) ( (struct reiserfs_key * )((bh)->b_data + BLKH_SIZE) + (item_num) )
/* get the key */
#define B_N_PKEY(bh,item_num) ( &(B_N_PITEM_HEAD(bh,item_num)->ih_key) )
@@ -1832,11 +1832,11 @@ extern void copy_item_head(struct item_head * p_v_to,
const struct item_head * p_v_from);
// first key is in cpu form, second - le
-extern int comp_keys (const struct key * le_key,
+extern int comp_keys (const struct reiserfs_key * le_key,
const struct cpu_key * cpu_key);
-extern int comp_short_keys (const struct key * le_key,
+extern int comp_short_keys (const struct reiserfs_key * le_key,
const struct cpu_key * cpu_key);
-extern void le_key2cpu_key (struct cpu_key * to, const struct key * from);
+extern void le_key2cpu_key (struct cpu_key * to, const struct reiserfs_key * from);
// both are cpu keys
extern int comp_cpu_keys (const struct cpu_key *, const struct cpu_key *);
@@ -1845,13 +1845,13 @@ extern int comp_short_cpu_keys (const struct cpu_key *,
extern void cpu_key2cpu_key (struct cpu_key *, const struct cpu_key *);
// both are in le form
-extern int comp_le_keys (const struct key *, const struct key *);
-extern int comp_short_le_keys (const struct key *, const struct key *);
+extern int comp_le_keys (const struct reiserfs_key *, const struct reiserfs_key *);
+extern int comp_short_le_keys (const struct reiserfs_key *, const struct reiserfs_key *);
//
// get key version from on disk key - kludge
//
-static inline int le_key_version (const struct key * key)
+static inline int le_key_version (const struct reiserfs_key * key)
{
int type;
@@ -1864,14 +1864,14 @@ static inline int le_key_version (const struct key * key)
}
-static inline void copy_key (struct key *to, const struct key *from)
+static inline void copy_key (struct reiserfs_key *to, const struct reiserfs_key *from)
{
memcpy (to, from, KEY_SIZE);
}
int comp_items (const struct item_head * stored_ih, const struct path * p_s_path);
-const struct key * get_rkey (const struct path * p_s_chk_path,
+const struct reiserfs_key * get_rkey (const struct path * p_s_chk_path,
const struct super_block * p_s_sb);
inline int bin_search (const void * p_v_key, const void * p_v_base,
int p_n_num, int p_n_width, int * p_n_pos);
@@ -1913,7 +1913,7 @@ int reiserfs_delete_item (struct reiserfs_transaction_handle *th,
struct buffer_head * p_s_un_bh);
void reiserfs_delete_solid_item (struct reiserfs_transaction_handle *th,
- struct inode *inode, struct key * key);
+ struct inode *inode, struct reiserfs_key * key);
int reiserfs_delete_object (struct reiserfs_transaction_handle *th, struct inode * p_s_inode);
int reiserfs_do_truncate (struct reiserfs_transaction_handle *th,
struct inode * p_s_inode, struct page *,
@@ -2131,7 +2131,7 @@ struct buffer_head * get_FEB (struct tree_balance *);
struct __reiserfs_blocknr_hint {
struct inode * inode; /* inode passed to allocator, if we allocate unf. nodes */
long block; /* file offset, in blocks */
- struct key key;
+ struct reiserfs_key key;
struct path * path; /* search path, used by allocator to deternine search_start by
* various ways */
struct reiserfs_transaction_handle * th; /* transaction handle is needed to log super blocks and
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dc3f297a726d..c8f981f108d4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -227,7 +227,7 @@ struct mm_struct {
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
unsigned long rss, total_vm, locked_vm, shared_vm;
- unsigned long exec_vm, stack_vm, reserved_vm, def_flags;
+ unsigned long exec_vm, stack_vm, reserved_vm, def_flags, nr_ptes;
unsigned long saved_auxv[42]; /* for /proc/PID/auxv */
@@ -358,6 +358,11 @@ struct user_struct {
unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
unsigned long locked_shm; /* How many pages of mlocked shm ? */
+#ifdef CONFIG_KEYS
+ struct key *uid_keyring; /* UID specific keyring */
+ struct key *session_keyring; /* UID's default session keyring */
+#endif
+
/* Hash table maintenance information */
struct list_head uidhash_list;
uid_t uid;
@@ -611,6 +616,11 @@ struct task_struct {
kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
unsigned keep_capabilities:1;
struct user_struct *user;
+#ifdef CONFIG_KEYS
+ struct key *session_keyring; /* keyring inherited over fork */
+ struct key *process_keyring; /* keyring private to this process (CLONE_THREAD) */
+ struct key *thread_keyring; /* keyring private to this thread */
+#endif
unsigned short used_math;
char comm[16];
/* file system info */
@@ -644,7 +654,7 @@ struct task_struct {
/* Thread group tracking */
u32 parent_exec_id;
u32 self_exec_id;
-/* Protection of (de-)allocation: mm, files, fs, tty */
+/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
spinlock_t alloc_lock;
/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
spinlock_t proc_lock;
@@ -828,7 +838,6 @@ extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
extern int kill_pg_info(int, struct siginfo *, pid_t);
-extern int kill_sl_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
@@ -977,8 +986,8 @@ static inline int thread_group_empty(task_t *p)
extern void unhash_process(struct task_struct *p);
/*
- * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm and
- * synchronises with wait4().
+ * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
+ * subscriptions and synchronises with wait4(). Also used in procfs.
*
* Nests both inside and outside of read_lock(&tasklist_lock).
* It must not be nested with write_lock_irq(&tasklist_lock),
diff --git a/include/linux/security.h b/include/linux/security.h
index a1dee9a60587..ab0941c9fca7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -27,13 +27,14 @@
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/sem.h>
-#include <linux/sysctl.h>
#include <linux/shm.h>
#include <linux/msg.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
+struct ctl_table;
+
/*
* These functions are in security/capability.c and are used
* as the default capabilities functions
@@ -395,13 +396,13 @@ struct swap_info_struct;
* Return 0 if permission is granted.
* @inode_getsecurity:
* Copy the extended attribute representation of the security label
- * associated with @name for @dentry into @buffer. @buffer may be
+ * associated with @name for @inode into @buffer. @buffer may be
* NULL to request the size of the buffer required. @size indicates
* the size of @buffer in bytes. Note that @name is the remainder
* of the attribute name after the security. prefix has been removed.
* Return number of bytes used/required on success.
* @inode_setsecurity:
- * Set the security label associated with @name for @dentry from the
+ * Set the security label associated with @name for @inode from the
* extended attribute value @value. @size indicates the size of the
* @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
* Note that @name is the remainder of the attribute name after the
@@ -409,8 +410,9 @@ struct swap_info_struct;
* Return 0 on success.
* @inode_listsecurity:
* Copy the extended attribute names for the security labels
- * associated with @dentry into @buffer. @buffer may be NULL to
- * request the size of the buffer required.
+ * associated with @inode into @buffer. The maximum size of @buffer
+ * is specified by @buffer_size. @buffer may be NULL to request
+ * the size of the buffer required.
* Returns number of bytes used/required on success.
*
* Security hooks for file operations
@@ -1029,7 +1031,7 @@ struct security_operations {
kernel_cap_t * inheritable,
kernel_cap_t * permitted);
int (*acct) (struct file * file);
- int (*sysctl) (ctl_table * table, int op);
+ int (*sysctl) (struct ctl_table * table, int op);
int (*capable) (struct task_struct * tsk, int cap);
int (*quotactl) (int cmds, int type, int id, struct super_block * sb);
int (*quota_on) (struct file * f);
@@ -1108,9 +1110,9 @@ struct security_operations {
int (*inode_getxattr) (struct dentry *dentry, char *name);
int (*inode_listxattr) (struct dentry *dentry);
int (*inode_removexattr) (struct dentry *dentry, char *name);
- int (*inode_getsecurity)(struct dentry *dentry, const char *name, void *buffer, size_t size);
- int (*inode_setsecurity)(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
- int (*inode_listsecurity)(struct dentry *dentry, char *buffer);
+ int (*inode_getsecurity)(struct inode *inode, const char *name, void *buffer, size_t size);
+ int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags);
+ int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size);
int (*file_permission) (struct file * file, int mask);
int (*file_alloc_security) (struct file * file);
@@ -1268,7 +1270,7 @@ static inline int security_acct (struct file *file)
return security_ops->acct (file);
}
-static inline int security_sysctl(ctl_table * table, int op)
+static inline int security_sysctl(struct ctl_table *table, int op)
{
return security_ops->sysctl(table, op);
}
@@ -1575,19 +1577,19 @@ static inline int security_inode_removexattr (struct dentry *dentry, char *name)
return security_ops->inode_removexattr (dentry, name);
}
-static inline int security_inode_getsecurity(struct dentry *dentry, const char *name, void *buffer, size_t size)
+static inline int security_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size)
{
- return security_ops->inode_getsecurity(dentry, name, buffer, size);
+ return security_ops->inode_getsecurity(inode, name, buffer, size);
}
-static inline int security_inode_setsecurity(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
+static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
- return security_ops->inode_setsecurity(dentry, name, value, size, flags);
+ return security_ops->inode_setsecurity(inode, name, value, size, flags);
}
-static inline int security_inode_listsecurity(struct dentry *dentry, char *buffer)
+static inline int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
{
- return security_ops->inode_listsecurity(dentry, buffer);
+ return security_ops->inode_listsecurity(inode, buffer, buffer_size);
}
static inline int security_file_permission (struct file *file, int mask)
@@ -1940,7 +1942,7 @@ static inline int security_acct (struct file *file)
return 0;
}
-static inline int security_sysctl(ctl_table * table, int op)
+static inline int security_sysctl(struct ctl_table *table, int op)
{
return 0;
}
@@ -2214,17 +2216,17 @@ static inline int security_inode_removexattr (struct dentry *dentry, char *name)
return cap_inode_removexattr(dentry, name);
}
-static inline int security_inode_getsecurity(struct dentry *dentry, const char *name, void *buffer, size_t size)
+static inline int security_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size)
{
return -EOPNOTSUPP;
}
-static inline int security_inode_setsecurity(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
+static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
return -EOPNOTSUPP;
}
-static inline int security_inode_listsecurity(struct dentry *dentry, char *buffer)
+static inline int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
{
return 0;
}
diff --git a/include/linux/smb_mount.h b/include/linux/smb_mount.h
index 256900c55881..d10f00cb5703 100644
--- a/include/linux/smb_mount.h
+++ b/include/linux/smb_mount.h
@@ -38,7 +38,10 @@ struct smb_mount_data {
#define SMB_MOUNT_DIRATTR 0x0004 /* Use find_first for getattr */
#define SMB_MOUNT_CASE 0x0008 /* Be case sensitive */
#define SMB_MOUNT_UNICODE 0x0010 /* Server talks unicode */
-
+#define SMB_MOUNT_UID 0x0020 /* Use user specified uid */
+#define SMB_MOUNT_GID 0x0040 /* Use user specified gid */
+#define SMB_MOUNT_FMODE 0x0080 /* Use user specified file mode */
+#define SMB_MOUNT_DMODE 0x0100 /* Use user specified dir mode */
struct smb_mount_data_kernel {
int version;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 2a8c7faf2dcc..bc93606badbc 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -61,6 +61,7 @@ struct mq_attr;
#include <asm/siginfo.h>
#include <asm/signal.h>
#include <linux/quota.h>
+#include <linux/key.h>
asmlinkage long sys_time(int __user *tloc);
asmlinkage long sys_stime(time_t __user *tptr);
@@ -492,4 +493,18 @@ asmlinkage long sys_uselib(const char __user *library);
asmlinkage long sys_setaltroot(const char __user *altroot);
asmlinkage long sys_ni_syscall(void);
+asmlinkage long sys_add_key(const char __user *_type,
+ const char __user *_description,
+ const void __user *_payload,
+ size_t plen,
+ key_serial_t destringid);
+
+asmlinkage long sys_request_key(const char __user *_type,
+ const char __user *_description,
+ const char __user *_callout_info,
+ key_serial_t destringid);
+
+asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5);
+
#endif
diff --git a/include/linux/threads.h b/include/linux/threads.h
index 047e28bd7d8e..4243c55cce87 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -30,6 +30,6 @@
/*
* A maximum of 4 million PIDs should be enough for a while:
*/
-#define PID_MAX_LIMIT (4*1024*1024)
+#define PID_MAX_LIMIT (sizeof(long) > 4 ? 4*1024*1024 : PID_MAX_DEFAULT)
#endif
diff --git a/include/linux/time.h b/include/linux/time.h
index 2111941c1af7..ae174b8ab036 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -1,9 +1,12 @@
#ifndef _LINUX_TIME_H
#define _LINUX_TIME_H
-#include <asm/param.h>
#include <linux/types.h>
+#ifdef __KERNEL__
+#include <linux/seqlock.h>
+#endif
+
#ifndef _STRUCT_TIMESPEC
#define _STRUCT_TIMESPEC
struct timespec {
@@ -24,39 +27,6 @@ struct timezone {
#ifdef __KERNEL__
-#include <linux/spinlock.h>
-#include <linux/seqlock.h>
-#include <linux/timex.h>
-#include <asm/div64.h>
-#ifndef div_long_long_rem
-
-#define div_long_long_rem(dividend,divisor,remainder) ({ \
- u64 result = dividend; \
- *remainder = do_div(result,divisor); \
- result; })
-
-#endif
-
-/*
- * Have the 32 bit jiffies value wrap 5 minutes after boot
- * so jiffies wrap bugs show up earlier.
- */
-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
-
-/*
- * Change timeval to jiffies, trying to avoid the
- * most obvious overflows..
- *
- * And some not so obvious.
- *
- * Note that we don't want to return MAX_LONG, because
- * for various timeout reasons we often end up having
- * to wait "jiffies+1" in order to guarantee that we wait
- * at _least_ "jiffies" - so "jiffies+1" had better still
- * be positive.
- */
-#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)
-
/* Parameters used to convert the timespec values */
#ifndef USEC_PER_SEC
#define USEC_PER_SEC (1000000L)
@@ -70,232 +40,6 @@ struct timezone {
#define NSEC_PER_USEC (1000L)
#endif
-/*
- * We want to do realistic conversions of time so we need to use the same
- * values the update wall clock code uses as the jiffies size. This value
- * is: TICK_NSEC (which is defined in timex.h). This
- * is a constant and is in nanoseconds. We will used scaled math
- * with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and
- * NSEC_JIFFIE_SC. Note that these defines contain nothing but
- * constants and so are computed at compile time. SHIFT_HZ (computed in
- * timex.h) adjusts the scaling for different HZ values.
-
- * Scaled math??? What is that?
- *
- * Scaled math is a way to do integer math on values that would,
- * otherwise, either overflow, underflow, or cause undesired div
- * instructions to appear in the execution path. In short, we "scale"
- * up the operands so they take more bits (more precision, less
- * underflow), do the desired operation and then "scale" the result back
- * by the same amount. If we do the scaling by shifting we avoid the
- * costly mpy and the dastardly div instructions.
-
- * Suppose, for example, we want to convert from seconds to jiffies
- * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE. The
- * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We
- * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we
- * might calculate at compile time, however, the result will only have
- * about 3-4 bits of precision (less for smaller values of HZ).
- *
- * So, we scale as follows:
- * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE);
- * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE;
- * Then we make SCALE a power of two so:
- * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE;
- * Now we define:
- * #define SEC_CONV = ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE))
- * jiff = (sec * SEC_CONV) >> SCALE;
- *
- * Often the math we use will expand beyond 32-bits so we tell C how to
- * do this and pass the 64-bit result of the mpy through the ">> SCALE"
- * which should take the result back to 32-bits. We want this expansion
- * to capture as much precision as possible. At the same time we don't
- * want to overflow so we pick the SCALE to avoid this. In this file,
- * that means using a different scale for each range of HZ values (as
- * defined in timex.h).
- *
- * For those who want to know, gcc will give a 64-bit result from a "*"
- * operator if the result is a long long AND at least one of the
- * operands is cast to long long (usually just prior to the "*" so as
- * not to confuse it into thinking it really has a 64-bit operand,
- * which, buy the way, it can do, but it take more code and at least 2
- * mpys).
-
- * We also need to be aware that one second in nanoseconds is only a
- * couple of bits away from overflowing a 32-bit word, so we MUST use
- * 64-bits to get the full range time in nanoseconds.
-
- */
-
-/*
- * Here are the scales we will use. One for seconds, nanoseconds and
- * microseconds.
- *
- * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and
- * check if the sign bit is set. If not, we bump the shift count by 1.
- * (Gets an extra bit of precision where we can use it.)
- * We know it is set for HZ = 1024 and HZ = 100 not for 1000.
- * Haven't tested others.
-
- * Limits of cpp (for #if expressions) only long (no long long), but
- * then we only need the most signicant bit.
- */
-
-#define SEC_JIFFIE_SC (31 - SHIFT_HZ)
-#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000)
-#undef SEC_JIFFIE_SC
-#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
-#endif
-#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
-#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-
-#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION \
- ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
- * USEC_ROUND is used in the timeval to jiffie conversion. See there
- * for more details. It is the scaled resolution rounding value. Note
- * that it is a 64-bit value. Since, when it is applied, we are already
- * in jiffies (albit scaled), it is nothing but the bits we will shift
- * off.
- */
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
-/*
- * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
- * into seconds. The 64-bit case will overflow if we are not careful,
- * so use the messy SH_DIV macro to do it. Still all constants.
- */
-#if BITS_PER_LONG < 64
-# define MAX_SEC_IN_JIFFIES \
- (long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
-#else /* take care of overflow on 64 bits machines */
-# define MAX_SEC_IN_JIFFIES \
- (SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
-
-#endif
-
-/*
- * Convert jiffies to milliseconds and back.
- *
- * Avoid unnecessary multiplications/divisions in the
- * two most common HZ cases:
- */
-static inline unsigned int jiffies_to_msecs(const unsigned long j)
-{
-#if HZ <= 1000 && !(1000 % HZ)
- return (1000 / HZ) * j;
-#elif HZ > 1000 && !(HZ % 1000)
- return (j + (HZ / 1000) - 1)/(HZ / 1000);
-#else
- return (j * 1000) / HZ;
-#endif
-}
-
-static inline unsigned int jiffies_to_usecs(const unsigned long j)
-{
-#if HZ <= 1000 && !(1000 % HZ)
- return (1000000 / HZ) * j;
-#elif HZ > 1000 && !(HZ % 1000)
- return (j*1000 + (HZ - 1000))/(HZ / 1000);
-#else
- return (j * 1000000) / HZ;
-#endif
-}
-
-static inline unsigned long msecs_to_jiffies(const unsigned int m)
-{
- if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
- return MAX_JIFFY_OFFSET;
-#if HZ <= 1000 && !(1000 % HZ)
- return (m + (1000 / HZ) - 1) / (1000 / HZ);
-#elif HZ > 1000 && !(HZ % 1000)
- return m * (HZ / 1000);
-#else
- return (m * HZ + 999) / 1000;
-#endif
-}
-
-/*
- * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
- * that a remainder subtract here would not do the right thing as the
- * resolution values don't fall on second boundries. I.e. the line:
- * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
- *
- * Rather, we just shift the bits off the right.
- *
- * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
- * value to a scaled second value.
- */
-static __inline__ unsigned long
-timespec_to_jiffies(const struct timespec *value)
-{
- unsigned long sec = value->tv_sec;
- long nsec = value->tv_nsec + TICK_NSEC - 1;
-
- if (sec >= MAX_SEC_IN_JIFFIES){
- sec = MAX_SEC_IN_JIFFIES;
- nsec = 0;
- }
- return (((u64)sec * SEC_CONVERSION) +
- (((u64)nsec * NSEC_CONVERSION) >>
- (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
-
-}
-
-static __inline__ void
-jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
-{
- /*
- * Convert jiffies to nanoseconds and separate with
- * one divide.
- */
- u64 nsec = (u64)jiffies * TICK_NSEC;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
-}
-
-/* Same for "timeval"
- *
- * Well, almost. The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
- */
-static __inline__ unsigned long
-timeval_to_jiffies(const struct timeval *value)
-{
- unsigned long sec = value->tv_sec;
- long usec = value->tv_usec;
-
- if (sec >= MAX_SEC_IN_JIFFIES){
- sec = MAX_SEC_IN_JIFFIES;
- usec = 0;
- }
- return (((u64)sec * SEC_CONVERSION) +
- (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
- (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
-}
-
-static __inline__ void
-jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
-{
- /*
- * Convert jiffies to nanoseconds and separate with
- * one divide.
- */
- u64 nsec = (u64)jiffies * TICK_NSEC;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
- value->tv_usec /= NSEC_PER_USEC;
-}
-
static __inline__ int timespec_equal(struct timespec *a, struct timespec *b)
{
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
@@ -347,11 +91,6 @@ struct timespec current_kernel_time(void);
#define CURRENT_TIME (current_kernel_time())
-#endif /* __KERNEL__ */
-
-#define NFDBITS __NFDBITS
-
-#ifdef __KERNEL__
extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday(struct timespec *tv);
extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
@@ -378,7 +117,10 @@ set_normalized_timespec (struct timespec *ts, time_t sec, long nsec)
ts->tv_sec = sec;
ts->tv_nsec = nsec;
}
-#endif
+
+#endif /* __KERNEL__ */
+
+#define NFDBITS __NFDBITS
#define FD_SETSIZE __FD_SETSIZE
#define FD_SET(fd,fdsetp) __FD_SET(fd,fdsetp)
diff --git a/include/linux/times.h b/include/linux/times.h
index 0c5aa078dad4..e2d3020742a6 100644
--- a/include/linux/times.h
+++ b/include/linux/times.h
@@ -1,79 +1,7 @@
#ifndef _LINUX_TIMES_H
#define _LINUX_TIMES_H
-#ifdef __KERNEL__
-#include <linux/timex.h>
-#include <asm/div64.h>
-#include <asm/types.h>
-#include <asm/param.h>
-
-static inline clock_t jiffies_to_clock_t(long x)
-{
-#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
- return x / (HZ / USER_HZ);
-#else
- u64 tmp = (u64)x * TICK_NSEC;
- do_div(tmp, (NSEC_PER_SEC / USER_HZ));
- return (long)tmp;
-#endif
-}
-
-static inline unsigned long clock_t_to_jiffies(unsigned long x)
-{
-#if (HZ % USER_HZ)==0
- if (x >= ~0UL / (HZ / USER_HZ))
- return ~0UL;
- return x * (HZ / USER_HZ);
-#else
- u64 jif;
-
- /* Don't worry about loss of precision here .. */
- if (x >= ~0UL / HZ * USER_HZ)
- return ~0UL;
-
- /* .. but do try to contain it here */
- jif = x * (u64) HZ;
- do_div(jif, USER_HZ);
- return jif;
-#endif
-}
-
-static inline u64 jiffies_64_to_clock_t(u64 x)
-{
-#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
- do_div(x, HZ / USER_HZ);
-#else
- /*
- * There are better ways that don't overflow early,
- * but even this doesn't overflow in hundreds of years
- * in 64 bits, so..
- */
- x *= TICK_NSEC;
- do_div(x, (NSEC_PER_SEC / USER_HZ));
-#endif
- return x;
-}
-#endif
-
-static inline u64 nsec_to_clock_t(u64 x)
-{
-#if (NSEC_PER_SEC % USER_HZ) == 0
- do_div(x, (NSEC_PER_SEC / USER_HZ));
-#elif (USER_HZ % 512) == 0
- x *= USER_HZ/512;
- do_div(x, (NSEC_PER_SEC / 512));
-#else
- /*
- * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
- * overflow after 64.99 years.
- * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
- */
- x *= 9;
- do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2))
- / USER_HZ));
-#endif
- return x;
-}
+#include <linux/types.h>
struct tms {
clock_t tms_utime;
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 31ef4595b1fd..438645f2fedc 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -55,33 +55,10 @@
#include <linux/config.h>
#include <linux/compiler.h>
+#include <linux/time.h>
#include <asm/param.h>
-
-/*
- * The following defines establish the engineering parameters of the PLL
- * model. The HZ variable establishes the timer interrupt frequency, 100 Hz
- * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the
- * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the
- * nearest power of two in order to avoid hardware multiply operations.
- */
-#if HZ >= 12 && HZ < 24
-# define SHIFT_HZ 4
-#elif HZ >= 24 && HZ < 48
-# define SHIFT_HZ 5
-#elif HZ >= 48 && HZ < 96
-# define SHIFT_HZ 6
-#elif HZ >= 96 && HZ < 192
-# define SHIFT_HZ 7
-#elif HZ >= 192 && HZ < 384
-# define SHIFT_HZ 8
-#elif HZ >= 384 && HZ < 768
-# define SHIFT_HZ 9
-#elif HZ >= 768 && HZ < 1536
-# define SHIFT_HZ 10
-#else
-# error You lose.
-#endif
+#include <asm/timex.h>
/*
* SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
@@ -152,41 +129,6 @@
#define MAXGLITCH 30 /* pps signal glitch max (s) */
/*
- * Pick up the architecture specific timex specifications
- */
-#include <asm/timex.h>
-
-/* LATCH is used in the interval timer and ftape setup. */
-#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-
-/* Suppose we want to devide two numbers NOM and DEN: NOM/DEN, the we can
- * improve accuracy by shifting LSH bits, hence calculating:
- * (NOM << LSH) / DEN
- * This however means trouble for large NOM, because (NOM << LSH) may no
- * longer fit in 32 bits. The following way of calculating this gives us
- * some slack, under the following conditions:
- * - (NOM / DEN) fits in (32 - LSH) bits.
- * - (NOM % DEN) fits in (32 - LSH) bits.
- */
-#define SH_DIV(NOM,DEN,LSH) ( ((NOM / DEN) << LSH) \
- + (((NOM % DEN) << LSH) + DEN / 2) / DEN)
-
-/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
-#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
-
-/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
-#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
-
-/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
-#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
-
-/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */
-/* a value TUSEC for TICK_USEC (can be set bij adjtimex) */
-#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
-
-
-#include <linux/time.h>
-/*
* syscall interface - used (mainly by NTP daemon)
* to discipline kernel clock oscillator
*/
diff --git a/include/linux/types.h b/include/linux/types.h
index 13ccdf3036fd..893c4b367bae 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -140,6 +140,13 @@ typedef unsigned long sector_t;
#define pgoff_t unsigned long
#endif
+#endif /* __KERNEL_STRICT_NAMES */
+
+/*
+ * Below are truly Linux-specific types that should never collide with
+ * any application/library that wants linux/types.h.
+ */
+
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#else
@@ -153,13 +160,6 @@ typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __le64;
typedef __u64 __bitwise __be64;
-#endif /* __KERNEL_STRICT_NAMES */
-
-/*
- * Below are truly Linux-specific types that should never collide with
- * any application/library that wants linux/types.h.
- */
-
struct ustat {
__kernel_daddr_t f_tfree;
__kernel_ino_t f_tinode;
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a43c95a016d7..18ee0751a32b 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -61,6 +61,13 @@ struct usb_host_interface {
int extralen;
};
+enum usb_interface_condition {
+ USB_INTERFACE_UNBOUND = 0,
+ USB_INTERFACE_BINDING,
+ USB_INTERFACE_BOUND,
+ USB_INTERFACE_UNBINDING,
+};
+
/**
* struct usb_interface - what usb device drivers talk to
* @altsetting: array of interface structures, one for each alternate
@@ -75,6 +82,8 @@ struct usb_host_interface {
* be unused. The driver should set this value in the probe()
* function of the driver, after it has been assigned a minor
* number from the USB core by calling usb_register_dev().
+ * @condition: binding state of the interface: not bound, binding
+ * (in probe()), bound to a driver, or unbinding (in disconnect())
* @dev: driver model's view of this device
* @class_dev: driver model's class view of this device.
*
@@ -113,6 +122,7 @@ struct usb_interface {
unsigned num_altsetting; /* number of alternate settings */
int minor; /* minor number this interface is bound to */
+ enum usb_interface_condition condition; /* state of binding */
struct device dev; /* interface specific device info */
struct class_device *class_dev;
};
@@ -264,7 +274,6 @@ struct usb_bus {
int bandwidth_isoc_reqs; /* number of Isoc. requests */
struct dentry *usbfs_dentry; /* usbfs dentry entry for the bus */
- struct dentry *usbdevfs_dentry; /* usbdevfs dentry entry for the bus */
struct class_device class_dev; /* class device for this bus */
void (*release)(struct usb_bus *bus); /* function to destroy this bus's memory */
@@ -282,6 +291,14 @@ struct usb_bus {
struct usb_tt;
+/*
+ * struct usb_device - kernel's representation of a USB device
+ *
+ * FIXME: Write the kerneldoc!
+ *
+ * Usbcore drivers should not set usbdev->state directly. Instead use
+ * usb_set_device_state().
+ */
struct usb_device {
int devnum; /* Address on USB bus */
char devpath [16]; /* Use in messages: /port/port/... */
@@ -315,7 +332,6 @@ struct usb_device {
struct list_head filelist;
struct dentry *usbfs_dentry; /* usbfs dentry entry for the device */
- struct dentry *usbdevfs_dentry; /* usbdevfs dentry entry for the device */
/*
* Child devices - these can be either new devices
@@ -333,9 +349,14 @@ struct usb_device {
extern struct usb_device *usb_get_dev(struct usb_device *dev);
extern void usb_put_dev(struct usb_device *dev);
-/* mostly for devices emulating SCSI over USB */
+extern void usb_lock_device(struct usb_device *udev);
+extern int usb_trylock_device(struct usb_device *udev);
+extern int usb_lock_device_for_reset(struct usb_device *udev,
+ struct usb_interface *iface);
+extern void usb_unlock_device(struct usb_device *udev);
+
+/* USB port reset for device reinitialization */
extern int usb_reset_device(struct usb_device *dev);
-extern int __usb_reset_device(struct usb_device *dev);
extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h
index 78f47434b757..af49afaf7bb4 100644
--- a/include/linux/usbdevice_fs.h
+++ b/include/linux/usbdevice_fs.h
@@ -166,16 +166,6 @@ struct dev_state {
unsigned long ifclaimed;
};
-/* internal methods & data */
-extern struct usb_driver usbdevfs_driver;
-extern struct file_operations usbdevfs_drivers_fops;
-extern struct file_operations usbdevfs_devices_fops;
-extern struct file_operations usbdevfs_device_file_operations;
-extern struct inode_operations usbdevfs_device_inode_operations;
-extern struct inode_operations usbdevfs_bus_inode_operations;
-extern struct file_operations usbdevfs_bus_file_operations;
-extern void usbdevfs_conn_disc_event(void);
-
#endif /* __KERNEL__ */
/* --------------------------------------------------------------------- */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 21cd4df67b24..8b3a2b86d92a 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -24,6 +24,7 @@
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>
+#include <asm/current.h>
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
@@ -37,6 +38,16 @@ struct __wait_queue {
struct list_head task_list;
};
+struct wait_bit_key {
+ void *flags;
+ int bit_nr;
+};
+
+struct wait_bit_queue {
+ struct wait_bit_key key;
+ wait_queue_t wait;
+};
+
struct __wait_queue_head {
spinlock_t lock;
struct list_head task_list;
@@ -63,6 +74,9 @@ typedef struct __wait_queue_head wait_queue_head_t;
#define DECLARE_WAIT_QUEUE_HEAD(name) \
wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
+#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
+ { .flags = word, .bit_nr = bit, }
+
static inline void init_waitqueue_head(wait_queue_head_t *q)
{
q->lock = SPIN_LOCK_UNLOCKED;
@@ -125,11 +139,17 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
+int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
+int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
+void FASTCALL(wake_up_bit(void *, int));
+int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
+int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
+wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
#define wake_up(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_all(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_all_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
@@ -300,6 +320,7 @@ void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
wait_queue_t *wait, int state));
void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
#define DEFINE_WAIT(name) \
wait_queue_t name = { \
@@ -310,12 +331,69 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *
}, \
}
+#define DEFINE_WAIT_BIT(name, word, bit) \
+ struct wait_bit_queue name = { \
+ .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
+ .wait = { \
+ .task = current, \
+ .func = wake_bit_function, \
+ .task_list = \
+ LIST_HEAD_INIT(name.wait.task_list), \
+ }, \
+ }
+
#define init_wait(wait) \
do { \
wait->task = current; \
wait->func = autoremove_wake_function; \
INIT_LIST_HEAD(&wait->task_list); \
} while (0)
+
+/**
+ * wait_on_bit - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that waits on a bit.
+ * For instance, if one were to have waiters on a bitflag, one would
+ * call wait_on_bit() in threads waiting for the bit to clear.
+ * One uses wait_on_bit() where one is waiting for the bit to clear,
+ * but has no intention of setting it.
+ */
+static inline int wait_on_bit(void *word, int bit,
+ int (*action)(void *), unsigned mode)
+{
+ if (!test_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit(word, bit, action, mode);
+}
+
+/**
+ * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that waits on a bit
+ * when one intends to set it, for instance, trying to lock bitflags.
+ * For instance, if one were to have waiters trying to set bitflag
+ * and waiting for it to clear before setting it, one would call
+ * wait_on_bit() in threads waiting to be able to set the bit.
+ * One uses wait_on_bit_lock() where one is waiting for the bit to
+ * clear with the intention of setting it, and when done, clearing it.
+ */
+static inline int wait_on_bit_lock(void *word, int bit,
+ int (*action)(void *), unsigned mode)
+{
+ if (!test_and_set_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit_lock(word, bit, action, mode);
+}
#endif /* __KERNEL__ */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 7c165c334be5..1c9994fe2acc 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -68,7 +68,7 @@ struct writeback_control {
*/
void writeback_inodes(struct writeback_control *wbc);
void wake_up_inode(struct inode *inode);
-void __wait_on_inode(struct inode * inode);
+int inode_wait(void *);
void sync_inodes_sb(struct super_block *, int wait);
void sync_inodes(int wait);
@@ -76,8 +76,8 @@ void sync_inodes(int wait);
static inline void wait_on_inode(struct inode *inode)
{
might_sleep();
- if (inode->i_state & I_LOCK)
- __wait_on_inode(inode);
+ wait_on_bit(&inode->i_state, __I_LOCK, inode_wait,
+ TASK_UNINTERRUPTIBLE);
}
/*
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index d9c5d5c83d49..23f9c61d9546 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -5,6 +5,7 @@
Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved.
+ Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
*/
#ifndef _LINUX_XATTR_H
#define _LINUX_XATTR_H
@@ -14,4 +15,19 @@
#define XATTR_SECURITY_PREFIX "security."
+struct xattr_handler {
+ char *prefix;
+ size_t (*list)(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len);
+ int (*get)(struct inode *inode, const char *name, void *buffer,
+ size_t size);
+ int (*set)(struct inode *inode, const char *name, const void *buffer,
+ size_t size, int flags);
+};
+
+ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
+ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
+int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
+int generic_removexattr(struct dentry *dentry, const char *name);
+
#endif /* _LINUX_XATTR_H */
diff --git a/include/video/radeon.h b/include/video/radeon.h
index 664f758a746a..91ba041acab6 100644
--- a/include/video/radeon.h
+++ b/include/video/radeon.h
@@ -619,8 +619,7 @@
#define LVDS_BLON (1 << 19)
#define LVDS_SEL_CRTC2 (1 << 23)
#define LVDS_STATE_MASK \
- (LVDS_ON | LVDS_DISPLAY_DIS | LVDS_BL_MOD_LEVEL_MASK | \
- LVDS_EN | LVDS_DIGON | LVDS_BLON)
+ (LVDS_ON | LVDS_DISPLAY_DIS | LVDS_BL_MOD_LEVEL_MASK | LVDS_BLON)
/* LVDS_PLL_CNTL bit constatns */
#define HSYNC_DELAY_SHIFT 0x1c
diff --git a/kernel/Makefile b/kernel/Makefile
index c4337751cee7..abab504f01e1 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,7 +7,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
sysctl.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o intermodule.o extable.o params.o posix-timers.o \
- kthread.o
+ kthread.o wait.o kfifo.o
obj-$(CONFIG_FUTEX) += futex.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
diff --git a/kernel/exit.c b/kernel/exit.c
index 55d853392524..a8ae81ed1d41 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -14,6 +14,7 @@
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/namespace.h>
+#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
@@ -511,8 +512,6 @@ void exit_mm(struct task_struct *tsk)
__exit_mm(tsk);
}
-EXPORT_SYMBOL(exit_mm);
-
static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
{
/*
@@ -816,6 +815,7 @@ asmlinkage NORET_TYPE void do_exit(long code)
__exit_fs(tsk);
exit_namespace(tsk);
exit_thread();
+ exit_keys(tsk);
if (tsk->signal->leader)
disassociate_ctty(1);
diff --git a/kernel/fork.c b/kernel/fork.c
index 3020dccc548f..96714c501cc8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -24,6 +24,7 @@
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
+#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
@@ -100,131 +101,6 @@ void __put_task_struct(struct task_struct *tsk)
free_task(tsk);
}
-void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
-{
- unsigned long flags;
-
- wait->flags &= ~WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&q->lock, flags);
- __add_wait_queue(q, wait);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(add_wait_queue);
-
-void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
-{
- unsigned long flags;
-
- wait->flags |= WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&q->lock, flags);
- __add_wait_queue_tail(q, wait);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(add_wait_queue_exclusive);
-
-void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&q->lock, flags);
- __remove_wait_queue(q, wait);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(remove_wait_queue);
-
-
-/*
- * Note: we use "set_current_state()" _after_ the wait-queue add,
- * because we need a memory barrier there on SMP, so that any
- * wake-function that tests for the wait-queue being active
- * will be guaranteed to see waitqueue addition _or_ subsequent
- * tests in this thread will see the wakeup having taken place.
- *
- * The spin_unlock() itself is semi-permeable and only protects
- * one way (it only protects stuff inside the critical region and
- * stops them from bleeding out - it would still allow subsequent
- * loads to move into the the critical region).
- */
-void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
-{
- unsigned long flags;
-
- wait->flags &= ~WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&q->lock, flags);
- if (list_empty(&wait->task_list))
- __add_wait_queue(q, wait);
- /*
- * don't alter the task state if this is just going to
- * queue an async wait queue callback
- */
- if (is_sync_wait(wait))
- set_current_state(state);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(prepare_to_wait);
-
-void fastcall
-prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
-{
- unsigned long flags;
-
- wait->flags |= WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&q->lock, flags);
- if (list_empty(&wait->task_list))
- __add_wait_queue_tail(q, wait);
- /*
- * don't alter the task state if this is just going to
- * queue an async wait queue callback
- */
- if (is_sync_wait(wait))
- set_current_state(state);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL(prepare_to_wait_exclusive);
-
-void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
-{
- unsigned long flags;
-
- __set_current_state(TASK_RUNNING);
- /*
- * We can check for list emptiness outside the lock
- * IFF:
- * - we use the "careful" check that verifies both
- * the next and prev pointers, so that there cannot
- * be any half-pending updates in progress on other
- * CPU's that we haven't seen yet (and that might
- * still change the stack area.
- * and
- * - all other users take the lock (ie we can only
- * have _one_ other CPU that looks at or modifies
- * the list).
- */
- if (!list_empty_careful(&wait->task_list)) {
- spin_lock_irqsave(&q->lock, flags);
- list_del_init(&wait->task_list);
- spin_unlock_irqrestore(&q->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(finish_wait);
-
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
-{
- int ret = default_wake_function(wait, mode, sync, key);
-
- if (ret)
- list_del_init(&wait->task_list);
- return ret;
-}
-
-EXPORT_SYMBOL(autoremove_wake_function);
-
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
@@ -426,6 +302,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
atomic_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
mm->core_waiters = 0;
+ mm->nr_ptes = 0;
mm->page_table_lock = SPIN_LOCK_UNLOCKED;
mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
mm->ioctx_list = NULL;
@@ -1019,6 +896,10 @@ static task_t *copy_process(unsigned long clone_flags,
}
#endif
+ p->tgid = p->pid;
+ if (clone_flags & CLONE_THREAD)
+ p->tgid = current->tgid;
+
if ((retval = security_task_alloc(p)))
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
@@ -1036,8 +917,10 @@ static task_t *copy_process(unsigned long clone_flags,
goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
- if ((retval = copy_namespace(clone_flags, p)))
+ if ((retval = copy_keys(clone_flags, p)))
goto bad_fork_cleanup_mm;
+ if ((retval = copy_namespace(clone_flags, p)))
+ goto bad_fork_cleanup_keys;
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_namespace;
@@ -1071,7 +954,6 @@ static task_t *copy_process(unsigned long clone_flags,
* Ok, make it visible to the rest of the system.
* We dont wake it up yet.
*/
- p->tgid = p->pid;
p->group_leader = p;
INIT_LIST_HEAD(&p->ptrace_children);
INIT_LIST_HEAD(&p->ptrace_list);
@@ -1119,7 +1001,6 @@ static task_t *copy_process(unsigned long clone_flags,
retval = -EAGAIN;
goto bad_fork_cleanup_namespace;
}
- p->tgid = current->tgid;
p->group_leader = current->group_leader;
if (current->signal->group_stop_count > 0) {
@@ -1159,6 +1040,8 @@ fork_out:
bad_fork_cleanup_namespace:
exit_namespace(p);
+bad_fork_cleanup_keys:
+ exit_keys(p);
bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 74ba3cb21809..8f3c6c1d1ce7 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -4,7 +4,12 @@
* Rewritten and vastly simplified by Rusty Russell for in-kernel
* module loader:
* Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
- * Stem compression by Andi Kleen.
+ *
+ * ChangeLog:
+ *
+ * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com>
+ * Changed the compression method from stem compression to "table lookup"
+ * compression (see scripts/kallsyms.c for a more complete description)
*/
#include <linux/kallsyms.h>
#include <linux/module.h>
@@ -17,7 +22,12 @@
/* These will be re-linked against their real values during the second link stage */
extern unsigned long kallsyms_addresses[] __attribute__((weak));
extern unsigned long kallsyms_num_syms __attribute__((weak));
-extern char kallsyms_names[] __attribute__((weak));
+extern u8 kallsyms_names[] __attribute__((weak));
+
+extern u8 kallsyms_token_table[] __attribute__((weak));
+extern u16 kallsyms_token_index[] __attribute__((weak));
+
+extern unsigned long kallsyms_markers[] __attribute__((weak));
/* Defined by the linker script. */
extern char _stext[], _etext[], _sinittext[], _einittext[];
@@ -37,21 +47,88 @@ static inline int is_kernel_text(unsigned long addr)
return 0;
}
+/* expand a compressed symbol data into the resulting uncompressed string,
+ given the offset to where the symbol is in the compressed stream */
+static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
+{
+ int len, skipped_first = 0;
+ u8 *tptr, *data;
+
+ /* get the compressed symbol length from the first symbol byte */
+ data = &kallsyms_names[off];
+ len = *data;
+ data++;
+
+ /* update the offset to return the offset for the next symbol on
+ * the compressed stream */
+ off += len + 1;
+
+ /* for every byte on the compressed symbol data, copy the table
+ entry for that byte */
+ while(len) {
+ tptr = &kallsyms_token_table[ kallsyms_token_index[*data] ];
+ data++;
+ len--;
+
+ while (*tptr) {
+ if(skipped_first) {
+ *result = *tptr;
+ result++;
+ } else
+ skipped_first = 1;
+ tptr++;
+ }
+ }
+
+ *result = '\0';
+
+ /* return to offset to the next symbol */
+ return off;
+}
+
+/* get symbol type information. This is encoded as a single char at the
+ * beginning of the symbol name */
+static char kallsyms_get_symbol_type(unsigned int off)
+{
+ /* get just the first code, look it up in the token table, and return the
+ * first char from this token */
+ return kallsyms_token_table[ kallsyms_token_index[ kallsyms_names[off+1] ] ];
+}
+
+
+/* find the offset on the compressed stream given and index in the
+ * kallsyms array */
+static unsigned int get_symbol_offset(unsigned long pos)
+{
+ u8 *name;
+ int i;
+
+ /* use the closest marker we have. We have markers every 256 positions,
+ * so that should be close enough */
+ name = &kallsyms_names[ kallsyms_markers[pos>>8] ];
+
+ /* sequentially scan all the symbols up to the point we're searching for.
+ * Every symbol is stored in a [<len>][<len> bytes of data] format, so we
+ * just need to add the len to the current pointer for every symbol we
+ * wish to skip */
+ for(i = 0; i < (pos&0xFF); i++)
+ name = name + (*name) + 1;
+
+ return name - kallsyms_names;
+}
+
/* Lookup the address for this symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name)
{
char namebuf[KSYM_NAME_LEN+1];
unsigned long i;
- char *knames;
+ unsigned int off;
- for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
- unsigned prefix = *knames++;
+ for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
+ off = kallsyms_expand_symbol(off, namebuf);
- strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
if (strcmp(namebuf, name) == 0)
return kallsyms_addresses[i];
-
- knames += strlen(knames) + 1;
}
return module_kallsyms_lookup_name(name);
}
@@ -62,7 +139,7 @@ const char *kallsyms_lookup(unsigned long addr,
unsigned long *offset,
char **modname, char *namebuf)
{
- unsigned long i, best = 0;
+ unsigned long i, low, high, mid;
/* This kernel should never had been booted. */
BUG_ON(!kallsyms_addresses);
@@ -71,40 +148,45 @@ const char *kallsyms_lookup(unsigned long addr,
namebuf[0] = 0;
if (is_kernel_text(addr) || is_kernel_inittext(addr)) {
- unsigned long symbol_end;
- char *name = kallsyms_names;
-
- /* They're sorted, we could be clever here, but who cares? */
- for (i = 0; i < kallsyms_num_syms; i++) {
- if (kallsyms_addresses[i] > kallsyms_addresses[best] &&
- kallsyms_addresses[i] <= addr)
- best = i;
- }
+ unsigned long symbol_end=0;
- /* Grab name */
- for (i = 0; i <= best; i++) {
- unsigned prefix = *name++;
- strncpy(namebuf + prefix, name, KSYM_NAME_LEN - prefix);
- name += strlen(name) + 1;
+ /* do a binary search on the sorted kallsyms_addresses array */
+ low = 0;
+ high = kallsyms_num_syms;
+
+ while (high-low > 1) {
+ mid = (low + high) / 2;
+ if (kallsyms_addresses[mid] <= addr) low = mid;
+ else high = mid;
}
- /* At worst, symbol ends at end of section. */
- if (is_kernel_inittext(addr))
- symbol_end = (unsigned long)_einittext;
- else
- symbol_end = (unsigned long)_etext;
+ /* search for the first aliased symbol. Aliased symbols are
+ symbols with the same address */
+ while (low && kallsyms_addresses[low - 1] == kallsyms_addresses[low])
+ --low;
+
+ /* Grab name */
+ kallsyms_expand_symbol(get_symbol_offset(low), namebuf);
/* Search for next non-aliased symbol */
- for (i = best+1; i < kallsyms_num_syms; i++) {
- if (kallsyms_addresses[i] > kallsyms_addresses[best]) {
+ for (i = low + 1; i < kallsyms_num_syms; i++) {
+ if (kallsyms_addresses[i] > kallsyms_addresses[low]) {
symbol_end = kallsyms_addresses[i];
break;
}
}
- *symbolsize = symbol_end - kallsyms_addresses[best];
+ /* if we found no next symbol, we use the end of the section */
+ if (!symbol_end) {
+ if (is_kernel_inittext(addr))
+ symbol_end = (unsigned long)_einittext;
+ else
+ symbol_end = (unsigned long)_etext;
+ }
+
+ *symbolsize = symbol_end - kallsyms_addresses[low];
*modname = NULL;
- *offset = addr - kallsyms_addresses[best];
+ *offset = addr - kallsyms_addresses[low];
return namebuf;
}
@@ -135,7 +217,7 @@ void __print_symbol(const char *fmt, unsigned long address)
printk(fmt, buffer);
}
-/* To avoid O(n^2) iteration, we carry prefix along. */
+/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
struct kallsym_iter
{
loff_t pos;
@@ -168,31 +250,23 @@ static int get_ksymbol_mod(struct kallsym_iter *iter)
/* Returns space to next name. */
static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
{
- unsigned stemlen, off = iter->nameoff;
-
- /* First char of each symbol name indicates prefix length
- shared with previous name (stem compression). */
- stemlen = kallsyms_names[off++];
+ unsigned off = iter->nameoff;
- strlcpy(iter->name+stemlen, kallsyms_names + off,
- KSYM_NAME_LEN+1-stemlen);
- off += strlen(kallsyms_names + off) + 1;
iter->owner = NULL;
iter->value = kallsyms_addresses[iter->pos];
- if (is_kernel_text(iter->value) || is_kernel_inittext(iter->value))
- iter->type = 't';
- else
- iter->type = 'd';
- upcase_if_global(iter);
+ iter->type = kallsyms_get_symbol_type(off);
+
+ off = kallsyms_expand_symbol(off, iter->name);
+
return off - iter->nameoff;
}
-static void reset_iter(struct kallsym_iter *iter)
+static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
{
iter->name[0] = '\0';
- iter->nameoff = 0;
- iter->pos = 0;
+ iter->nameoff = get_symbol_offset(new_pos);
+ iter->pos = new_pos;
}
/* Returns false if pos at or past end of file. */
@@ -204,16 +278,13 @@ static int update_iter(struct kallsym_iter *iter, loff_t pos)
return get_ksymbol_mod(iter);
}
- /* If we're past the desired position, reset to start. */
- if (pos < iter->pos)
- reset_iter(iter);
-
- /* We need to iterate through the previous symbols: can be slow */
- for (; iter->pos != pos; iter->pos++) {
- iter->nameoff += get_ksymbol_core(iter);
- cond_resched();
- }
- get_ksymbol_core(iter);
+ /* If we're not on the desired position, reset to new position. */
+ if (pos != iter->pos)
+ reset_iter(iter, pos);
+
+ iter->nameoff += get_ksymbol_core(iter);
+ iter->pos++;
+
return 1;
}
@@ -267,14 +338,15 @@ struct seq_operations kallsyms_op = {
static int kallsyms_open(struct inode *inode, struct file *file)
{
/* We keep iterator in m->private, since normal case is to
- * s_start from where we left off, so we avoid O(N^2). */
+ * s_start from where we left off, so we avoid
+ * calling get_symbol_offset for every symbol */
struct kallsym_iter *iter;
int ret;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
- reset_iter(iter);
+ reset_iter(iter, 0);
ret = seq_open(file, &kallsyms_op);
if (ret == 0)
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
new file mode 100644
index 000000000000..9a5e17b507fe
--- /dev/null
+++ b/kernel/kfifo.c
@@ -0,0 +1,170 @@
+/*
+ * A simple kernel FIFO implementation.
+ *
+ * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/kfifo.h>
+
+/*
+ * kfifo_init - allocates a new FIFO using a preallocated buffer
+ * @buffer: the preallocated buffer to be used.
+ * @size: the size of the internal buffer, this have to be a power of 2.
+ * @gfp_mask: get_free_pages mask, passed to kmalloc()
+ * @lock: the lock to be used to protect the fifo buffer
+ *
+ * Do NOT pass the kfifo to kfifo_free() after use ! Simply free the
+ * struct kfifo with kfree().
+ */
+struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
+ int gfp_mask, spinlock_t *lock)
+{
+ struct kfifo *fifo;
+
+ /* size must be a power of 2 */
+ BUG_ON(size & (size - 1));
+
+ fifo = kmalloc(sizeof(struct kfifo), gfp_mask);
+ if (!fifo)
+ return ERR_PTR(-ENOMEM);
+
+ fifo->buffer = buffer;
+ fifo->size = size;
+ fifo->in = fifo->out = 0;
+ fifo->lock = lock;
+
+ return fifo;
+}
+EXPORT_SYMBOL(kfifo_init);
+
+/*
+ * kfifo_alloc - allocates a new FIFO and its internal buffer
+ * @size: the size of the internal buffer to be allocated.
+ * @gfp_mask: get_free_pages mask, passed to kmalloc()
+ * @lock: the lock to be used to protect the fifo buffer
+ *
+ * The size will be rounded-up to a power of 2.
+ */
+struct kfifo *kfifo_alloc(unsigned int size, int gfp_mask, spinlock_t *lock)
+{
+ unsigned int newsize;
+ unsigned char *buffer;
+ struct kfifo *ret;
+
+ /*
+ * round up to the next power of 2, since our 'let the indices
+ * wrap' technique works only in this case.
+ */
+ newsize = size;
+ if (size & (size - 1)) {
+ BUG_ON(size > 0x80000000);
+ newsize = roundup_pow_of_two(size);
+ }
+
+ buffer = kmalloc(newsize, gfp_mask);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ ret = kfifo_init(buffer, size, gfp_mask, lock);
+
+ if (IS_ERR(ret))
+ kfree(buffer);
+
+ return ret;
+}
+EXPORT_SYMBOL(kfifo_alloc);
+
+/*
+ * kfifo_free - frees the FIFO
+ * @fifo: the fifo to be freed.
+ */
+void kfifo_free(struct kfifo *fifo)
+{
+ kfree(fifo->buffer);
+ kfree(fifo);
+}
+EXPORT_SYMBOL(kfifo_free);
+
+/*
+ * __kfifo_put - puts some data into the FIFO, no locking version
+ * @fifo: the fifo to be used.
+ * @buffer: the data to be added.
+ * @len: the length of the data to be added.
+ *
+ * This function copies at most 'len' bytes from the 'buffer' into
+ * the FIFO depending on the free space, and returns the number of
+ * bytes copied.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+unsigned int __kfifo_put(struct kfifo *fifo,
+ unsigned char *buffer, unsigned int len)
+{
+ unsigned int l;
+
+ len = min(len, fifo->size - fifo->in + fifo->out);
+
+ /* first put the data starting from fifo->in to buffer end */
+ l = min(len, fifo->size - (fifo->in & (fifo->size - 1)));
+ memcpy(fifo->buffer + (fifo->in & (fifo->size - 1)), buffer, l);
+
+ /* then put the rest (if any) at the beginning of the buffer */
+ memcpy(fifo->buffer, buffer + l, len - l);
+
+ fifo->in += len;
+
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_put);
+
+/*
+ * __kfifo_get - gets some data from the FIFO, no locking version
+ * @fifo: the fifo to be used.
+ * @buffer: where the data must be copied.
+ * @len: the size of the destination buffer.
+ *
+ * This function copies at most 'len' bytes from the FIFO into the
+ * 'buffer' and returns the number of copied bytes.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+unsigned int __kfifo_get(struct kfifo *fifo,
+ unsigned char *buffer, unsigned int len)
+{
+ unsigned int l;
+
+ len = min(len, fifo->in - fifo->out);
+
+ /* first get the data from fifo->out until the end of the buffer */
+ l = min(len, fifo->size - (fifo->out & (fifo->size - 1)));
+ memcpy(buffer, fifo->buffer + (fifo->out & (fifo->size - 1)), l);
+
+ /* then get the rest (if any) from the beginning of the buffer */
+ memcpy(buffer + l, fifo->buffer, len - l);
+
+ fifo->out += len;
+
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_get);
diff --git a/kernel/panic.c b/kernel/panic.c
index fce7f4030d0a..c7ab9981c7aa 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -110,6 +110,9 @@ EXPORT_SYMBOL(panic);
* 'P' - Proprietary module has been loaded.
* 'F' - Module has been forcibly loaded.
* 'S' - SMP with CPUs not designed for SMP.
+ * 'R' - User forced a module unload.
+ * 'M' - Machine had a machine check experience.
+ * 'B' - System has hit bad_page.
*
* The string is overwritten by the next call to print_taint().
*/
@@ -118,12 +121,21 @@ const char *print_tainted(void)
{
static char buf[20];
if (tainted) {
- snprintf(buf, sizeof(buf), "Tainted: %c%c%c",
+ snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c",
tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
- tainted & TAINT_UNSAFE_SMP ? 'S' : ' ');
+ tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
+ tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
+ tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
+ tainted & TAINT_BAD_PAGE ? 'B' : ' ');
}
else
snprintf(buf, sizeof(buf), "Not tainted");
return(buf);
}
+
+void add_taint(unsigned flag)
+{
+ tainted |= flag;
+}
+EXPORT_SYMBOL(add_taint);
diff --git a/kernel/pid.c b/kernel/pid.c
index 83008f812f49..21024b7ae37c 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -1,8 +1,9 @@
/*
* Generic pidhash and scalable, time-bounded PID allocator
*
- * (C) 2002 William Irwin, IBM
- * (C) 2002 Ingo Molnar, Red Hat
+ * (C) 2002-2003 William Irwin, IBM
+ * (C) 2004 William Irwin, Oracle
+ * (C) 2002-2004 Ingo Molnar, Red Hat
*
* pid-structures are backing objects for tasks sharing a given ID to chain
* against. There is very little to them aside from hashing them and
@@ -35,9 +36,15 @@ int last_pid;
#define RESERVED_PIDS 300
-#define PIDMAP_ENTRIES (PID_MAX_LIMIT/PAGE_SIZE/8)
+int pid_max_min = RESERVED_PIDS + 1;
+int pid_max_max = PID_MAX_LIMIT;
+
+#define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8)
#define BITS_PER_PAGE (PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+#define mk_pid(map, off) (((map) - pidmap_array)*BITS_PER_PAGE + (off))
+#define find_next_offset(map, off) \
+ find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
/*
* PID-map pages start out as NULL, they get allocated upon
@@ -53,8 +60,6 @@ typedef struct pidmap {
static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
{ [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };
-static pidmap_t *map_limit = pidmap_array + PIDMAP_ENTRIES;
-
static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
fastcall void free_pidmap(int pid)
@@ -66,15 +71,18 @@ fastcall void free_pidmap(int pid)
atomic_inc(&map->nr_free);
}
-/*
- * Here we search for the next map that has free bits left.
- * Normally the next map has free PIDs.
- */
-static inline pidmap_t *next_free_map(pidmap_t *map, int *max_steps)
+int alloc_pidmap(void)
{
- while (--*max_steps) {
- if (++map == map_limit)
- map = pidmap_array;
+ int i, offset, max_scan, pid, last = last_pid;
+ pidmap_t *map;
+
+ pid = last + 1;
+ if (pid >= pid_max)
+ pid = RESERVED_PIDS;
+ offset = pid & BITS_PER_PAGE_MASK;
+ map = &pidmap_array[pid/BITS_PER_PAGE];
+ max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
+ for (i = 0; i <= max_scan; ++i) {
if (unlikely(!map->page)) {
unsigned long page = get_zeroed_page(GFP_KERNEL);
/*
@@ -87,62 +95,39 @@ static inline pidmap_t *next_free_map(pidmap_t *map, int *max_steps)
else
map->page = (void *)page;
spin_unlock(&pidmap_lock);
-
- if (!map->page)
+ if (unlikely(!map->page))
break;
}
- if (atomic_read(&map->nr_free))
- return map;
- }
- return NULL;
-}
-
-int alloc_pidmap(void)
-{
- int pid, offset, max_steps = PIDMAP_ENTRIES + 1;
- pidmap_t *map;
-
- pid = last_pid + 1;
- if (pid >= pid_max)
- pid = RESERVED_PIDS;
-
- offset = pid & BITS_PER_PAGE_MASK;
- map = pidmap_array + pid / BITS_PER_PAGE;
-
- if (likely(map->page && !test_and_set_bit(offset, map->page))) {
- /*
- * There is a small window for last_pid updates to race,
- * but in that case the next allocation will go into the
- * slowpath and that fixes things up.
- */
-return_pid:
- atomic_dec(&map->nr_free);
- last_pid = pid;
- return pid;
- }
-
- if (!offset || !atomic_read(&map->nr_free)) {
-next_map:
- map = next_free_map(map, &max_steps);
- if (!map)
- goto failure;
- offset = 0;
+ if (likely(atomic_read(&map->nr_free))) {
+ do {
+ if (!test_and_set_bit(offset, map->page)) {
+ atomic_dec(&map->nr_free);
+ last_pid = pid;
+ return pid;
+ }
+ offset = find_next_offset(map, offset);
+ pid = mk_pid(map, offset);
+ /*
+ * find_next_offset() found a bit, the pid from it
+ * is in-bounds, and if we fell back to the last
+ * bitmap block and the final block was the same
+ * as the starting point, pid is before last_pid.
+ */
+ } while (offset < BITS_PER_PAGE && pid < pid_max &&
+ (i != max_scan || pid < last ||
+ !((last+1) & BITS_PER_PAGE_MASK)));
+ }
+ if (map < &pidmap_array[(pid_max-1)/BITS_PER_PAGE]) {
+ ++map;
+ offset = 0;
+ } else {
+ map = &pidmap_array[0];
+ offset = RESERVED_PIDS;
+ if (unlikely(last == offset))
+ break;
+ }
+ pid = mk_pid(map, offset);
}
- /*
- * Find the next zero bit:
- */
-scan_more:
- offset = find_next_zero_bit(map->page, BITS_PER_PAGE, offset);
- if (offset >= BITS_PER_PAGE)
- goto next_map;
- if (test_and_set_bit(offset, map->page))
- goto scan_more;
-
- /* we got the PID: */
- pid = (map - pidmap_array) * BITS_PER_PAGE + offset;
- goto return_pid;
-
-failure:
return -1;
}
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index d1bc943072d4..8fca5822a807 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -256,41 +256,10 @@ int pm_send_all(pm_request_t rqst, void *data)
return 0;
}
-/**
- * pm_find - find a device
- * @type: type of device
- * @from: where to start looking
- *
- * Scan the power management list for devices of a specific type. The
- * return value for a matching device may be passed to further calls
- * to this function to find further matches. A %NULL indicates the end
- * of the list.
- *
- * To search from the beginning pass %NULL as the @from value.
- *
- * The caller MUST hold the pm_devs_lock lock when calling this
- * function. The instant that the lock is dropped all pointers returned
- * may become invalid.
- */
-
-struct pm_dev *pm_find(pm_dev_t type, struct pm_dev *from)
-{
- struct list_head *entry = from ? from->entry.next:pm_devs.next;
- while (entry != &pm_devs) {
- struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
- if (type == PM_UNKNOWN_DEV || dev->type == type)
- return dev;
- entry = entry->next;
- }
- return NULL;
-}
-
EXPORT_SYMBOL(pm_register);
EXPORT_SYMBOL(pm_unregister);
EXPORT_SYMBOL(pm_unregister_all);
-EXPORT_SYMBOL(pm_send);
EXPORT_SYMBOL(pm_send_all);
-EXPORT_SYMBOL(pm_find);
EXPORT_SYMBOL(pm_active);
diff --git a/kernel/printk.c b/kernel/printk.c
index c02ec626f384..390396fc6d01 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -661,12 +661,10 @@ EXPORT_SYMBOL(release_console_sem);
*
* Must be called within acquire_console_sem().
*/
-void console_conditional_schedule(void)
+void __sched console_conditional_schedule(void)
{
- if (console_may_schedule && need_resched()) {
- set_current_state(TASK_RUNNING);
- schedule();
- }
+ if (console_may_schedule)
+ cond_resched();
}
EXPORT_SYMBOL(console_conditional_schedule);
diff --git a/kernel/profile.c b/kernel/profile.c
index 1c4375fad923..e7ff9b32d822 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -1,5 +1,16 @@
/*
* linux/kernel/profile.c
+ * Simple profiling. Manages a direct-mapped profile hit count buffer,
+ * with configurable resolution, support for restricting the cpus on
+ * which profiling is done, and switching between cpu time and
+ * schedule() calls via kernel command line parameters passed at boot.
+ *
+ * Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
+ * Red Hat, July 2004
+ * Consolidation of architecture support code for profiling,
+ * William Irwin, Oracle, July 2004
+ * Amortized hit count accounting via per-cpu open-addressed hashtables
+ * to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
*/
#include <linux/config.h>
@@ -9,13 +20,29 @@
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
+#include <linux/cpu.h>
#include <linux/profile.h>
+#include <linux/highmem.h>
#include <asm/sections.h>
+#include <asm/semaphore.h>
+
+struct profile_hit {
+ u32 pc, hits;
+};
+#define PROFILE_GRPSHIFT 3
+#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
+#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
+#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;
static int prof_on;
static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
+#ifdef CONFIG_SMP
+static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
+static DEFINE_PER_CPU(int, cpu_profile_flip);
+static DECLARE_MUTEX(profile_flip_mutex);
+#endif /* CONFIG_SMP */
static int __init profile_setup(char * str)
{
@@ -181,6 +208,179 @@ EXPORT_SYMBOL_GPL(task_handoff_unregister);
EXPORT_SYMBOL_GPL(profile_event_register);
EXPORT_SYMBOL_GPL(profile_event_unregister);
+#ifdef CONFIG_SMP
+/*
+ * Each cpu has a pair of open-addressed hashtables for pending
+ * profile hits. read_profile() IPI's all cpus to request them
+ * to flip buffers and flushes their contents to prof_buffer itself.
+ * Flip requests are serialized by the profile_flip_mutex. The sole
+ * use of having a second hashtable is for avoiding cacheline
+ * contention that would otherwise happen during flushes of pending
+ * profile hits required for the accuracy of reported profile hits
+ * and so resurrect the interrupt livelock issue.
+ *
+ * The open-addressed hashtables are indexed by profile buffer slot
+ * and hold the number of pending hits to that profile buffer slot on
+ * a cpu in an entry. When the hashtable overflows, all pending hits
+ * are accounted to their corresponding profile buffer slots with
+ * atomic_add() and the hashtable emptied. As numerous pending hits
+ * may be accounted to a profile buffer slot in a hashtable entry,
+ * this amortizes a number of atomic profile buffer increments likely
+ * to be far larger than the number of entries in the hashtable,
+ * particularly given that the number of distinct profile buffer
+ * positions to which hits are accounted during short intervals (e.g.
+ * several seconds) is usually very small. Exclusion from buffer
+ * flipping is provided by interrupt disablement (note that for
+ * SCHED_PROFILING profile_hit() may be called from process context).
+ * The hash function is meant to be lightweight as opposed to strong,
+ * and was vaguely inspired by ppc64 firmware-supported inverted
+ * pagetable hash functions, but uses a full hashtable full of finite
+ * collision chains, not just pairs of them.
+ *
+ * -- wli
+ */
+static void __profile_flip_buffers(void *unused)
+{
+ int cpu = smp_processor_id();
+
+ per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
+}
+
+static void profile_flip_buffers(void)
+{
+ int i, j, cpu;
+
+ down(&profile_flip_mutex);
+ j = per_cpu(cpu_profile_flip, get_cpu());
+ put_cpu();
+ on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
+ for_each_online_cpu(cpu) {
+ struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
+ for (i = 0; i < NR_PROFILE_HIT; ++i) {
+ if (!hits[i].hits) {
+ if (hits[i].pc)
+ hits[i].pc = 0;
+ continue;
+ }
+ atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
+ hits[i].hits = hits[i].pc = 0;
+ }
+ }
+ up(&profile_flip_mutex);
+}
+
+static void profile_discard_flip_buffers(void)
+{
+ int i, cpu;
+
+ down(&profile_flip_mutex);
+ i = per_cpu(cpu_profile_flip, get_cpu());
+ put_cpu();
+ on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
+ for_each_online_cpu(cpu) {
+ struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
+ memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
+ }
+ up(&profile_flip_mutex);
+}
+
+void profile_hit(int type, void *__pc)
+{
+ unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
+ int i, j, cpu;
+ struct profile_hit *hits;
+
+ if (prof_on != type || !prof_buffer)
+ return;
+ pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
+ i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
+ secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
+ cpu = get_cpu();
+ hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
+ if (!hits) {
+ put_cpu();
+ return;
+ }
+ local_irq_save(flags);
+ do {
+ for (j = 0; j < PROFILE_GRPSZ; ++j) {
+ if (hits[i + j].pc == pc) {
+ hits[i + j].hits++;
+ goto out;
+ } else if (!hits[i + j].hits) {
+ hits[i + j].pc = pc;
+ hits[i + j].hits = 1;
+ goto out;
+ }
+ }
+ i = (i + secondary) & (NR_PROFILE_HIT - 1);
+ } while (i != primary);
+ atomic_inc(&prof_buffer[pc]);
+ for (i = 0; i < NR_PROFILE_HIT; ++i) {
+ atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
+ hits[i].pc = hits[i].hits = 0;
+ }
+out:
+ local_irq_restore(flags);
+ put_cpu();
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int __devinit profile_cpu_callback(struct notifier_block *info,
+ unsigned long action, void *__cpu)
+{
+ int node, cpu = (unsigned long)__cpu;
+ struct page *page;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ node = cpu_to_node(cpu);
+ per_cpu(cpu_profile_flip, cpu) = 0;
+ if (!per_cpu(cpu_profile_hits, cpu)[1]) {
+ page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (!page)
+ return NOTIFY_BAD;
+ clear_highpage(page);
+ per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
+ }
+ if (!per_cpu(cpu_profile_hits, cpu)[0]) {
+ page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (!page)
+ goto out_free;
+ clear_highpage(page);
+ per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
+ }
+ break;
+ out_free:
+ page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
+ per_cpu(cpu_profile_hits, cpu)[1] = NULL;
+ __free_page(page);
+ return NOTIFY_BAD;
+ case CPU_ONLINE:
+ cpu_set(cpu, prof_cpu_mask);
+ break;
+ case CPU_UP_CANCELED:
+ case CPU_DEAD:
+ cpu_clear(cpu, prof_cpu_mask);
+ if (per_cpu(cpu_profile_hits, cpu)[0]) {
+ page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
+ per_cpu(cpu_profile_hits, cpu)[0] = NULL;
+ __free_page(page);
+ }
+ if (per_cpu(cpu_profile_hits, cpu)[1]) {
+ page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
+ per_cpu(cpu_profile_hits, cpu)[1] = NULL;
+ __free_page(page);
+ }
+ break;
+ }
+ return NOTIFY_OK;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+#else /* !CONFIG_SMP */
+#define profile_flip_buffers() do { } while (0)
+#define profile_discard_flip_buffers() do { } while (0)
+
void profile_hit(int type, void *__pc)
{
unsigned long pc;
@@ -190,6 +390,7 @@ void profile_hit(int type, void *__pc)
pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
atomic_inc(&prof_buffer[min(pc, prof_len - 1)]);
}
+#endif /* !CONFIG_SMP */
void profile_tick(int type, struct pt_regs *regs)
{
@@ -256,6 +457,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
char * pnt;
unsigned int sample_step = 1 << prof_shift;
+ profile_flip_buffers();
if (p >= (prof_len+1)*sizeof(unsigned int))
return 0;
if (count > (prof_len+1)*sizeof(unsigned int) - p)
@@ -296,7 +498,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
return -EINVAL;
}
#endif
-
+ profile_discard_flip_buffers();
memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
return count;
}
@@ -306,16 +508,70 @@ static struct file_operations proc_profile_operations = {
.write = write_profile,
};
+#ifdef CONFIG_SMP
+static void __init profile_nop(void *unused)
+{
+}
+
+static int __init create_hash_tables(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ int node = cpu_to_node(cpu);
+ struct page *page;
+
+ page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (!page)
+ goto out_cleanup;
+ clear_highpage(page);
+ per_cpu(cpu_profile_hits, cpu)[1]
+ = (struct profile_hit *)page_address(page);
+ page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (!page)
+ goto out_cleanup;
+ clear_highpage(page);
+ per_cpu(cpu_profile_hits, cpu)[0]
+ = (struct profile_hit *)page_address(page);
+ }
+ return 0;
+out_cleanup:
+ prof_on = 0;
+ mb();
+ on_each_cpu(profile_nop, NULL, 0, 1);
+ for_each_online_cpu(cpu) {
+ struct page *page;
+
+ if (per_cpu(cpu_profile_hits, cpu)[0]) {
+ page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
+ per_cpu(cpu_profile_hits, cpu)[0] = NULL;
+ __free_page(page);
+ }
+ if (per_cpu(cpu_profile_hits, cpu)[1]) {
+ page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
+ per_cpu(cpu_profile_hits, cpu)[1] = NULL;
+ __free_page(page);
+ }
+ }
+ return -1;
+}
+#else
+#define create_hash_tables() ({ 0; })
+#endif
+
static int __init create_proc_profile(void)
{
struct proc_dir_entry *entry;
if (!prof_on)
return 0;
+ if (create_hash_tables())
+ return -1;
if (!(entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL)))
return 0;
entry->proc_fops = &proc_profile_operations;
entry->size = (1+prof_len) * sizeof(atomic_t);
+ hotcpu_notifier(profile_cpu_callback, 0);
return 0;
}
module_init(create_proc_profile);
diff --git a/kernel/signal.c b/kernel/signal.c
index f67390806d73..ba039fab37e8 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1143,36 +1143,6 @@ kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
return retval;
}
-/*
- * kill_sl_info() sends a signal to the session leader: this is used
- * to send SIGHUP to the controlling process of a terminal when
- * the connection is lost.
- */
-
-
-int
-kill_sl_info(int sig, struct siginfo *info, pid_t sid)
-{
- int err, retval = -EINVAL;
- struct task_struct *p;
-
- if (sid <= 0)
- goto out;
-
- retval = -ESRCH;
- read_lock(&tasklist_lock);
- do_each_task_pid(sid, PIDTYPE_SID, p) {
- if (!p->signal->leader)
- continue;
- err = group_send_sig_info(sig, info, p);
- if (retval)
- retval = err;
- } while_each_task_pid(sid, PIDTYPE_SID, p);
- read_unlock(&tasklist_lock);
-out:
- return retval;
-}
-
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
@@ -1309,12 +1279,6 @@ kill_pg(pid_t pgrp, int sig, int priv)
}
int
-kill_sl(pid_t sess, int sig, int priv)
-{
- return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
-}
-
-int
kill_proc(pid_t pid, int sig, int priv)
{
return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
@@ -1978,22 +1942,11 @@ relock:
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
-EXPORT_SYMBOL(force_sig);
-EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
-EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
-EXPORT_SYMBOL(kill_proc_info);
-EXPORT_SYMBOL(kill_sl);
-EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
-EXPORT_SYMBOL(send_group_sig_info);
-EXPORT_SYMBOL(sigqueue_alloc);
-EXPORT_SYMBOL(sigqueue_free);
-EXPORT_SYMBOL(send_sigqueue);
-EXPORT_SYMBOL(send_group_sigqueue);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
diff --git a/kernel/sys.c b/kernel/sys.c
index a95e3900dc1e..e6dbc2940751 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -19,6 +19,7 @@
#include <linux/fs.h>
#include <linux/workqueue.h>
#include <linux/device.h>
+#include <linux/key.h>
#include <linux/times.h>
#include <linux/security.h>
#include <linux/dcookies.h>
@@ -282,6 +283,9 @@ cond_syscall(sys_set_mempolicy)
cond_syscall(compat_mbind)
cond_syscall(compat_get_mempolicy)
cond_syscall(compat_set_mempolicy)
+cond_syscall(sys_add_key)
+cond_syscall(sys_request_key)
+cond_syscall(sys_keyctl)
/* arch-specific weak syscall entries */
cond_syscall(sys_pciconfig_read)
@@ -605,6 +609,7 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
current->fsgid = new_egid;
current->egid = new_egid;
current->gid = new_rgid;
+ key_fsgid_changed(current);
return 0;
}
@@ -642,6 +647,8 @@ asmlinkage long sys_setgid(gid_t gid)
}
else
return -EPERM;
+
+ key_fsgid_changed(current);
return 0;
}
@@ -730,6 +737,8 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
current->suid = current->euid;
current->fsuid = current->euid;
+ key_fsuid_changed(current);
+
return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
@@ -775,6 +784,8 @@ asmlinkage long sys_setuid(uid_t uid)
current->fsuid = current->euid = uid;
current->suid = new_suid;
+ key_fsuid_changed(current);
+
return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
@@ -821,6 +832,8 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
if (suid != (uid_t) -1)
current->suid = suid;
+ key_fsuid_changed(current);
+
return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
@@ -870,6 +883,8 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
current->gid = rgid;
if (sgid != (gid_t) -1)
current->sgid = sgid;
+
+ key_fsgid_changed(current);
return 0;
}
@@ -911,6 +926,8 @@ asmlinkage long sys_setfsuid(uid_t uid)
current->fsuid = uid;
}
+ key_fsuid_changed(current);
+
security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
return old_fsuid;
@@ -937,6 +954,7 @@ asmlinkage long sys_setfsgid(gid_t gid)
wmb();
}
current->fsgid = gid;
+ key_fsgid_changed(current);
}
return old_fsgid;
}
@@ -1669,7 +1687,7 @@ asmlinkage long sys_umask(int mask)
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
- int error;
+ long error;
int sig;
error = security_task_prctl(option, arg2, arg3, arg4, arg5);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 469cf0c2f26e..80bf15f035cd 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -66,6 +66,7 @@ extern int sysctl_lower_zone_protection;
extern int min_free_kbytes;
extern int printk_ratelimit_jiffies;
extern int printk_ratelimit_burst;
+extern int pid_max_min, pid_max_max;
#if defined(CONFIG_X86_LOCAL_APIC) && defined(__i386__)
int unknown_nmi_panic;
@@ -575,7 +576,10 @@ static ctl_table kern_table[] = {
.data = &pid_max,
.maxlen = sizeof (int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = sysctl_intvec,
+ .extra1 = &pid_max_min,
+ .extra2 = &pid_max_max,
},
{
.ctl_name = KERN_PANIC_ON_OOPS,
diff --git a/kernel/timer.c b/kernel/timer.c
index e3c9b5fcd52f..ac9386e22bd3 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -959,11 +959,6 @@ static inline void update_times(void)
void do_timer(struct pt_regs *regs)
{
jiffies_64++;
-#ifndef CONFIG_SMP
- /* SMP process accounting uses the local APIC timer */
-
- update_process_times(user_mode(regs));
-#endif
update_times();
}
diff --git a/kernel/user.c b/kernel/user.c
index 523175afeecd..693487dc940e 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/key.h>
/*
* UID task count cache, to get fast user lookup in "alloc_uid"
@@ -34,6 +35,10 @@ struct user_struct root_user = {
.sigpending = ATOMIC_INIT(0),
.mq_bytes = 0,
.locked_shm = 0,
+#ifdef CONFIG_KEYS
+ .uid_keyring = &root_user_keyring,
+ .session_keyring = &root_session_keyring,
+#endif
};
/*
@@ -87,6 +92,8 @@ void free_uid(struct user_struct *up)
{
if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
uid_hash_remove(up);
+ key_put(up->uid_keyring);
+ key_put(up->session_keyring);
kmem_cache_free(uid_cachep, up);
spin_unlock(&uidhash_lock);
}
@@ -116,6 +123,11 @@ struct user_struct * alloc_uid(uid_t uid)
new->mq_bytes = 0;
new->locked_shm = 0;
+ if (alloc_uid_keyring(new) < 0) {
+ kmem_cache_free(uid_cachep, new);
+ return NULL;
+ }
+
/*
* Before adding this, check whether we raced
* on adding the same user already..
@@ -123,6 +135,8 @@ struct user_struct * alloc_uid(uid_t uid)
spin_lock(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
+ key_put(new->uid_keyring);
+ key_put(new->session_keyring);
kmem_cache_free(uid_cachep, new);
} else {
uid_hash_insert(new, hashent);
@@ -146,8 +160,10 @@ void switch_uid(struct user_struct *new_user)
old_user = current->user;
atomic_inc(&new_user->processes);
atomic_dec(&old_user->processes);
+ switch_uid_keyring(new_user);
current->user = new_user;
free_uid(old_user);
+ suid_keys(current);
}
diff --git a/kernel/wait.c b/kernel/wait.c
new file mode 100644
index 000000000000..791681cfea98
--- /dev/null
+++ b/kernel/wait.c
@@ -0,0 +1,246 @@
+/*
+ * Generic waiting primitives.
+ *
+ * (C) 2004 William Irwin, Oracle
+ */
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/wait.h>
+#include <linux/hash.h>
+
+void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+ unsigned long flags;
+
+ wait->flags &= ~WQ_FLAG_EXCLUSIVE;
+ spin_lock_irqsave(&q->lock, flags);
+ __add_wait_queue(q, wait);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(add_wait_queue);
+
+void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+{
+ unsigned long flags;
+
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ spin_lock_irqsave(&q->lock, flags);
+ __add_wait_queue_tail(q, wait);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(add_wait_queue_exclusive);
+
+void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ __remove_wait_queue(q, wait);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(remove_wait_queue);
+
+
+/*
+ * Note: we use "set_current_state()" _after_ the wait-queue add,
+ * because we need a memory barrier there on SMP, so that any
+ * wake-function that tests for the wait-queue being active
+ * will be guaranteed to see waitqueue addition _or_ subsequent
+ * tests in this thread will see the wakeup having taken place.
+ *
+ * The spin_unlock() itself is semi-permeable and only protects
+ * one way (it only protects stuff inside the critical region and
+ * stops them from bleeding out - it would still allow subsequent
+ * loads to move into the the critical region).
+ */
+void fastcall
+prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+ unsigned long flags;
+
+ wait->flags &= ~WQ_FLAG_EXCLUSIVE;
+ spin_lock_irqsave(&q->lock, flags);
+ if (list_empty(&wait->task_list))
+ __add_wait_queue(q, wait);
+ /*
+ * don't alter the task state if this is just going to
+ * queue an async wait queue callback
+ */
+ if (is_sync_wait(wait))
+ set_current_state(state);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(prepare_to_wait);
+
+void fastcall
+prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+ unsigned long flags;
+
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ spin_lock_irqsave(&q->lock, flags);
+ if (list_empty(&wait->task_list))
+ __add_wait_queue_tail(q, wait);
+ /*
+ * don't alter the task state if this is just going to
+ * queue an async wait queue callback
+ */
+ if (is_sync_wait(wait))
+ set_current_state(state);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(prepare_to_wait_exclusive);
+
+void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+{
+ unsigned long flags;
+
+ __set_current_state(TASK_RUNNING);
+ /*
+ * We can check for list emptiness outside the lock
+ * IFF:
+ * - we use the "careful" check that verifies both
+ * the next and prev pointers, so that there cannot
+ * be any half-pending updates in progress on other
+ * CPU's that we haven't seen yet (and that might
+ * still change the stack area.
+ * and
+ * - all other users take the lock (ie we can only
+ * have _one_ other CPU that looks at or modifies
+ * the list).
+ */
+ if (!list_empty_careful(&wait->task_list)) {
+ spin_lock_irqsave(&q->lock, flags);
+ list_del_init(&wait->task_list);
+ spin_unlock_irqrestore(&q->lock, flags);
+ }
+}
+EXPORT_SYMBOL(finish_wait);
+
+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+ int ret = default_wake_function(wait, mode, sync, key);
+
+ if (ret)
+ list_del_init(&wait->task_list);
+ return ret;
+}
+EXPORT_SYMBOL(autoremove_wake_function);
+
+int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
+{
+ struct wait_bit_key *key = arg;
+ struct wait_bit_queue *wait_bit
+ = container_of(wait, struct wait_bit_queue, wait);
+
+ if (wait_bit->key.flags != key->flags ||
+ wait_bit->key.bit_nr != key->bit_nr ||
+ test_bit(key->bit_nr, key->flags))
+ return 0;
+ else
+ return autoremove_wake_function(wait, mode, sync, key);
+}
+EXPORT_SYMBOL(wake_bit_function);
+
+/*
+ * To allow interruptible waiting and asynchronous (i.e. nonblocking)
+ * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
+ * permitted return codes. Nonzero return codes halt waiting and return.
+ */
+int __sched fastcall
+__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
+ int (*action)(void *), unsigned mode)
+{
+ int ret = 0;
+
+ do {
+ prepare_to_wait(wq, &q->wait, mode);
+ if (test_bit(q->key.bit_nr, q->key.flags))
+ ret = (*action)(q->key.flags);
+ } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
+ finish_wait(wq, &q->wait);
+ return ret;
+}
+EXPORT_SYMBOL(__wait_on_bit);
+
+int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
+ int (*action)(void *), unsigned mode)
+{
+ wait_queue_head_t *wq = bit_waitqueue(word, bit);
+ DEFINE_WAIT_BIT(wait, word, bit);
+
+ return __wait_on_bit(wq, &wait, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_bit);
+
+int __sched fastcall
+__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
+ int (*action)(void *), unsigned mode)
+{
+ int ret = 0;
+
+ do {
+ prepare_to_wait_exclusive(wq, &q->wait, mode);
+ if (test_bit(q->key.bit_nr, q->key.flags)) {
+ if ((ret = (*action)(q->key.flags)))
+ break;
+ }
+ } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
+ finish_wait(wq, &q->wait);
+ return ret;
+}
+EXPORT_SYMBOL(__wait_on_bit_lock);
+
+int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
+ int (*action)(void *), unsigned mode)
+{
+ wait_queue_head_t *wq = bit_waitqueue(word, bit);
+ DEFINE_WAIT_BIT(wait, word, bit);
+
+ return __wait_on_bit_lock(wq, &wait, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
+
+void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
+{
+ struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
+ if (waitqueue_active(wq))
+ __wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+}
+EXPORT_SYMBOL(__wake_up_bit);
+
+/**
+ * wake_up_bit - wake up a waiter on a bit
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that wakes up waiters
+ * on a bit. For instance, if one were to have waiters on a bitflag,
+ * one would call wake_up_bit() after clearing the bit.
+ *
+ * In order for this to function properly, as it uses waitqueue_active()
+ * internally, some kind of memory barrier must be done prior to calling
+ * this. Typically, this will be smp_mb__after_clear_bit(), but in some
+ * cases where bitflags are manipulated non-atomically under a lock, one
+ * may need to use a less regular barrier, such fs/inode.c's smp_mb(),
+ * because spin_unlock() does not guarantee a memory barrier.
+ */
+void fastcall wake_up_bit(void *word, int bit)
+{
+ __wake_up_bit(bit_waitqueue(word, bit), word, bit);
+}
+EXPORT_SYMBOL(wake_up_bit);
+
+fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
+{
+ const int shift = BITS_PER_LONG == 32 ? 5 : 6;
+ const struct zone *zone = page_zone(virt_to_page(word));
+ unsigned long val = (unsigned long)word << shift | bit;
+
+ return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
+}
+EXPORT_SYMBOL(bit_waitqueue);
diff --git a/lib/idr.c b/lib/idr.c
index 972eefcce2b3..7d92cfc23ac2 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -39,13 +39,11 @@ static struct idr_layer *alloc_layer(struct idr *idp)
struct idr_layer *p;
spin_lock(&idp->lock);
- if (!(p = idp->id_free)) {
- spin_unlock(&idp->lock);
- return NULL;
+ if ((p = idp->id_free)) {
+ idp->id_free = p->ary[0];
+ idp->id_free_cnt--;
+ p->ary[0] = NULL;
}
- idp->id_free = p->ary[0];
- idp->id_free_cnt--;
- p->ary[0] = NULL;
spin_unlock(&idp->lock);
return(p);
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 3935097dc5cb..382bd020a33f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -131,9 +131,12 @@ void remove_from_page_cache(struct page *page)
spin_unlock_irq(&mapping->tree_lock);
}
-static inline int sync_page(struct page *page)
+static int sync_page(void *word)
{
struct address_space *mapping;
+ struct page *page;
+
+ page = container_of((page_flags_t *)word, struct page, flags);
/*
* FIXME, fercrissake. What is this barrier here for?
@@ -141,7 +144,8 @@ static inline int sync_page(struct page *page)
smp_mb();
mapping = page_mapping(page);
if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
- return mapping->a_ops->sync_page(page);
+ mapping->a_ops->sync_page(page);
+ io_schedule();
return 0;
}
@@ -360,40 +364,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
* at a cost of "thundering herd" phenomena during rare hash
* collisions.
*/
-struct page_wait_queue {
- struct page *page;
- int bit;
- wait_queue_t wait;
-};
-
-static int page_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
-{
- struct page *page = key;
- struct page_wait_queue *wq;
-
- wq = container_of(wait, struct page_wait_queue, wait);
- if (wq->page != page || test_bit(wq->bit, &page->flags))
- return 0;
- else
- return autoremove_wake_function(wait, mode, sync, NULL);
-}
-
-#define __DEFINE_PAGE_WAIT(name, p, b, f) \
- struct page_wait_queue name = { \
- .page = p, \
- .bit = b, \
- .wait = { \
- .task = current, \
- .func = page_wake_function, \
- .flags = f, \
- .task_list = LIST_HEAD_INIT(name.wait.task_list),\
- }, \
- }
-
-#define DEFINE_PAGE_WAIT(name, p, b) __DEFINE_PAGE_WAIT(name, p, b, 0)
-#define DEFINE_PAGE_WAIT_EXCLUSIVE(name, p, b) \
- __DEFINE_PAGE_WAIT(name, p, b, WQ_FLAG_EXCLUSIVE)
-
static wait_queue_head_t *page_waitqueue(struct page *page)
{
const struct zone *zone = page_zone(page);
@@ -401,30 +371,19 @@ static wait_queue_head_t *page_waitqueue(struct page *page)
return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}
-static void wake_up_page(struct page *page)
+static inline void wake_up_page(struct page *page, int bit)
{
- const unsigned int mode = TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE;
- wait_queue_head_t *waitqueue = page_waitqueue(page);
-
- if (waitqueue_active(waitqueue))
- __wake_up(waitqueue, mode, 1, page);
+ __wake_up_bit(page_waitqueue(page), &page->flags, bit);
}
void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
- wait_queue_head_t *waitqueue = page_waitqueue(page);
- DEFINE_PAGE_WAIT(wait, page, bit_nr);
+ DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
- do {
- prepare_to_wait(waitqueue, &wait.wait, TASK_UNINTERRUPTIBLE);
- if (test_bit(bit_nr, &page->flags)) {
- sync_page(page);
- io_schedule();
- }
- } while (test_bit(bit_nr, &page->flags));
- finish_wait(waitqueue, &wait.wait);
+ if (test_bit(bit_nr, &page->flags))
+ __wait_on_bit(page_waitqueue(page), &wait, sync_page,
+ TASK_UNINTERRUPTIBLE);
}
-
EXPORT_SYMBOL(wait_on_page_bit);
/**
@@ -448,7 +407,7 @@ void fastcall unlock_page(struct page *page)
if (!TestClearPageLocked(page))
BUG();
smp_mb__after_clear_bit();
- wake_up_page(page);
+ wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
@@ -464,7 +423,7 @@ void end_page_writeback(struct page *page)
BUG();
smp_mb__after_clear_bit();
}
- wake_up_page(page);
+ wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);
@@ -479,19 +438,11 @@ EXPORT_SYMBOL(end_page_writeback);
*/
void fastcall __lock_page(struct page *page)
{
- wait_queue_head_t *wqh = page_waitqueue(page);
- DEFINE_PAGE_WAIT_EXCLUSIVE(wait, page, PG_locked);
-
- while (TestSetPageLocked(page)) {
- prepare_to_wait_exclusive(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
- if (PageLocked(page)) {
- sync_page(page);
- io_schedule();
- }
- }
- finish_wait(wqh, &wait.wait);
-}
+ DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+ __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
+ TASK_UNINTERRUPTIBLE);
+}
EXPORT_SYMBOL(__lock_page);
/*
diff --git a/mm/memory.c b/mm/memory.c
index f10dc9bb7fe2..52f96233a13c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -114,6 +114,7 @@ static inline void free_one_pmd(struct mmu_gather *tlb, pmd_t * dir)
page = pmd_page(*dir);
pmd_clear(dir);
dec_page_state(nr_page_table_pages);
+ tlb->mm->nr_ptes--;
pte_free_tlb(tlb, page);
}
@@ -163,7 +164,6 @@ pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long a
spin_lock(&mm->page_table_lock);
if (!new)
return NULL;
-
/*
* Because we dropped the lock, we should re-check the
* entry, as somebody else could have populated it..
@@ -172,6 +172,7 @@ pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long a
pte_free(new);
goto out;
}
+ mm->nr_ptes++;
inc_page_state(nr_page_table_pages);
pmd_populate(mm, pmd, new);
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8fe9c7ee9853..9eac9c971104 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -66,6 +66,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/nodemask.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -95,7 +96,7 @@ static int nodes_online(unsigned long *nodes)
{
DECLARE_BITMAP(online2, MAX_NUMNODES);
- bitmap_copy(online2, node_online_map, MAX_NUMNODES);
+ bitmap_copy(online2, nodes_addr(node_online_map), MAX_NUMNODES);
if (bitmap_empty(online2, MAX_NUMNODES))
set_bit(0, online2);
if (!bitmap_subset(nodes, online2, MAX_NUMNODES))
@@ -424,7 +425,7 @@ static void get_zonemask(struct mempolicy *p, unsigned long *nodes)
case MPOL_PREFERRED:
/* or use current node instead of online map? */
if (p->v.preferred_node < 0)
- bitmap_copy(nodes, node_online_map, MAX_NUMNODES);
+ bitmap_copy(nodes, nodes_addr(node_online_map), MAX_NUMNODES);
else
__set_bit(p->v.preferred_node, nodes);
break;
@@ -692,7 +693,7 @@ static struct page *alloc_page_interleave(unsigned gfp, unsigned order, unsigned
struct zonelist *zl;
struct page *page;
- BUG_ON(!test_bit(nid, node_online_map));
+ BUG_ON(!node_online(nid));
zl = NODE_DATA(nid)->node_zonelists + (gfp & GFP_ZONEMASK);
page = __alloc_pages(gfp, order, zl);
if (page && page_zone(page) == zl->zones[0]) {
@@ -1081,7 +1082,8 @@ void __init numa_policy_init(void)
/* Set interleaving policy for system init. This way not all
the data structures allocated at system boot end up in node zero. */
- if (sys_set_mempolicy(MPOL_INTERLEAVE, node_online_map, MAX_NUMNODES) < 0)
+ if (sys_set_mempolicy(MPOL_INTERLEAVE, nodes_addr(node_online_map),
+ MAX_NUMNODES) < 0)
printk("numa_policy_init: interleaving failed\n");
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f2b5f575a410..274a7af40a89 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -31,10 +31,12 @@
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
+#include <linux/nodemask.h>
#include <asm/tlbflush.h>
-DECLARE_BITMAP(node_online_map, MAX_NUMNODES);
+nodemask_t node_online_map = NODE_MASK_NONE;
+nodemask_t node_possible_map = NODE_MASK_ALL;
struct pglist_data *pgdat_list;
unsigned long totalram_pages;
unsigned long totalhigh_pages;
@@ -92,6 +94,7 @@ static void bad_page(const char *function, struct page *page)
set_page_count(page, 0);
reset_page_mapcount(page);
page->mapping = NULL;
+ tainted |= TAINT_BAD_PAGE;
}
#ifndef CONFIG_HUGETLB_PAGE
diff --git a/mm/shmem.c b/mm/shmem.c
index 296a61073ef6..4a6b7ad35a7f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -10,6 +10,10 @@
* Copyright (C) 2002-2004 VERITAS Software Corporation.
* Copyright (C) 2004 Andi Kleen, SuSE Labs
*
+ * Extended attribute support for tmpfs:
+ * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
+ * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ *
* This file is released under the GPL.
*/
@@ -41,6 +45,7 @@
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
+#include <linux/xattr.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
@@ -171,6 +176,7 @@ static struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
+static struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;
static struct backing_dev_info shmem_backing_dev_info = {
@@ -1235,6 +1241,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
switch (mode & S_IFMT) {
default:
+ inode->i_op = &shmem_special_inode_operations;
init_special_inode(inode, mode, dev);
break;
case S_IFREG:
@@ -1756,6 +1763,12 @@ static void shmem_put_link(struct dentry *dentry, struct nameidata *nd)
static struct inode_operations shmem_symlink_inline_operations = {
.readlink = generic_readlink,
.follow_link = shmem_follow_link_inline,
+#ifdef CONFIG_TMPFS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = generic_listxattr,
+ .removexattr = generic_removexattr,
+#endif
};
static struct inode_operations shmem_symlink_inode_operations = {
@@ -1763,6 +1776,12 @@ static struct inode_operations shmem_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = shmem_follow_link,
.put_link = shmem_put_link,
+#ifdef CONFIG_TMPFS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = generic_listxattr,
+ .removexattr = generic_removexattr,
+#endif
};
static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes)
@@ -1862,6 +1881,12 @@ static void shmem_put_super(struct super_block *sb)
sb->s_fs_info = NULL;
}
+#ifdef CONFIG_TMPFS_XATTR
+static struct xattr_handler *shmem_xattr_handlers[];
+#else
+#define shmem_xattr_handlers NULL
+#endif
+
static int shmem_fill_super(struct super_block *sb,
void *data, int silent)
{
@@ -1904,6 +1929,7 @@ static int shmem_fill_super(struct super_block *sb,
sbinfo->max_inodes = inodes;
sbinfo->free_inodes = inodes;
}
+ sb->s_xattr = shmem_xattr_handlers;
#endif
sb->s_maxbytes = SHMEM_MAX_BYTES;
@@ -1995,6 +2021,12 @@ static struct file_operations shmem_file_operations = {
static struct inode_operations shmem_inode_operations = {
.truncate = shmem_truncate,
.setattr = shmem_notify_change,
+#ifdef CONFIG_TMPFS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = generic_listxattr,
+ .removexattr = generic_removexattr,
+#endif
};
static struct inode_operations shmem_dir_inode_operations = {
@@ -2008,6 +2040,21 @@ static struct inode_operations shmem_dir_inode_operations = {
.rmdir = shmem_rmdir,
.mknod = shmem_mknod,
.rename = shmem_rename,
+#ifdef CONFIG_TMPFS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = generic_listxattr,
+ .removexattr = generic_removexattr,
+#endif
+#endif
+};
+
+static struct inode_operations shmem_special_inode_operations = {
+#ifdef CONFIG_TMPFS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = generic_listxattr,
+ .removexattr = generic_removexattr,
#endif
};
@@ -2032,6 +2079,49 @@ static struct vm_operations_struct shmem_vm_ops = {
#endif
};
+
+#ifdef CONFIG_TMPFS_SECURITY
+
+static size_t shmem_xattr_security_list(struct inode *inode, char *list, size_t list_len,
+ const char *name, size_t name_len)
+{
+ return security_inode_listsecurity(inode, list, list_len);
+}
+
+static int shmem_xattr_security_get(struct inode *inode, const char *name, void *buffer, size_t size)
+{
+ if (strcmp(name, "") == 0)
+ return -EINVAL;
+ return security_inode_getsecurity(inode, name, buffer, size);
+}
+
+static int shmem_xattr_security_set(struct inode *inode, const char *name, const void *value, size_t size, int flags)
+{
+ if (strcmp(name, "") == 0)
+ return -EINVAL;
+ return security_inode_setsecurity(inode, name, value, size, flags);
+}
+
+struct xattr_handler shmem_xattr_security_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .list = shmem_xattr_security_list,
+ .get = shmem_xattr_security_get,
+ .set = shmem_xattr_security_set,
+};
+
+#endif /* CONFIG_TMPFS_SECURITY */
+
+#ifdef CONFIG_TMPFS_XATTR
+
+static struct xattr_handler *shmem_xattr_handlers[] = {
+#ifdef CONFIG_TMPFS_SECURITY
+ &shmem_xattr_security_handler,
+#endif
+ NULL
+};
+
+#endif /* CONFIG_TMPFS_XATTR */
+
static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
@@ -2164,5 +2254,3 @@ int shmem_zero_setup(struct vm_area_struct *vma)
vma->vm_ops = &shmem_vm_ops;
return 0;
}
-
-EXPORT_SYMBOL(shmem_file_setup);
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index 90abc63db367..c13a2161bca2 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -120,5 +120,3 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
{
return 0;
}
-
-EXPORT_SYMBOL(shmem_file_setup);
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index e21a5d1a255a..2e0e07afb84c 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -6,6 +6,22 @@
* of the GNU General Public License, incorporated herein by reference.
*
* Usage: nm -n vmlinux | scripts/kallsyms [--all-symbols] > symbols.S
+ *
+ * ChangeLog:
+ *
+ * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com>
+ * Changed the compression method from stem compression to "table lookup"
+ * compression
+ *
+ * Table compression uses all the unused char codes on the symbols and
+ * maps these to the most used substrings (tokens). For instance, it might
+ * map char code 0xF7 to represent "write_" and then in every symbol where
+ * "write_" appears it can be replaced by 0xF7, saving 5 bytes.
+ * The used codes themselves are also placed in the table so that the
+ * decompression can work without "special cases".
+ * Applied to kernel symbols, this usually produces a compression ratio
+ * of about 50%.
+ *
*/
#include <stdio.h>
@@ -13,10 +29,39 @@
#include <string.h>
#include <ctype.h>
+/* maximum token length used. It doesn't pay to increase it a lot, because
+ * very long substrings probably don't repeat themselves too often. */
+#define MAX_TOK_SIZE 11
+#define KSYM_NAME_LEN 127
+
+/* we use only a subset of the complete symbol table to gather the token count,
+ * to speed up compression, at the expense of a little compression ratio */
+#define WORKING_SET 1024
+
+/* first find the best token only on the list of tokens that would profit more
+ * than GOOD_BAD_THRESHOLD. Only if this list is empty go to the "bad" list.
+ * Increasing this value will put less tokens on the "good" list, so the search
+ * is faster. However, if the good list runs out of tokens, we must painfully
+ * search the bad list. */
+#define GOOD_BAD_THRESHOLD 10
+
+/* token hash parameters */
+#define HASH_BITS 18
+#define HASH_TABLE_SIZE (1 << HASH_BITS)
+#define HASH_MASK (HASH_TABLE_SIZE - 1)
+#define HASH_BASE_OFFSET 2166136261U
+#define HASH_FOLD(a) ((a)&(HASH_MASK))
+
+/* flags to mark symbols */
+#define SYM_FLAG_VALID 1
+#define SYM_FLAG_SAMPLED 2
+
struct sym_entry {
unsigned long long addr;
char type;
- char *sym;
+ unsigned char flags;
+ unsigned char len;
+ unsigned char *sym;
};
@@ -25,6 +70,26 @@ static int size, cnt;
static unsigned long long _stext, _etext, _sinittext, _einittext;
static int all_symbols = 0;
+struct token {
+ unsigned char data[MAX_TOK_SIZE];
+ unsigned char len;
+ /* profit: the number of bytes that could be saved by inserting this
+ * token into the table */
+ int profit;
+ struct token *next; /* next token on the hash list */
+ struct token *right; /* next token on the good/bad list */
+ struct token *left; /* previous token on the good/bad list */
+ struct token *smaller; /* token that is less one letter than this one */
+ };
+
+struct token bad_head, good_head;
+struct token *hash_table[HASH_TABLE_SIZE];
+
+/* the table that holds the result of the compression */
+unsigned char best_table[256][MAX_TOK_SIZE+1];
+unsigned char best_table_len[256];
+
+
static void
usage(void)
{
@@ -59,34 +124,53 @@ read_symbol(FILE *in, struct sym_entry *s)
else if (toupper(s->type) == 'A' || toupper(s->type) == 'U')
return -1;
- s->sym = strdup(str);
+ /* include the type field in the symbol name, so that it gets
+ * compressed together */
+ s->len = strlen(str) + 1;
+ s->sym = (char *) malloc(s->len + 1);
+ strcpy(s->sym + 1, str);
+ s->sym[0] = s->type;
+
return 0;
}
static int
symbol_valid(struct sym_entry *s)
{
+ /* Symbols which vary between passes. Passes 1 and 2 must have
+ * identical symbol lists. The kallsyms_* symbols below are only added
+ * after pass 1, they would be included in pass 2 when --all-symbols is
+ * specified so exclude them to get a stable symbol list.
+ */
+ static char *special_symbols[] = {
+ "kallsyms_addresses",
+ "kallsyms_num_syms",
+ "kallsyms_names",
+ "kallsyms_markers",
+ "kallsyms_token_table",
+ "kallsyms_token_index",
+
+ /* Exclude linker generated symbols which vary between passes */
+ "_SDA_BASE_", /* ppc */
+ "_SDA2_BASE_", /* ppc */
+ NULL };
+ int i;
+
+ /* if --all-symbols is not specified, then symbols outside the text
+ * and inittext sections are discarded */
if (!all_symbols) {
if ((s->addr < _stext || s->addr > _etext)
&& (s->addr < _sinittext || s->addr > _einittext))
return 0;
}
- /* Exclude symbols which vary between passes. Passes 1 and 2 must have
- * identical symbol lists. The kallsyms_* symbols below are only added
- * after pass 1, they would be included in pass 2 when --all-symbols is
- * specified so exclude them to get a stable symbol list.
- */
- if (strstr(s->sym, "_compiled.") ||
- strcmp(s->sym, "kallsyms_addresses") == 0 ||
- strcmp(s->sym, "kallsyms_num_syms") == 0 ||
- strcmp(s->sym, "kallsyms_names") == 0)
+ /* Exclude symbols which vary between passes. */
+ if (strstr(s->sym + 1, "_compiled."))
return 0;
- /* Exclude linker generated symbols which vary between passes */
- if (strcmp(s->sym, "_SDA_BASE_") == 0 || /* ppc */
- strcmp(s->sym, "_SDA2_BASE_") == 0) /* ppc */
- return 0;
+ for (i = 0; special_symbols[i]; i++)
+ if( strcmp(s->sym + 1, special_symbols[i]) == 0 )
+ return 0;
return 1;
}
@@ -108,11 +192,47 @@ read_map(FILE *in)
}
}
+static void output_label(char *label)
+{
+ printf(".globl %s\n",label);
+ printf("\tALGN\n");
+ printf("%s:\n",label);
+}
+
+/* uncompress a compressed symbol. When this function is called, the best table
+ * might still be compressed itself, so the function needs to be recursive */
+static int expand_symbol(unsigned char *data, int len, char *result)
+{
+ int c, rlen, total=0;
+
+ while (len) {
+ c = *data;
+ /* if the table holds a single char that is the same as the one
+ * we are looking for, then end the search */
+ if (best_table[c][0]==c && best_table_len[c]==1) {
+ *result++ = c;
+ total++;
+ } else {
+ /* if not, recurse and expand */
+ rlen = expand_symbol(best_table[c], best_table_len[c], result);
+ total += rlen;
+ result += rlen;
+ }
+ data++;
+ len--;
+ }
+ *result=0;
+
+ return total;
+}
+
static void
write_src(void)
{
- int i, valid = 0;
- char *prev;
+ int i, k, off, valid;
+ unsigned int best_idx[256];
+ unsigned int *markers;
+ char buf[KSYM_NAME_LEN+1];
printf("#include <asm/types.h>\n");
printf("#if BITS_PER_LONG == 64\n");
@@ -125,43 +245,399 @@ write_src(void)
printf(".data\n");
- printf(".globl kallsyms_addresses\n");
- printf("\tALGN\n");
- printf("kallsyms_addresses:\n");
+ output_label("kallsyms_addresses");
+ valid = 0;
for (i = 0; i < cnt; i++) {
- if (!symbol_valid(&table[i]))
- continue;
-
- printf("\tPTR\t%#llx\n", table[i].addr);
- valid++;
+ if (table[i].flags & SYM_FLAG_VALID) {
+ printf("\tPTR\t%#llx\n", table[i].addr);
+ valid++;
+ }
}
printf("\n");
- printf(".globl kallsyms_num_syms\n");
- printf("\tALGN\n");
- printf("kallsyms_num_syms:\n");
+ output_label("kallsyms_num_syms");
printf("\tPTR\t%d\n", valid);
printf("\n");
- printf(".globl kallsyms_names\n");
- printf("\tALGN\n");
- printf("kallsyms_names:\n");
- prev = "";
+ /* table of offset markers, that give the offset in the compressed stream
+ * every 256 symbols */
+ markers = (unsigned int *) malloc(sizeof(unsigned int)*((valid + 255) / 256));
+
+ output_label("kallsyms_names");
+ valid = 0;
+ off = 0;
for (i = 0; i < cnt; i++) {
- int k;
- if (!symbol_valid(&table[i]))
+ if (!table[i].flags & SYM_FLAG_VALID)
continue;
- for (k = 0; table[i].sym[k] && table[i].sym[k] == prev[k]; ++k)
- ;
+ if ((valid & 0xFF) == 0)
+ markers[valid >> 8] = off;
- printf("\t.byte 0x%02x\n\t.asciz\t\"%s\"\n", k, table[i].sym + k);
- prev = table[i].sym;
+ printf("\t.byte 0x%02x", table[i].len);
+ for (k = 0; k < table[i].len; k++)
+ printf(", 0x%02x", table[i].sym[k]);
+ printf("\n");
+
+ off += table[i].len + 1;
+ valid++;
+ }
+ printf("\n");
+
+ output_label("kallsyms_markers");
+ for (i = 0; i < ((valid + 255) >> 8); i++)
+ printf("\tPTR\t%d\n", markers[i]);
+ printf("\n");
+
+ free(markers);
+
+ output_label("kallsyms_token_table");
+ off = 0;
+ for (i = 0; i < 256; i++) {
+ best_idx[i] = off;
+ expand_symbol(best_table[i],best_table_len[i],buf);
+ printf("\t.asciz\t\"%s\"\n", buf);
+ off += strlen(buf) + 1;
}
printf("\n");
+
+ output_label("kallsyms_token_index");
+ for (i = 0; i < 256; i++)
+ printf("\t.short\t%d\n", best_idx[i]);
+ printf("\n");
+}
+
+
+/* table lookup compression functions */
+
+static inline unsigned int rehash_token(unsigned int hash, unsigned char data)
+{
+ return ((hash * 16777619) ^ data);
+}
+
+static unsigned int hash_token(unsigned char *data, int len)
+{
+ unsigned int hash=HASH_BASE_OFFSET;
+ int i;
+
+ for (i = 0; i < len; i++)
+ hash = rehash_token(hash, data[i]);
+
+ return HASH_FOLD(hash);
+}
+
+/* find a token given its data and hash value */
+static struct token *find_token_hash(unsigned char *data, int len, unsigned int hash)
+{
+ struct token *ptr;
+
+ ptr = hash_table[hash];
+
+ while (ptr) {
+ if ((ptr->len == len) && (memcmp(ptr->data, data, len) == 0))
+ return ptr;
+ ptr=ptr->next;
+ }
+
+ return NULL;
+}
+
+static inline void insert_token_in_group(struct token *head, struct token *ptr)
+{
+ ptr->right = head->right;
+ ptr->right->left = ptr;
+ head->right = ptr;
+ ptr->left = head;
+}
+
+static inline void remove_token_from_group(struct token *ptr)
+{
+ ptr->left->right = ptr->right;
+ ptr->right->left = ptr->left;
+}
+
+
+/* build the counts for all the tokens that start with "data", and have lengths
+ * from 2 to "len" */
+static void learn_token(unsigned char *data, int len)
+{
+ struct token *ptr,*last_ptr;
+ int i, newprofit;
+ unsigned int hash = HASH_BASE_OFFSET;
+ unsigned int hashes[MAX_TOK_SIZE + 1];
+
+ if (len > MAX_TOK_SIZE)
+ len = MAX_TOK_SIZE;
+
+ /* calculate and store the hash values for all the sub-tokens */
+ hash = rehash_token(hash, data[0]);
+ for (i = 2; i <= len; i++) {
+ hash = rehash_token(hash, data[i-1]);
+ hashes[i] = HASH_FOLD(hash);
+ }
+
+ last_ptr = NULL;
+ ptr = NULL;
+
+ for (i = len; i >= 2; i--) {
+ hash = hashes[i];
+
+ if (!ptr) ptr = find_token_hash(data, i, hash);
+
+ if (!ptr) {
+ /* create a new token entry */
+ ptr = (struct token *) malloc(sizeof(*ptr));
+
+ memcpy(ptr->data, data, i);
+ ptr->len = i;
+
+ /* when we create an entry, its profit is 0 because
+ * we also take into account the size of the token on
+ * the compressed table. We then subtract GOOD_BAD_THRESHOLD
+ * so that the test to see if this token belongs to
+ * the good or bad list, is a comparison to zero */
+ ptr->profit = -GOOD_BAD_THRESHOLD;
+
+ ptr->next = hash_table[hash];
+ hash_table[hash] = ptr;
+
+ insert_token_in_group(&bad_head, ptr);
+
+ ptr->smaller = NULL;
+ } else {
+ newprofit = ptr->profit + (ptr->len - 1);
+ /* check to see if this token needs to be moved to a
+ * different list */
+ if((ptr->profit < 0) && (newprofit >= 0)) {
+ remove_token_from_group(ptr);
+ insert_token_in_group(&good_head,ptr);
+ }
+ ptr->profit = newprofit;
+ }
+
+ if (last_ptr) last_ptr->smaller = ptr;
+ last_ptr = ptr;
+
+ ptr = ptr->smaller;
+ }
+}
+
+/* decrease the counts for all the tokens that start with "data", and have lengths
+ * from 2 to "len". This function is much simpler than learn_token because we have
+ * more guarantees (the tokens exist, the ->smaller pointer is set, etc.)
+ * The two separate functions exist only because of compression performance */
+static void forget_token(unsigned char *data, int len)
+{
+ struct token *ptr;
+ int i, newprofit;
+ unsigned int hash=0;
+
+ if (len > MAX_TOK_SIZE) len = MAX_TOK_SIZE;
+
+ hash = hash_token(data, len);
+ ptr = find_token_hash(data, len, hash);
+
+ for (i = len; i >= 2; i--) {
+
+ newprofit = ptr->profit - (ptr->len - 1);
+ if ((ptr->profit >= 0) && (newprofit < 0)) {
+ remove_token_from_group(ptr);
+ insert_token_in_group(&bad_head, ptr);
+ }
+ ptr->profit=newprofit;
+
+ ptr=ptr->smaller;
+ }
}
+/* count all the possible tokens in a symbol */
+static void learn_symbol(unsigned char *symbol, int len)
+{
+ int i;
+
+ for (i = 0; i < len - 1; i++)
+ learn_token(symbol + i, len - i);
+}
+
+/* decrease the count for all the possible tokens in a symbol */
+static void forget_symbol(unsigned char *symbol, int len)
+{
+ int i;
+
+ for (i = 0; i < len - 1; i++)
+ forget_token(symbol + i, len - i);
+}
+
+/* set all the symbol flags and do the initial token count */
+static void build_initial_tok_table(void)
+{
+ int i, use_it, valid;
+
+ valid = 0;
+ for (i = 0; i < cnt; i++) {
+ table[i].flags = 0;
+ if ( symbol_valid(&table[i]) ) {
+ table[i].flags |= SYM_FLAG_VALID;
+ valid++;
+ }
+ }
+
+ use_it = 0;
+ for (i = 0; i < cnt; i++) {
+
+ /* subsample the available symbols. This method is almost like
+ * a Bresenham's algorithm to get uniformly distributed samples
+ * across the symbol table */
+ if (table[i].flags & SYM_FLAG_VALID) {
+
+ use_it += WORKING_SET;
+
+ if (use_it >= valid) {
+ table[i].flags |= SYM_FLAG_SAMPLED;
+ use_it -= valid;
+ }
+ }
+ if (table[i].flags & SYM_FLAG_SAMPLED)
+ learn_symbol(table[i].sym, table[i].len);
+ }
+}
+
+/* replace a given token in all the valid symbols. Use the sampled symbols
+ * to update the counts */
+static void compress_symbols(unsigned char *str, int tlen, int idx)
+{
+ int i, len, learn, size;
+ unsigned char *p;
+
+ for (i = 0; i < cnt; i++) {
+
+ if (!(table[i].flags & SYM_FLAG_VALID)) continue;
+
+ len = table[i].len;
+ learn = 0;
+ p = table[i].sym;
+
+ do {
+ /* find the token on the symbol */
+ p = (unsigned char *) strstr((char *) p, (char *) str);
+ if (!p) break;
+
+ if (!learn) {
+ /* if this symbol was used to count, decrease it */
+ if (table[i].flags & SYM_FLAG_SAMPLED)
+ forget_symbol(table[i].sym, len);
+ learn = 1;
+ }
+
+ *p = idx;
+ size = (len - (p - table[i].sym)) - tlen + 1;
+ memmove(p + 1, p + tlen, size);
+ p++;
+ len -= tlen - 1;
+
+ } while (size >= tlen);
+
+ if(learn) {
+ table[i].len = len;
+ /* if this symbol was used to count, learn it again */
+ if(table[i].flags & SYM_FLAG_SAMPLED)
+ learn_symbol(table[i].sym, len);
+ }
+ }
+}
+
+/* search the token with the maximum profit */
+static struct token *find_best_token(void)
+{
+ struct token *ptr,*best,*head;
+ int bestprofit;
+
+ bestprofit=-10000;
+
+ /* failsafe: if the "good" list is empty search from the "bad" list */
+ if(good_head.right == &good_head) head = &bad_head;
+ else head = &good_head;
+
+ ptr = head->right;
+ best = NULL;
+ while (ptr != head) {
+ if (ptr->profit > bestprofit) {
+ bestprofit = ptr->profit;
+ best = ptr;
+ }
+ ptr = ptr->right;
+ }
+
+ return best;
+}
+
+/* this is the core of the algorithm: calculate the "best" table */
+static void optimize_result(void)
+{
+ struct token *best;
+ int i;
+
+ /* using the '\0' symbol last allows compress_symbols to use standard
+ * fast string functions */
+ for (i = 255; i >= 0; i--) {
+
+ /* if this table slot is empty (it is not used by an actual
+ * original char code) */
+ if (!best_table_len[i]) {
+
+ /* find the token with the greatest profit value */
+ best = find_best_token();
+
+ /* place it in the "best" table */
+ best_table_len[i] = best->len;
+ memcpy(best_table[i], best->data, best_table_len[i]);
+ /* zero terminate the token so that we can use strstr
+ in compress_symbols */
+ best_table[i][best_table_len[i]]='\0';
+
+ /* replace this token in all the valid symbols */
+ compress_symbols(best_table[i], best_table_len[i], i);
+ }
+ }
+}
+
+/* start by placing the symbols that are actually used on the table */
+static void insert_real_symbols_in_table(void)
+{
+ int i, j, c;
+
+ memset(best_table, 0, sizeof(best_table));
+ memset(best_table_len, 0, sizeof(best_table_len));
+
+ for (i = 0; i < cnt; i++) {
+ if (table[i].flags & SYM_FLAG_VALID) {
+ for (j = 0; j < table[i].len; j++) {
+ c = table[i].sym[j];
+ best_table[c][0]=c;
+ best_table_len[c]=1;
+ }
+ }
+ }
+}
+
+static void optimize_token_table(void)
+{
+ memset(hash_table, 0, sizeof(hash_table));
+
+ good_head.left = &good_head;
+ good_head.right = &good_head;
+
+ bad_head.left = &bad_head;
+ bad_head.right = &bad_head;
+
+ build_initial_tok_table();
+
+ insert_real_symbols_in_table();
+
+ optimize_result();
+}
+
+
int
main(int argc, char **argv)
{
@@ -171,6 +647,7 @@ main(int argc, char **argv)
usage();
read_map(stdin);
+ optimize_token_table();
write_src();
return 0;
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
index 86ea6a34d791..bf07479c8630 100644
--- a/scripts/mod/sumversion.c
+++ b/scripts/mod/sumversion.c
@@ -416,7 +416,8 @@ static int get_version(const char *modname, char sum[])
struct md4_ctx md;
char *sources, *end, *fname;
const char *basename;
- char filelist[sizeof(".tmp_versions/%s.mod") + strlen(modname)];
+ char filelist[strlen(getenv("MODVERDIR")) + strlen("/") +
+ strlen(modname) - strlen(".o") + strlen(".mod") + 1 ];
/* Source files for module are in .tmp_versions/modname.mod,
after the first line. */
@@ -424,9 +425,8 @@ static int get_version(const char *modname, char sum[])
basename = strrchr(modname, '/') + 1;
else
basename = modname;
- sprintf(filelist, ".tmp_versions/%s", basename);
- /* Truncate .o, add .mod */
- strcpy(filelist + strlen(filelist)-2, ".mod");
+ sprintf(filelist, "%s/%.*s.mod", getenv("MODVERDIR"),
+ (int) strlen(basename) - 2, basename);
file = grab_file(filelist, &len);
if (!file) {
diff --git a/security/Kconfig b/security/Kconfig
index ddde53ba6234..8a35e4d52c8b 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -4,6 +4,35 @@
menu "Security options"
+config KEYS
+ bool "Enable access key retention support"
+ help
+ This option provides support for retaining authentication tokens and
+ access keys in the kernel.
+
+ It also includes provision of methods by which such keys might be
+ associated with a process so that network filesystems, encryption
+ support and the like can find them.
+
+ Furthermore, a special type of key is available that acts as keyring:
+ a searchable sequence of keys. Each process is equipped with access
+ to five standard keyrings: UID-specific, GID-specific, session,
+ process and thread.
+
+ If you are unsure as to whether this is required, answer N.
+
+config KEYS_DEBUG_PROC_KEYS
+ bool "Enable the /proc/keys file by which all keys may be viewed"
+ depends on KEYS
+ help
+ This option turns on support for the /proc/keys file through which
+ all the keys on the system can be listed.
+
+ This option is a slight security risk in that it makes it possible
+ for anyone to see all the keys on the system. Normally the manager
+ pretends keys that are inaccessible to a process don't exist as far
+ as that process is concerned.
+
config SECURITY
bool "Enable different security models"
help
diff --git a/security/Makefile b/security/Makefile
index 3686a1bb324a..473861ea657a 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -2,6 +2,7 @@
# Makefile for the kernel security code
#
+obj-$(CONFIG_KEYS) += keys/
subdir-$(CONFIG_SECURITY_SELINUX) += selinux
# if we don't select a security model, use the default capabilities
diff --git a/security/dummy.c b/security/dummy.c
index a9e69659533e..0ce9f22d6c8c 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -447,17 +447,17 @@ static int dummy_inode_removexattr (struct dentry *dentry, char *name)
return 0;
}
-static int dummy_inode_getsecurity(struct dentry *dentry, const char *name, void *buffer, size_t size)
+static int dummy_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size)
{
return -EOPNOTSUPP;
}
-static int dummy_inode_setsecurity(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
+static int dummy_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
return -EOPNOTSUPP;
}
-static int dummy_inode_listsecurity(struct dentry *dentry, char *buffer)
+static int dummy_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
{
return 0;
}
diff --git a/security/keys/Makefile b/security/keys/Makefile
new file mode 100644
index 000000000000..bd6500dbab0e
--- /dev/null
+++ b/security/keys/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for key management
+#
+
+obj-y := \
+ key.o \
+ keyring.o \
+ keyctl.o \
+ process_keys.o \
+ user_defined.o \
+ request_key.o
+
+obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/security/keys/internal.h b/security/keys/internal.h
new file mode 100644
index 000000000000..e68e0c7ee29e
--- /dev/null
+++ b/security/keys/internal.h
@@ -0,0 +1,109 @@
+/* internal.h: authentication token and access key management internal defs
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _INTERNAL_H
+#define _INTERNAL_H
+
+#include <linux/key.h>
+#include <linux/key-ui.h>
+
+extern struct key_type key_type_dead;
+extern struct key_type key_type_user;
+
+/*****************************************************************************/
+/*
+ * keep track of keys for a user
+ * - this needs to be separate to user_struct to avoid a refcount-loop
+ * (user_struct pins some keyrings which pin this struct)
+ * - this also keeps track of keys under request from userspace for this UID
+ */
+struct key_user {
+ struct rb_node node;
+ struct list_head consq; /* construction queue */
+ spinlock_t lock;
+ atomic_t usage; /* for accessing qnkeys & qnbytes */
+ atomic_t nkeys; /* number of keys */
+ atomic_t nikeys; /* number of instantiated keys */
+ uid_t uid;
+ int qnkeys; /* number of keys allocated to this user */
+ int qnbytes; /* number of bytes allocated to this user */
+};
+
+#define KEYQUOTA_MAX_KEYS 100
+#define KEYQUOTA_MAX_BYTES 10000
+#define KEYQUOTA_LINK_BYTES 4 /* a link in a keyring is worth 4 bytes */
+
+extern struct rb_root key_user_tree;
+extern spinlock_t key_user_lock;
+extern struct key_user root_key_user;
+
+extern struct key_user *key_user_lookup(uid_t uid);
+extern void key_user_put(struct key_user *user);
+
+
+
+extern struct rb_root key_serial_tree;
+extern spinlock_t key_serial_lock;
+extern struct semaphore key_alloc_sem;
+extern struct rw_semaphore key_construction_sem;
+extern wait_queue_head_t request_key_conswq;
+
+
+extern void keyring_publish_name(struct key *keyring);
+
+extern int __key_link(struct key *keyring, struct key *key);
+
+extern struct key *__keyring_search_one(struct key *keyring,
+ const struct key_type *type,
+ const char *description,
+ key_perm_t perm);
+
+typedef int (*key_match_func_t)(const struct key *, const void *);
+
+extern struct key *keyring_search_aux(struct key *keyring,
+ struct key_type *type,
+ const void *description,
+ key_match_func_t match);
+
+extern struct key *search_process_keyrings_aux(struct key_type *type,
+ const void *description,
+ key_match_func_t match);
+
+extern struct key *find_keyring_by_name(const char *name, key_serial_t bound);
+
+extern int install_thread_keyring(struct task_struct *tsk);
+
+
+/*
+ * debugging key validation
+ */
+#ifdef KEY_DEBUGGING
+static void __key_check(const struct key *key)
+{
+ printk("__key_check: key %p {%08x} should be {%08x}\n",
+ key, key->magic, KEY_DEBUG_MAGIC);
+ BUG();
+}
+
+
+static inline void key_check(const struct key *key)
+{
+ if (key && (IS_ERR(key) || key->magic != KEY_DEBUG_MAGIC))
+ __key_check(key);
+}
+
+#else
+
+#define key_check(key) do {} while(0)
+
+#endif
+
+#endif /* _INTERNAL_H */
diff --git a/security/keys/key.c b/security/keys/key.c
new file mode 100644
index 000000000000..da9fc0aea739
--- /dev/null
+++ b/security/keys/key.c
@@ -0,0 +1,1039 @@
+/* key.c: basic authentication token and access key management
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/err.h>
+#include "internal.h"
+
+/* slab cache from which struct key objects are allocated (set up in key_init()) */
+static kmem_cache_t *key_jar;
+/* next serial number to propose; starts at 3 — serials below 3 appear to be
+ * reserved for the fixed root keyrings inserted by key_init() — TODO confirm */
+static key_serial_t key_serial_next = 3;
+struct rb_root key_serial_tree; /* tree of keys indexed by serial */
+spinlock_t key_serial_lock = SPIN_LOCK_UNLOCKED;
+
+struct rb_root key_user_tree; /* tree of quota records indexed by UID */
+spinlock_t key_user_lock = SPIN_LOCK_UNLOCKED;
+
+/* registered key types, protected by key_types_sem */
+static LIST_HEAD(key_types_list);
+static DECLARE_RWSEM(key_types_sem);
+
+/* deferred destruction of zero-usage keys, scheduled by key_put() */
+static void key_cleanup(void *data);
+static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);
+
+/* we serialise key instantiation and link */
+DECLARE_RWSEM(key_construction_sem);
+
+/* any key whose type gets unregistered will be re-typed to this */
+struct key_type key_type_dead = {
+	.name	= "dead",
+};
+
+/*****************************************************************************/
+/*
+ * get the key quota record for a user, allocating a new record if one doesn't
+ * already exist
+ * - returns the record with its usage count incremented (balance with
+ *   key_user_put()), or NULL on allocation failure
+ */
+struct key_user *key_user_lookup(uid_t uid)
+{
+	struct key_user *candidate = NULL, *user;
+	struct rb_node *parent;
+	struct rb_node **p;
+
+ try_again:
+	/* the search must restart from the root on every pass: the tree may
+	 * have been modified by a third party whilst the lock was dropped for
+	 * the allocation below, so a stale insertion point can't be reused */
+	parent = NULL;
+	p = &key_user_tree.rb_node;
+	spin_lock(&key_user_lock);
+
+	/* search the tree for a user record with a matching UID */
+	while (*p) {
+		parent = *p;
+		user = rb_entry(parent, struct key_user, node);
+
+		if (uid < user->uid)
+			p = &(*p)->rb_left;
+		else if (uid > user->uid)
+			p = &(*p)->rb_right;
+		else
+			goto found;
+	}
+
+	/* if we get here, we failed to find a match in the tree */
+	if (!candidate) {
+		/* allocate a candidate user record if we don't already have
+		 * one */
+		spin_unlock(&key_user_lock);
+
+		user = NULL;
+		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
+		if (unlikely(!candidate))
+			goto out;
+
+		/* the allocation may have scheduled, so we need to repeat the
+		 * search lest someone else added the record whilst we were
+		 * asleep */
+		goto try_again;
+	}
+
+	/* if we get here, then the user record still hadn't appeared on the
+	 * second pass - so we use the candidate record */
+	atomic_set(&candidate->usage, 1);
+	atomic_set(&candidate->nkeys, 0);
+	atomic_set(&candidate->nikeys, 0);
+	candidate->uid = uid;
+	candidate->qnkeys = 0;
+	candidate->qnbytes = 0;
+	spin_lock_init(&candidate->lock);
+	INIT_LIST_HEAD(&candidate->consq);
+
+	rb_link_node(&candidate->node, parent, p);
+	rb_insert_color(&candidate->node, &key_user_tree);
+	spin_unlock(&key_user_lock);
+	user = candidate;
+	goto out;
+
+	/* okay - we found a user record for this UID */
+ found:
+	atomic_inc(&user->usage);
+	spin_unlock(&key_user_lock);
+	if (candidate)
+		kfree(candidate);
+ out:
+	return user;
+
+} /* end key_user_lookup() */
+
+/*****************************************************************************/
+/*
+ * dispose of a reference to a user quota record
+ * - when the last reference goes away, the record is unlinked and freed
+ */
+void key_user_put(struct key_user *user)
+{
+	/* atomic_dec_and_lock() only takes key_user_lock when the usage
+	 * count actually reaches zero */
+	if (!atomic_dec_and_lock(&user->usage, &key_user_lock))
+		return;
+
+	/* final reference gone: remove the record from the tree and free it */
+	rb_erase(&user->node, &key_user_tree);
+	spin_unlock(&key_user_lock);
+	kfree(user);
+
+} /* end key_user_put() */
+
+/*****************************************************************************/
+/*
+ * insert a key with a fixed serial number
+ * - boot-time only (__init); used to register the prebuilt root keyrings,
+ *   whose serial numbers must never collide
+ */
+static void __init __key_insert_serial(struct key *key)
+{
+	struct rb_node **pp = &key_serial_tree.rb_node;
+	struct rb_node *par = NULL;
+	struct key *cursor;
+
+	/* descend to the leaf position ordered by serial number */
+	while (*pp) {
+		par = *pp;
+		cursor = rb_entry(par, struct key, serial_node);
+
+		if (key->serial < cursor->serial)
+			pp = &par->rb_left;
+		else if (key->serial > cursor->serial)
+			pp = &par->rb_right;
+		else
+			BUG(); /* two fixed keys sharing a serial is a setup error */
+	}
+
+	/* we've found a suitable hole - arrange for this key to occupy it */
+	rb_link_node(&key->serial_node, par, pp);
+	rb_insert_color(&key->serial_node, &key_serial_tree);
+
+} /* end __key_insert_serial() */
+
+/*****************************************************************************/
+/*
+ * assign a key the next unique serial number
+ * - we work through all the serial numbers between 2 and 2^31-1 in turn and
+ *   then wrap
+ * - runs entirely under key_serial_lock; on return the key is linked into
+ *   key_serial_tree and thus visible to key_lookup()
+ */
+static inline void key_alloc_serial(struct key *key)
+{
+	struct rb_node *parent, **p;
+	struct key *xkey;
+
+	spin_lock(&key_serial_lock);
+
+	/* propose a likely serial number and look for a hole for it in the
+	 * serial number tree */
+	key->serial = key_serial_next;
+	if (key->serial < 3)
+		key->serial = 3;
+	key_serial_next = key->serial + 1;
+
+	parent = NULL;
+	p = &key_serial_tree.rb_node;
+
+	while (*p) {
+		parent = *p;
+		xkey = rb_entry(parent, struct key, serial_node);
+
+		if (key->serial < xkey->serial)
+			p = &(*p)->rb_left;
+		else if (key->serial > xkey->serial)
+			p = &(*p)->rb_right;
+		else
+			goto serial_exists;
+	}
+	goto insert_here;
+
+	/* we found a key with the proposed serial number - walk the tree from
+	 * that point looking for the next unused serial number */
+ serial_exists:
+	for (;;) {
+		key->serial = key_serial_next;
+		/* NOTE(review): the lower bound here is 2, but the fast path
+		 * above clamps to 3 - confirm whether serial 2 is really
+		 * allocatable on wraparound or whether this should also be 3 */
+		if (key->serial < 2)
+			key->serial = 2;
+		key_serial_next = key->serial + 1;
+
+		/* recompute the link slot corresponding to "parent" before
+		 * stepping to its in-order successor */
+		if (!parent->rb_parent)
+			p = &key_serial_tree.rb_node;
+		else if (parent->rb_parent->rb_left == parent)
+			p = &parent->rb_parent->rb_left;
+		else
+			p = &parent->rb_parent->rb_right;
+
+		parent = rb_next(parent);
+		if (!parent)
+			break;
+
+		/* a gap before the successor's serial means ours is unused */
+		xkey = rb_entry(parent, struct key, serial_node);
+		if (key->serial < xkey->serial)
+			goto insert_here;
+	}
+
+	/* we've found a suitable hole - arrange for this key to occupy it */
+ insert_here:
+	rb_link_node(&key->serial_node, parent, p);
+	rb_insert_color(&key->serial_node, &key_serial_tree);
+
+	spin_unlock(&key_serial_lock);
+
+} /* end key_alloc_serial() */
+
+/*****************************************************************************/
+/*
+ * allocate a key of the specified type
+ * - update the user's quota to reflect the existence of the key
+ * - called from a key-type operation with key_types_sem read-locked by either
+ *   key_create_or_update() or by key_duplicate(); this prevents unregistration
+ *   of the key type
+ * - upon return the key is as yet uninstantiated; the caller needs to either
+ *   instantiate the key or discard it before returning
+ * - returns the new key, or ERR_PTR(-EINVAL), ERR_PTR(-ENOMEM) or
+ *   ERR_PTR(-EDQUOT) on failure
+ */
+struct key *key_alloc(struct key_type *type, const char *desc,
+		      uid_t uid, gid_t gid, key_perm_t perm,
+		      int not_in_quota)
+{
+	struct key_user *user = NULL;
+	struct key *key;
+	size_t desclen, quotalen;
+
+	/* a key must have a non-empty description */
+	key = ERR_PTR(-EINVAL);
+	if (!desc || !*desc)
+		goto error;
+
+	desclen = strlen(desc) + 1;
+	quotalen = desclen + type->def_datalen;
+
+	/* get hold of the key tracking for this user */
+	user = key_user_lookup(uid);
+	if (!user)
+		goto no_memory_1;
+
+	/* check that the user's quota permits allocation of another key and
+	 * its description; exceeding EITHER the key-count limit OR the byte
+	 * limit refuses the allocation (previously "&&" required both limits
+	 * to be breached at once, rendering each individually ineffective) */
+	if (!not_in_quota) {
+		spin_lock(&user->lock);
+		if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
+		    user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
+		    )
+			goto no_quota;
+
+		user->qnkeys++;
+		user->qnbytes += quotalen;
+		spin_unlock(&user->lock);
+	}
+
+	/* allocate and initialise the key and its description (desc is known
+	 * to be non-NULL here - see the check at the top) */
+	key = kmem_cache_alloc(key_jar, SLAB_KERNEL);
+	if (!key)
+		goto no_memory_2;
+
+	key->description = kmalloc(desclen, GFP_KERNEL);
+	if (!key->description)
+		goto no_memory_3;
+	memcpy(key->description, desc, desclen);
+
+	atomic_set(&key->usage, 1);
+	rwlock_init(&key->lock);
+	init_rwsem(&key->sem);
+	key->type = type;
+	key->user = user;
+	key->quotalen = quotalen;
+	key->datalen = type->def_datalen;
+	key->uid = uid;
+	key->gid = gid;
+	key->perm = perm;
+	key->flags = 0;
+	key->expiry = 0;
+	key->payload.data = NULL;
+
+	if (!not_in_quota)
+		key->flags |= KEY_FLAG_IN_QUOTA;
+
+	memset(&key->type_data, 0, sizeof(key->type_data));
+
+#ifdef KEY_DEBUGGING
+	key->magic = KEY_DEBUG_MAGIC;
+#endif
+
+	/* publish the key by giving it a serial number */
+	atomic_inc(&user->nkeys);
+	key_alloc_serial(key);
+
+ error:
+	return key;
+
+ no_memory_3:
+	kmem_cache_free(key_jar, key);
+ no_memory_2:
+	/* back out the quota charge taken above */
+	if (!not_in_quota) {
+		spin_lock(&user->lock);
+		user->qnkeys--;
+		user->qnbytes -= quotalen;
+		spin_unlock(&user->lock);
+	}
+	key_user_put(user);
+ no_memory_1:
+	key = ERR_PTR(-ENOMEM);
+	goto error;
+
+ no_quota:
+	spin_unlock(&user->lock);
+	key_user_put(user);
+	key = ERR_PTR(-EDQUOT);
+	goto error;
+
+} /* end key_alloc() */
+
+EXPORT_SYMBOL(key_alloc);
+
+/*****************************************************************************/
+/*
+ * reserve an amount of quota for the key's payload
+ * - adjusts the owner's byte quota by the difference between the new and the
+ *   currently recorded payload length
+ * - returns 0 on success or -EDQUOT if growth would bust the byte limit
+ */
+int key_payload_reserve(struct key *key, size_t datalen)
+{
+	int adjustment = (int) datalen - key->datalen;
+	int err = 0;
+
+	key_check(key);
+
+	/* only quota-controlled keys need their accounting changed */
+	if (adjustment != 0 && (key->flags & KEY_FLAG_IN_QUOTA)) {
+		struct key_user *owner = key->user;
+
+		spin_lock(&owner->lock);
+		if (adjustment > 0 &&
+		    owner->qnbytes + adjustment > KEYQUOTA_MAX_BYTES) {
+			err = -EDQUOT;
+		} else {
+			/* shrinkage always succeeds and releases quota */
+			owner->qnbytes += adjustment;
+			key->quotalen += adjustment;
+		}
+		spin_unlock(&owner->lock);
+	}
+
+	/* only record the new length if the quota adjustment succeeded */
+	if (err == 0)
+		key->datalen = datalen;
+
+	return err;
+
+} /* end key_payload_reserve() */
+
+EXPORT_SYMBOL(key_payload_reserve);
+
+/*****************************************************************************/
+/*
+ * instantiate a key and link it into the target keyring atomically
+ * - called with the target keyring's semaphore writelocked
+ * - returns 0 on success, -EBUSY if the key was already instantiated, or the
+ *   error from the type's instantiate op or from __key_link()
+ */
+static int __key_instantiate_and_link(struct key *key,
+				      const void *data,
+				      size_t datalen,
+				      struct key *keyring)
+{
+	int ret, awaken;
+
+	key_check(key);
+	key_check(keyring);
+
+	awaken = 0;
+	ret = -EBUSY;
+
+	/* serialise against all other instantiators and linkers */
+	down_write(&key_construction_sem);
+
+	/* can't instantiate twice */
+	if (!(key->flags & KEY_FLAG_INSTANTIATED)) {
+		/* instantiate the key */
+		ret = key->type->instantiate(key, data, datalen);
+
+		if (ret == 0) {
+			/* mark the key as being instantiated */
+			write_lock(&key->lock);
+
+			atomic_inc(&key->user->nikeys);
+			key->flags |= KEY_FLAG_INSTANTIATED;
+
+			/* anyone sleeping on request_key_conswq for this key
+			 * gets released below, after the lock is dropped */
+			if (key->flags & KEY_FLAG_USER_CONSTRUCT) {
+				key->flags &= ~KEY_FLAG_USER_CONSTRUCT;
+				awaken = 1;
+			}
+
+			write_unlock(&key->lock);
+
+			/* and link it into the destination keyring */
+			if (keyring)
+				ret = __key_link(keyring, key);
+		}
+	}
+
+	up_write(&key_construction_sem);
+
+	/* wake up anyone waiting for a key to be constructed */
+	if (awaken)
+		wake_up_all(&request_key_conswq);
+
+	return ret;
+
+} /* end __key_instantiate_and_link() */
+
+/*****************************************************************************/
+/*
+ * instantiate a key and link it into the target keyring atomically
+ * - public wrapper: takes the destination keyring's write semaphore (if any)
+ *   around __key_instantiate_and_link() so the pair appears atomic
+ */
+int key_instantiate_and_link(struct key *key,
+			     const void *data,
+			     size_t datalen,
+			     struct key *keyring)
+{
+	int result;
+
+	if (keyring) {
+		down_write(&keyring->sem);
+		result = __key_instantiate_and_link(key, data, datalen,
+						    keyring);
+		up_write(&keyring->sem);
+	} else {
+		/* no destination keyring: just instantiate */
+		result = __key_instantiate_and_link(key, data, datalen, NULL);
+	}
+
+	return result;
+
+} /* end key_instantiate_and_link() */
+
+EXPORT_SYMBOL(key_instantiate_and_link);
+
+/*****************************************************************************/
+/*
+ * negatively instantiate a key and link it into the target keyring atomically
+ * - the key is marked KEY_FLAG_NEGATIVE and given an expiry time "timeout"
+ *   seconds hence
+ * - returns 0 on success, -EBUSY if the key was already instantiated, or the
+ *   error from __key_link()
+ */
+int key_negate_and_link(struct key *key,
+			unsigned timeout,
+			struct key *keyring)
+{
+	struct timespec now;
+	int ret, awaken;
+
+	key_check(key);
+	key_check(keyring);
+
+	awaken = 0;
+	ret = -EBUSY;
+
+	if (keyring)
+		down_write(&keyring->sem);
+
+	/* serialise against all other instantiators and linkers */
+	down_write(&key_construction_sem);
+
+	/* can't instantiate twice */
+	if (!(key->flags & KEY_FLAG_INSTANTIATED)) {
+		/* mark the key as being negatively instantiated */
+		write_lock(&key->lock);
+
+		atomic_inc(&key->user->nikeys);
+		key->flags |= KEY_FLAG_INSTANTIATED | KEY_FLAG_NEGATIVE;
+		now = current_kernel_time();
+		key->expiry = now.tv_sec + timeout;
+
+		/* anyone sleeping on request_key_conswq for this key gets
+		 * released below, after the locks are dropped */
+		if (key->flags & KEY_FLAG_USER_CONSTRUCT) {
+			key->flags &= ~KEY_FLAG_USER_CONSTRUCT;
+			awaken = 1;
+		}
+
+		write_unlock(&key->lock);
+		ret = 0;
+
+		/* and link it into the destination keyring */
+		if (keyring)
+			ret = __key_link(keyring, key);
+	}
+
+	up_write(&key_construction_sem);
+
+	if (keyring)
+		up_write(&keyring->sem);
+
+	/* wake up anyone waiting for a key to be constructed */
+	if (awaken)
+		wake_up_all(&request_key_conswq);
+
+	return ret;
+
+} /* end key_negate_and_link() */
+
+EXPORT_SYMBOL(key_negate_and_link);
+
+/*****************************************************************************/
+/*
+ * do cleaning up in process context so that we don't have to disable
+ * interrupts all over the place
+ * - scheduled as a work item by key_put() when a key's usage count hits zero;
+ *   destroys every zero-usage key it can find in the serial tree
+ */
+static void key_cleanup(void *data)
+{
+	struct rb_node *_n;
+	struct key *key;
+
+ go_again:
+	/* look for a dead key in the tree */
+	spin_lock(&key_serial_lock);
+
+	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
+		key = rb_entry(_n, struct key, serial_node);
+
+		if (atomic_read(&key->usage) == 0)
+			goto found_dead_key;
+	}
+
+	/* nothing (more) to do */
+	spin_unlock(&key_serial_lock);
+	return;
+
+ found_dead_key:
+	/* we found a dead key - once we've removed it from the tree, we can
+	 * drop the lock */
+	rb_erase(&key->serial_node, &key_serial_tree);
+	spin_unlock(&key_serial_lock);
+
+	/* deal with the user's key tracking and quota */
+	if (key->flags & KEY_FLAG_IN_QUOTA) {
+		spin_lock(&key->user->lock);
+		key->user->qnkeys--;
+		key->user->qnbytes -= key->quotalen;
+		spin_unlock(&key->user->lock);
+	}
+
+	atomic_dec(&key->user->nkeys);
+	if (key->flags & KEY_FLAG_INSTANTIATED)
+		atomic_dec(&key->user->nikeys);
+
+	/* this may free the user record too */
+	key_user_put(key->user);
+
+	/* now throw away the key memory */
+	if (key->type->destroy)
+		key->type->destroy(key);
+
+	kfree(key->description);
+
+#ifdef KEY_DEBUGGING
+	/* poison the magic so stale pointers trip key_check() */
+	key->magic = KEY_DEBUG_MAGIC_X;
+#endif
+	kmem_cache_free(key_jar, key);
+
+	/* there may, of course, be more than one key to destroy */
+	goto go_again;
+
+} /* end key_cleanup() */
+
+/*****************************************************************************/
+/*
+ * dispose of a reference to a key
+ * - when all the references are gone, we schedule the cleanup task to come and
+ *   pull it out of the tree in definite process context
+ */
+void key_put(struct key *key)
+{
+	/* NULL keys are tolerated so callers needn't check first */
+	if (!key)
+		return;
+
+	key_check(key);
+
+	/* nothing is freed here; the work item performs the actual
+	 * destruction in process context */
+	if (atomic_dec_and_test(&key->usage))
+		schedule_work(&key_cleanup_task);
+
+} /* end key_put() */
+
+EXPORT_SYMBOL(key_put);
+
+/*****************************************************************************/
+/*
+ * find a key by its serial number
+ * - returns the key with its usage count incremented, or ERR_PTR(-ENOKEY) if
+ *   no live key has that serial
+ */
+struct key *key_lookup(key_serial_t id)
+{
+	struct rb_node *n;
+	struct key *key;
+
+	spin_lock(&key_serial_lock);
+
+	/* search the tree for the specified key */
+	n = key_serial_tree.rb_node;
+	while (n) {
+		key = rb_entry(n, struct key, serial_node);
+
+		if (id < key->serial)
+			n = n->rb_left;
+		else if (id > key->serial)
+			n = n->rb_right;
+		else
+			goto found;
+	}
+
+ not_found:
+	key = ERR_PTR(-ENOKEY);
+	goto error;
+
+ found:
+	/* pretend the key doesn't exist if it's dead
+	 * (note: this path previously returned without releasing
+	 * key_serial_lock; the unlock now sits on the common exit so that
+	 * every path drops the lock exactly once) */
+	if (atomic_read(&key->usage) == 0 ||
+	    (key->flags & KEY_FLAG_DEAD) ||
+	    key->type == &key_type_dead)
+		goto not_found;
+
+	/* this races with key_put(), but that doesn't matter since key_put()
+	 * doesn't actually change the key
+	 */
+	atomic_inc(&key->usage);
+
+ error:
+	spin_unlock(&key_serial_lock);
+	return key;
+
+} /* end key_lookup() */
+
+/*****************************************************************************/
+/*
+ * find and lock the specified key type against removal
+ * - on success the returned type is pinned: key_types_sem is left readlocked
+ *   and must be released with key_type_put()
+ * - on failure the semaphore is dropped and ERR_PTR(-ENOKEY) is returned
+ */
+struct key_type *key_type_lookup(const char *type)
+{
+	struct key_type *candidate;
+
+	down_read(&key_types_sem);
+
+	/* scan the registered kernel types for a name match */
+	list_for_each_entry(candidate, &key_types_list, link) {
+		if (strcmp(candidate->name, type) == 0)
+			return candidate; /* semaphore deliberately held */
+	}
+
+	/* no such type - drop the lock again before reporting the error */
+	up_read(&key_types_sem);
+	return ERR_PTR(-ENOKEY);
+
+} /* end key_type_lookup() */
+
+/*****************************************************************************/
+/*
+ * unlock a key type
+ * - releases the read hold on key_types_sem taken by key_type_lookup(),
+ *   permitting the type to be unregistered again
+ * - the ktype argument itself is unused; it is taken purely for symmetry
+ */
+void key_type_put(struct key_type *ktype)
+{
+	up_read(&key_types_sem);
+
+} /* end key_type_put() */
+
+/*****************************************************************************/
+/*
+ * attempt to update an existing key
+ * - the key has an incremented refcount
+ * - we need to put the key if we get an error
+ * - returns the key on success, or an ERR_PTR (with the reference dropped)
+ */
+static inline struct key *__key_update(struct key *key, const void *payload,
+				       size_t plen)
+{
+	int err;
+
+	/* updating needs write permission on the key ... */
+	if (!key_permission(key, KEY_WRITE)) {
+		err = -EACCES;
+		goto discard;
+	}
+
+	/* ... and a type that supports in-place update */
+	if (!key->type->update) {
+		err = -EEXIST;
+		goto discard;
+	}
+
+	down_write(&key->sem);
+
+	err = key->type->update(key, payload, plen);
+
+	if (err == 0) {
+		/* updating a negative key instantiates it */
+		write_lock(&key->lock);
+		key->flags &= ~KEY_FLAG_NEGATIVE;
+		write_unlock(&key->lock);
+	}
+
+	up_write(&key->sem);
+
+	if (err < 0)
+		goto discard;
+
+	return key;
+
+ discard:
+	/* the caller handed us a pinned key; drop it on failure */
+	key_put(key);
+	return ERR_PTR(err);
+
+} /* end __key_update() */
+
+/*****************************************************************************/
+/*
+ * search the specified keyring for a key of the same description; if one is
+ * found, update it, otherwise add a new one
+ * - returns the new or updated key with a reference held, or an ERR_PTR
+ */
+struct key *key_create_or_update(struct key *keyring,
+				 const char *type,
+				 const char *description,
+				 const void *payload,
+				 size_t plen,
+				 int not_in_quota)
+{
+	struct key_type *ktype;
+	struct key *key = NULL;
+	key_perm_t perm;
+	int ret;
+
+	key_check(keyring);
+
+	/* look up the key type to see if it's one of the registered kernel
+	 * types; on success the type is pinned until key_type_put() */
+	ktype = key_type_lookup(type);
+	if (IS_ERR(ktype)) {
+		key = ERR_PTR(-ENODEV);
+		goto error;
+	}
+
+	/* creation requires the type to be matchable and instantiable */
+	ret = -EINVAL;
+	if (!ktype->match || !ktype->instantiate)
+		goto error_2;
+
+	/* search for an existing key of the same type and description in the
+	 * destination keyring
+	 */
+	down_write(&keyring->sem);
+
+	key = __keyring_search_one(keyring, ktype, description, 0);
+	if (!IS_ERR(key))
+		goto found_matching_key;
+
+	/* if we're going to allocate a new key, we're going to have to modify
+	 * the keyring
+	 * NOTE(review): on permission failure the ERR_PTR left in "key" by
+	 * the failed search (not -EACCES) is what propagates - confirm this
+	 * is the intended error */
+	ret = -EACCES;
+	if (!key_permission(keyring, KEY_WRITE))
+		goto error_3;
+
+	/* decide on the permissions we want */
+	perm = KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK;
+
+	if (ktype->read)
+		perm |= KEY_USR_READ;
+
+	if (ktype == &key_type_keyring || ktype->update)
+		perm |= KEY_USR_WRITE;
+
+	/* allocate a new key */
+	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
+			perm, not_in_quota);
+	if (IS_ERR(key)) {
+		ret = PTR_ERR(key);
+		goto error_3;
+	}
+
+	/* instantiate it and link it into the target keyring */
+	ret = __key_instantiate_and_link(key, payload, plen, keyring);
+	if (ret < 0) {
+		key_put(key);
+		key = ERR_PTR(ret);
+	}
+
+ error_3:
+	up_write(&keyring->sem);
+ error_2:
+	key_type_put(ktype);
+ error:
+	return key;
+
+ found_matching_key:
+	/* we found a matching key, so we're going to try to update it
+	 * - we can drop the locks first as we have the key pinned
+	 */
+	up_write(&keyring->sem);
+	key_type_put(ktype);
+
+	key = __key_update(key, payload, plen);
+	goto error;
+
+} /* end key_create_or_update() */
+
+EXPORT_SYMBOL(key_create_or_update);
+
+/*****************************************************************************/
+/*
+ * update a key
+ * - the key must grant the caller write permission and its type must supply
+ *   an update operation
+ * - returns 0 on success, -EACCES, -EOPNOTSUPP or the type op's error
+ */
+int key_update(struct key *key, const void *payload, size_t plen)
+{
+	int err;
+
+	key_check(key);
+
+	/* the key must be writable by the caller */
+	if (!key_permission(key, KEY_WRITE))
+		return -EACCES;
+
+	/* not every key type can be updated in place */
+	if (!key->type->update)
+		return -EOPNOTSUPP;
+
+	down_write(&key->sem);
+
+	err = key->type->update(key, payload, plen);
+	if (err == 0) {
+		/* updating a negative key instantiates it */
+		write_lock(&key->lock);
+		key->flags &= ~KEY_FLAG_NEGATIVE;
+		write_unlock(&key->lock);
+	}
+
+	up_write(&key->sem);
+
+	return err;
+
+} /* end key_update() */
+
+EXPORT_SYMBOL(key_update);
+
+/*****************************************************************************/
+/*
+ * duplicate a key, potentially with a revised description
+ * - must be supported by the keytype (keyrings for instance can be duplicated)
+ * - returns the new instantiated key, or an ERR_PTR on failure
+ */
+struct key *key_duplicate(struct key *source, const char *desc)
+{
+	struct key *key;
+	int ret;
+
+	key_check(source);
+
+	/* keep the source's description unless a new one was supplied */
+	if (!desc)
+		desc = source->description;
+
+	/* pin the key types against unregistration whilst we work */
+	down_read(&key_types_sem);
+
+	ret = -EINVAL;
+	if (!source->type->duplicate)
+		goto error;
+
+	/* allocate and instantiate a key */
+	key = key_alloc(source->type, desc, current->fsuid, current->fsgid,
+			source->perm, 0);
+	if (IS_ERR(key))
+		goto error_k;
+
+	/* copy the payload under the source's semaphore so that it can't
+	 * change beneath us */
+	down_read(&source->sem);
+	ret = key->type->duplicate(key, source);
+	up_read(&source->sem);
+	if (ret < 0)
+		goto error2;
+
+	/* mark the copy as instantiated */
+	atomic_inc(&key->user->nikeys);
+
+	write_lock(&key->lock);
+	key->flags |= KEY_FLAG_INSTANTIATED;
+	write_unlock(&key->lock);
+
+ error_k:
+	up_read(&key_types_sem);
+ out:
+	return key;
+
+ error2:
+	key_put(key);
+ error:
+	up_read(&key_types_sem);
+	key = ERR_PTR(ret);
+	goto out;
+
+} /* end key_duplicate() */
+
+/*****************************************************************************/
+/*
+ * revoke a key
+ * - the key is merely flagged KEY_FLAG_REVOKED; the structure itself persists
+ *   until the last reference is put
+ */
+void key_revoke(struct key *key)
+{
+	key_check(key);
+
+	/* make sure no one's trying to change or use the key when we mark
+	 * it: the semaphore excludes updaters and key->lock guards the
+	 * flags word */
+	down_write(&key->sem);
+	write_lock(&key->lock);
+	key->flags |= KEY_FLAG_REVOKED;
+	write_unlock(&key->lock);
+	up_write(&key->sem);
+
+} /* end key_revoke() */
+
+EXPORT_SYMBOL(key_revoke);
+
+/*****************************************************************************/
+/*
+ * register a type of key
+ * - returns 0 on success or -EEXIST if a type of that name already exists
+ */
+int register_key_type(struct key_type *ktype)
+{
+	struct key_type *existing;
+	int err = 0;
+
+	down_write(&key_types_sem);
+
+	/* a given type name may only be registered once */
+	list_for_each_entry(existing, &key_types_list, link) {
+		if (strcmp(existing->name, ktype->name) == 0) {
+			err = -EEXIST;
+			goto unlock;
+		}
+	}
+
+	/* no clash: make the new type visible */
+	list_add(&ktype->link, &key_types_list);
+
+ unlock:
+	up_write(&key_types_sem);
+	return err;
+
+} /* end register_key_type() */
+
+EXPORT_SYMBOL(register_key_type);
+
+/*****************************************************************************/
+/*
+ * unregister a type of key
+ * - all remaining keys of this type are re-typed to key_type_dead and their
+ *   payloads destroyed and poisoned
+ */
+void unregister_key_type(struct key_type *ktype)
+{
+	struct rb_node *_n;
+	struct key *key;
+
+	down_write(&key_types_sem);
+
+	/* withdraw the key type */
+	list_del_init(&ktype->link);
+
+	/* need to withdraw all keys of this type */
+	spin_lock(&key_serial_lock);
+
+	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
+		key = rb_entry(_n, struct key, serial_node);
+
+		if (key->type != ktype)
+			continue;
+
+		write_lock(&key->lock);
+		key->type = &key_type_dead;
+		write_unlock(&key->lock);
+
+		/* there shouldn't be anyone looking at the description or
+		 * payload now
+		 * NOTE(review): ->destroy() is invoked here whilst
+		 * key_serial_lock (a spinlock) is held, so destroy ops
+		 * presumably must not sleep - confirm all types honour that */
+		if (ktype->destroy)
+			ktype->destroy(key);
+		memset(&key->payload, 0xbd, sizeof(key->payload));
+	}
+
+	spin_unlock(&key_serial_lock);
+	up_write(&key_types_sem);
+
+} /* end unregister_key_type() */
+
+EXPORT_SYMBOL(unregister_key_type);
+
+/*****************************************************************************/
+/*
+ * initialise the key management stuff
+ * - runs at subsys_initcall time, before any other key users, so the lists
+ *   and trees can be populated without locking
+ */
+static int __init key_init(void)
+{
+	/* allocate a slab in which we can store keys */
+	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
+			0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!key_jar)
+		panic("Cannot create key jar\n");
+
+	/* add the special key types */
+	list_add_tail(&key_type_keyring.link, &key_types_list);
+	list_add_tail(&key_type_dead.link, &key_types_list);
+	list_add_tail(&key_type_user.link, &key_types_list);
+
+	/* record the root user tracking (tree is empty, so the record
+	 * becomes the root node) */
+	rb_link_node(&root_key_user.node,
+		     NULL,
+		     &key_user_tree.rb_node);
+
+	rb_insert_color(&root_key_user.node,
+			&key_user_tree);
+
+	/* record root's user standard keyrings, which carry fixed serial
+	 * numbers */
+	key_check(&root_user_keyring);
+	key_check(&root_session_keyring);
+
+	__key_insert_serial(&root_user_keyring);
+	__key_insert_serial(&root_session_keyring);
+
+	keyring_publish_name(&root_user_keyring);
+	keyring_publish_name(&root_session_keyring);
+
+	/* link the two root keyrings together */
+	key_link(&root_session_keyring, &root_user_keyring);
+
+	return 0;
+
+} /* end key_init() */
+
+subsys_initcall(key_init);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
new file mode 100644
index 000000000000..2045b24615fd
--- /dev/null
+++ b/security/keys/keyctl.c
@@ -0,0 +1,991 @@
+/* keyctl.c: userspace keyctl operations
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/keyctl.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <asm/uaccess.h>
+#include "internal.h"
+
+/*****************************************************************************/
+/*
+ * extract the description of a new key from userspace and either add it as a
+ * new key to the specified keyring or update a matching key in that keyring
+ * - the keyring must be writable
+ * - returns the new key's serial number, or a negative errno
+ * - implements add_key()
+ */
+asmlinkage long sys_add_key(const char __user *_type,
+			    const char __user *_description,
+			    const void __user *_payload,
+			    size_t plen,
+			    key_serial_t ringid)
+{
+	struct key *keyring, *key;
+	char type[32], *description;
+	void *payload;
+	long dlen, ret;
+
+	ret = -EINVAL;
+	if (plen > 32767)
+		goto error;
+
+	/* draw all the data into kernel space */
+	ret = strncpy_from_user(type, _type, sizeof(type) - 1);
+	if (ret < 0)
+		goto error;
+	/* strncpy_from_user() doesn't terminate overlong input */
+	type[31] = '\0';
+
+	/* refuse key types whose names begin with a '.': those are reserved
+	 * for kernel-internal use and must not be creatable from userspace */
+	ret = -EPERM;
+	if (type[0] == '.')
+		goto error;
+
+	ret = -EFAULT;
+	dlen = strnlen_user(_description, PAGE_SIZE - 1);
+	if (dlen <= 0)
+		goto error;
+
+	ret = -EINVAL;
+	if (dlen > PAGE_SIZE - 1)
+		goto error;
+
+	ret = -ENOMEM;
+	description = kmalloc(dlen + 1, GFP_KERNEL);
+	if (!description)
+		goto error;
+
+	ret = -EFAULT;
+	if (copy_from_user(description, _description, dlen + 1) != 0)
+		goto error2;
+
+	/* pull the payload in if one was supplied */
+	payload = NULL;
+
+	if (_payload) {
+		ret = -ENOMEM;
+		payload = kmalloc(plen, GFP_KERNEL);
+		if (!payload)
+			goto error2;
+
+		ret = -EFAULT;
+		if (copy_from_user(payload, _payload, plen) != 0)
+			goto error3;
+	}
+
+	/* find the target keyring (which must be writable) */
+	keyring = lookup_user_key(ringid, 1, 0, KEY_WRITE);
+	if (IS_ERR(keyring)) {
+		ret = PTR_ERR(keyring);
+		goto error3;
+	}
+
+	/* create or update the requested key and add it to the target
+	 * keyring */
+	key = key_create_or_update(keyring, type, description,
+				   payload, plen, 0);
+	if (!IS_ERR(key)) {
+		ret = key->serial;
+		key_put(key);
+	}
+	else {
+		ret = PTR_ERR(key);
+	}
+
+	key_put(keyring);
+ error3:
+	kfree(payload);
+ error2:
+	kfree(description);
+ error:
+	return ret;
+
+} /* end sys_add_key() */
+
+/*****************************************************************************/
+/*
+ * search the process keyrings for a matching key
+ * - nested keyrings may also be searched if they have Search permission
+ * - if a key is found, it will be attached to the destination keyring if
+ *   there's one specified
+ * - /sbin/request-key will be invoked if _callout_info is non-NULL
+ * - the _callout_info string will be passed to /sbin/request-key
+ * - if the _callout_info string is empty, it will be rendered as "-"
+ * - returns the found key's serial number, or a negative errno
+ * - implements request_key()
+ */
+asmlinkage long sys_request_key(const char __user *_type,
+				const char __user *_description,
+				const char __user *_callout_info,
+				key_serial_t destringid)
+{
+	struct key_type *ktype;
+	struct key *key, *dest;
+	char type[32], *description, *callout_info;
+	long dlen, ret;
+
+	/* pull the type into kernel space */
+	ret = strncpy_from_user(type, _type, sizeof(type) - 1);
+	if (ret < 0)
+		goto error;
+	/* strncpy_from_user() doesn't terminate overlong input */
+	type[31] = '\0';
+
+	/* pull the description into kernel space */
+	ret = -EFAULT;
+	dlen = strnlen_user(_description, PAGE_SIZE - 1);
+	if (dlen <= 0)
+		goto error;
+
+	ret = -EINVAL;
+	if (dlen > PAGE_SIZE - 1)
+		goto error;
+
+	ret = -ENOMEM;
+	description = kmalloc(dlen + 1, GFP_KERNEL);
+	if (!description)
+		goto error;
+
+	ret = -EFAULT;
+	if (copy_from_user(description, _description, dlen + 1) != 0)
+		goto error2;
+
+	/* pull the callout info into kernel space */
+	callout_info = NULL;
+	if (_callout_info) {
+		ret = -EFAULT;
+		dlen = strnlen_user(_callout_info, PAGE_SIZE - 1);
+		if (dlen <= 0)
+			goto error2;
+
+		ret = -EINVAL;
+		if (dlen > PAGE_SIZE - 1)
+			goto error2;
+
+		ret = -ENOMEM;
+		callout_info = kmalloc(dlen + 1, GFP_KERNEL);
+		if (!callout_info)
+			goto error2;
+
+		ret = -EFAULT;
+		if (copy_from_user(callout_info, _callout_info, dlen + 1) != 0)
+			goto error3;
+	}
+
+	/* get the destination keyring if specified */
+	dest = NULL;
+	if (destringid) {
+		dest = lookup_user_key(destringid, 1, 0, KEY_WRITE);
+		if (IS_ERR(dest)) {
+			ret = PTR_ERR(dest);
+			goto error3;
+		}
+	}
+
+	/* find the key type (pinned until key_type_put()) */
+	ktype = key_type_lookup(type);
+	if (IS_ERR(ktype)) {
+		ret = PTR_ERR(ktype);
+		goto error4;
+	}
+
+	/* do the search */
+	key = request_key(ktype, description, callout_info);
+	if (IS_ERR(key)) {
+		ret = PTR_ERR(key);
+		goto error5;
+	}
+
+	/* link the resulting key to the destination keyring */
+	if (dest) {
+		ret = key_link(dest, key);
+		if (ret < 0)
+			goto error6;
+	}
+
+	ret = key->serial;
+
+	/* unwind in strict reverse order of acquisition */
+ error6:
+	key_put(key);
+ error5:
+	key_type_put(ktype);
+ error4:
+	key_put(dest);
+ error3:
+	kfree(callout_info);
+ error2:
+	kfree(description);
+ error:
+	return ret;
+
+} /* end sys_request_key() */
+
+/*****************************************************************************/
+/*
+ * get the ID of the specified process keyring
+ * - the keyring must have search permission to be found
+ * - implements keyctl(KEYCTL_GET_KEYRING_ID)
+ */
+static long keyctl_get_keyring_ID(key_serial_t id, int create)
+{
+	struct key *key;
+	long serial;
+
+	key = lookup_user_key(id, create, 0, KEY_SEARCH);
+	if (IS_ERR(key))
+		return PTR_ERR(key);
+
+	/* report the serial number and drop the reference we took */
+	serial = key->serial;
+	key_put(key);
+	return serial;
+
+} /* end keyctl_get_keyring_ID() */
+
+/*****************************************************************************/
+/*
+ * join the session keyring
+ * - implements keyctl(KEYCTL_JOIN_SESSION_KEYRING)
+ */
+static long keyctl_join_session_keyring(const char __user *_name)
+{
+	char *name = NULL;
+	long nlen, ret;
+
+	/* fetch the name from userspace if one was given; a NULL name is
+	 * passed straight through to join_session_keyring() */
+	if (_name) {
+		nlen = strnlen_user(_name, PAGE_SIZE - 1);
+		if (nlen <= 0)
+			return -EFAULT;
+
+		if (nlen > PAGE_SIZE - 1)
+			return -EINVAL;
+
+		name = kmalloc(nlen + 1, GFP_KERNEL);
+		if (!name)
+			return -ENOMEM;
+
+		/* copy including the NUL terminator */
+		if (copy_from_user(name, _name, nlen + 1) != 0) {
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+
+	/* join the session */
+	ret = join_session_keyring(name);
+
+ out:
+	kfree(name);
+	return ret;
+
+} /* end keyctl_join_session_keyring() */
+
+/*****************************************************************************/
+/*
+ * update a key's data payload
+ * - the key must be writable
+ * - implements keyctl(KEYCTL_UPDATE)
+ */
+static long keyctl_update_key(key_serial_t id,
+			      const void __user *_payload,
+			      size_t plen)
+{
+	struct key *key;
+	void *payload = NULL;
+	long ret;
+
+	/* refuse oversized payloads outright */
+	if (plen > PAGE_SIZE)
+		return -EINVAL;
+
+	/* pull the payload into kernel space if one was supplied */
+	if (_payload) {
+		payload = kmalloc(plen, GFP_KERNEL);
+		if (!payload)
+			return -ENOMEM;
+
+		ret = -EFAULT;
+		if (copy_from_user(payload, _payload, plen) != 0)
+			goto out;
+	}
+
+	/* find the target key (which must be writable) */
+	key = lookup_user_key(id, 0, 0, KEY_WRITE);
+	if (IS_ERR(key)) {
+		ret = PTR_ERR(key);
+		goto out;
+	}
+
+	/* update the key */
+	ret = key_update(key, payload, plen);
+	key_put(key);
+
+ out:
+	kfree(payload);
+	return ret;
+
+} /* end keyctl_update_key() */
+
+/*****************************************************************************/
+/*
+ * revoke a key
+ * - the key must be writable
+ * - implements keyctl(KEYCTL_REVOKE)
+ * - returns 0 on success or a negative errno if the key couldn't be found
+ *   or wasn't writable
+ */
+static long keyctl_revoke_key(key_serial_t id)
+{
+ struct key *key;
+ long ret;
+
+ key = lookup_user_key(id, 0, 0, KEY_WRITE);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error;
+ }
+
+ key_revoke(key);
+ ret = 0;
+
+ key_put(key);
+ error:
+ /* return ret, not a literal 0, so lookup errors reach the caller */
+ return ret;
+
+} /* end keyctl_revoke_key() */
+
+/*****************************************************************************/
+/*
+ * clear the specified process keyring
+ * - the keyring must be writable
+ * - implements keyctl(KEYCTL_CLEAR)
+ * - the create flag is passed to lookup_user_key() so a special keyring ID
+ *   will be instantiated if it doesn't yet exist
+ */
+static long keyctl_keyring_clear(key_serial_t ringid)
+{
+ struct key *keyring;
+ long ret;
+
+ keyring = lookup_user_key(ringid, 1, 0, KEY_WRITE);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error;
+ }
+
+ ret = keyring_clear(keyring);
+
+ key_put(keyring);
+ error:
+ return ret;
+
+} /* end keyctl_keyring_clear() */
+
+/*****************************************************************************/
+/*
+ * link a key into a keyring
+ * - the keyring must be writable
+ * - the key must be linkable
+ * - implements keyctl(KEYCTL_LINK)
+ * - both lookups pass the create flag, so special IDs are instantiated on
+ *   demand
+ */
+static long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
+{
+ struct key *keyring, *key;
+ long ret;
+
+ keyring = lookup_user_key(ringid, 1, 0, KEY_WRITE);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error;
+ }
+
+ key = lookup_user_key(id, 1, 0, KEY_LINK);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error2;
+ }
+
+ ret = key_link(keyring, key);
+
+ key_put(key);
+ error2:
+ key_put(keyring);
+ error:
+ return ret;
+
+} /* end keyctl_keyring_link() */
+
+/*****************************************************************************/
+/*
+ * unlink the first attachment of a key from a keyring
+ * - the keyring must be writable
+ * - we don't need any permissions on the key
+ * - implements keyctl(KEYCTL_UNLINK)
+ */
+static long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
+{
+ struct key *keyring, *key;
+ long ret;
+
+ keyring = lookup_user_key(ringid, 0, 0, KEY_WRITE);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error;
+ }
+
+ /* permission mask of 0: any key we can look up may be unlinked */
+ key = lookup_user_key(id, 0, 0, 0);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error2;
+ }
+
+ ret = key_unlink(keyring, key);
+
+ key_put(key);
+ error2:
+ key_put(keyring);
+ error:
+ return ret;
+
+} /* end keyctl_keyring_unlink() */
+
+/*****************************************************************************/
+/*
+ * describe a user key
+ * - the key must have view permission
+ * - if there's a buffer, we place up to buflen bytes of data into it
+ * - unless there's an error, we return the amount of description available,
+ *   irrespective of how much we may have copied
+ *   (the count includes the trailing NUL; truncation is not reported)
+ * - the description is formatted thus:
+ *   type;uid;gid;perm;description<NUL>
+ * - implements keyctl(KEYCTL_DESCRIBE)
+ */
+static long keyctl_describe_key(key_serial_t keyid,
+ char __user *buffer,
+ size_t buflen)
+{
+ struct key *key;
+ char *tmpbuf;
+ long ret;
+
+ key = lookup_user_key(keyid, 0, 1, KEY_VIEW);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error;
+ }
+
+ /* calculate how much description we're going to return */
+ ret = -ENOMEM;
+ tmpbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmpbuf)
+ goto error2;
+
+ ret = snprintf(tmpbuf, PAGE_SIZE - 1,
+ "%s;%d;%d;%06x;%s",
+ key->type->name,
+ key->uid,
+ key->gid,
+ key->perm,
+ key->description ? key->description :""
+ );
+
+ /* include a NUL char at the end of the data */
+ if (ret > PAGE_SIZE - 1)
+ ret = PAGE_SIZE - 1;
+ tmpbuf[ret] = 0;
+ ret++;
+
+ /* consider returning the data */
+ if (buffer && buflen > 0) {
+ if (buflen > ret)
+ buflen = ret;
+
+ /* a short copy isn't an error; ret still reports full length */
+ if (copy_to_user(buffer, tmpbuf, buflen) != 0)
+ ret = -EFAULT;
+ }
+
+ kfree(tmpbuf);
+ error2:
+ key_put(key);
+ error:
+ return ret;
+
+} /* end keyctl_describe_key() */
+
+/*****************************************************************************/
+/*
+ * search the specified keyring for a matching key
+ * - the start keyring must be searchable
+ * - nested keyrings may also be searched if they are searchable
+ * - only keys with search permission may be found
+ * - if a key is found, it will be attached to the destination keyring if
+ *   there's one specified
+ * - implements keyctl(KEYCTL_SEARCH)
+ * - returns the found key's serial number or a negative errno
+ */
+static long keyctl_keyring_search(key_serial_t ringid,
+ const char __user *_type,
+ const char __user *_description,
+ key_serial_t destringid)
+{
+ struct key_type *ktype;
+ struct key *keyring, *key, *dest;
+ char type[32], *description;
+ long dlen, ret;
+
+ /* pull the type and description into kernel space */
+ /* the type name is bounded at 31 chars plus NUL */
+ ret = strncpy_from_user(type, _type, sizeof(type) - 1);
+ if (ret < 0)
+ goto error;
+ type[31] = '\0';
+
+ ret = -EFAULT;
+ dlen = strnlen_user(_description, PAGE_SIZE - 1);
+ if (dlen <= 0)
+ goto error;
+
+ ret = -EINVAL;
+ if (dlen > PAGE_SIZE - 1)
+ goto error;
+
+ ret = -ENOMEM;
+ description = kmalloc(dlen + 1, GFP_KERNEL);
+ if (!description)
+ goto error;
+
+ ret = -EFAULT;
+ if (copy_from_user(description, _description, dlen + 1) != 0)
+ goto error2;
+
+ /* get the keyring at which to begin the search */
+ keyring = lookup_user_key(ringid, 0, 0, KEY_SEARCH);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error2;
+ }
+
+ /* get the destination keyring if specified */
+ dest = NULL;
+ if (destringid) {
+ dest = lookup_user_key(destringid, 1, 0, KEY_WRITE);
+ if (IS_ERR(dest)) {
+ ret = PTR_ERR(dest);
+ goto error3;
+ }
+ }
+
+ /* find the key type */
+ ktype = key_type_lookup(type);
+ if (IS_ERR(ktype)) {
+ ret = PTR_ERR(ktype);
+ goto error4;
+ }
+
+ /* do the search */
+ key = keyring_search(keyring, ktype, description);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+
+ /* treat lack or presence of a negative key the same */
+ if (ret == -EAGAIN)
+ ret = -ENOKEY;
+ goto error5;
+ }
+
+ /* link the resulting key to the destination keyring if we can */
+ if (dest) {
+ ret = -EACCES;
+ if (!key_permission(key, KEY_LINK))
+ goto error6;
+
+ ret = key_link(dest, key);
+ if (ret < 0)
+ goto error6;
+ }
+
+ ret = key->serial;
+
+ /* unwind the references in strict reverse order of acquisition */
+ error6:
+ key_put(key);
+ error5:
+ key_type_put(ktype);
+ error4:
+ key_put(dest);
+ error3:
+ key_put(keyring);
+ error2:
+ kfree(description);
+ error:
+ return ret;
+
+} /* end keyctl_keyring_search() */
+
+/*****************************************************************************/
+/*
+ * match function for search_process_keyrings_aux(): a key matches only if it
+ * is the exact key object we are looking for
+ */
+static int keyctl_read_key_same(const struct key *key, const void *target)
+{
+ return (const void *) key == target;
+} /* end keyctl_read_key_same() */
+
+/*****************************************************************************/
+/*
+ * read a user key's payload
+ * - the keyring must be readable or the key must be searchable from the
+ *   process's keyrings
+ * - if there's a buffer, we place up to buflen bytes of data into it
+ * - unless there's an error, we return the amount of data in the key,
+ *   irrespective of how much we may have copied
+ * - implements keyctl(KEYCTL_READ)
+ */
+static long keyctl_read_key(key_serial_t keyid,
+ char __user *buffer,
+ size_t buflen)
+{
+ struct key *key, *skey;
+ long ret;
+
+ /* find the key first */
+ key = lookup_user_key(keyid, 0, 0, 0);
+ if (!IS_ERR(key)) {
+ /* see if we can read it directly */
+ if (key_permission(key, KEY_READ))
+ goto can_read_key;
+
+ /* can't; see if it's searchable from this process's
+ * keyrings */
+ ret = -ENOKEY;
+ if (key_permission(key, KEY_SEARCH)) {
+ /* okay - we do have search permission on the key
+ * itself, but do we have the key? */
+ skey = search_process_keyrings_aux(key->type, key,
+ keyctl_read_key_same);
+ if (!IS_ERR(skey))
+ goto can_read_key2;
+ }
+
+ goto error2;
+ }
+
+ ret = -ENOKEY;
+ goto error;
+
+ /* the key is probably readable - now try to read it */
+ can_read_key2:
+ /* skey == key by construction (keyctl_read_key_same); just drop the
+ * extra reference the search took */
+ key_put(skey);
+ can_read_key:
+ ret = key_validate(key);
+ if (ret == 0) {
+ /* key types without a ->read op can't be read at all */
+ ret = -EOPNOTSUPP;
+ if (key->type->read) {
+ /* read the data with the semaphore held (since we
+ * might sleep) */
+ down_read(&key->sem);
+ ret = key->type->read(key, buffer, buflen);
+ up_read(&key->sem);
+ }
+ }
+
+ error2:
+ key_put(key);
+ error:
+ return ret;
+
+} /* end keyctl_read_key() */
+
+/*****************************************************************************/
+/*
+ * change the ownership of a key
+ * - the keyring owned by the changer
+ * - if the uid or gid is -1, then that parameter is not changed
+ * - changing the UID is not yet supported (-EOPNOTSUPP)
+ * - implements keyctl(KEYCTL_CHOWN)
+ */
+static long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
+{
+ struct key *key;
+ long ret;
+
+ /* nothing to change: succeed trivially */
+ ret = 0;
+ if (uid == (uid_t) -1 && gid == (gid_t) -1)
+ goto error;
+
+ key = lookup_user_key(id, 1, 1, 0);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error;
+ }
+
+ /* make the changes with the locks held to prevent chown/chown races */
+ /* lock order: key->sem (write) then key->lock */
+ ret = -EACCES;
+ down_write(&key->sem);
+ write_lock(&key->lock);
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ /* only the sysadmin can chown a key to some other UID */
+ if (uid != (uid_t) -1 && key->uid != uid)
+ goto no_access;
+
+ /* only the sysadmin can set the key's GID to a group other
+ * than one of those that the current process subscribes to */
+ if (gid != (gid_t) -1 && gid != key->gid && !in_group_p(gid))
+ goto no_access;
+ }
+
+ /* change the UID (have to update the quotas) */
+ if (uid != (uid_t) -1 && uid != key->uid) {
+ /* don't support UID changing yet */
+ ret = -EOPNOTSUPP;
+ goto no_access;
+ }
+
+ /* change the GID */
+ if (gid != (gid_t) -1)
+ key->gid = gid;
+
+ ret = 0;
+
+ no_access:
+ write_unlock(&key->lock);
+ up_write(&key->sem);
+ key_put(key);
+ error:
+ return ret;
+
+} /* end keyctl_chown_key() */
+
+/*****************************************************************************/
+/*
+ * change the permission mask on a key
+ * - the keyring owned by the changer
+ * - only bits within KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL are accepted
+ * - implements keyctl(KEYCTL_SETPERM)
+ */
+static long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
+{
+ struct key *key;
+ long ret;
+
+ ret = -EINVAL;
+ if (perm & ~(KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL))
+ goto error;
+
+ key = lookup_user_key(id, 1, 1, 0);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error;
+ }
+
+ /* make the changes with the locks held to prevent chown/chmod
+ * races */
+ ret = -EACCES;
+ down_write(&key->sem);
+ write_lock(&key->lock);
+
+ /* if we're not the sysadmin, we can only chmod a key that we
+ * own */
+ if (!capable(CAP_SYS_ADMIN) && key->uid != current->fsuid)
+ goto no_access;
+
+ /* changing the permissions mask */
+ key->perm = perm;
+ ret = 0;
+
+ no_access:
+ write_unlock(&key->lock);
+ up_write(&key->sem);
+ key_put(key);
+ error:
+ return ret;
+
+} /* end keyctl_setperm_key() */
+
+/*****************************************************************************/
+/*
+ * instantiate the key with the specified payload, and, if one is given, link
+ * the key into the keyring
+ * - the payload is limited to 32767 bytes and may be absent (NULL)
+ * - implements keyctl(KEYCTL_INSTANTIATE)
+ */
+static long keyctl_instantiate_key(key_serial_t id,
+ const void __user *_payload,
+ size_t plen,
+ key_serial_t ringid)
+{
+ struct key *key, *keyring;
+ void *payload;
+ long ret;
+
+ ret = -EINVAL;
+ if (plen > 32767)
+ goto error;
+
+ /* pull the payload in if one was supplied */
+ payload = NULL;
+
+ if (_payload) {
+ ret = -ENOMEM;
+ payload = kmalloc(plen, GFP_KERNEL);
+ if (!payload)
+ goto error;
+
+ ret = -EFAULT;
+ if (copy_from_user(payload, _payload, plen) != 0)
+ goto error2;
+ }
+
+ /* find the target key (which must be writable) */
+ key = lookup_user_key(id, 0, 1, KEY_WRITE);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error2;
+ }
+
+ /* find the destination keyring if present (which must also be
+ * writable) */
+ keyring = NULL;
+ if (ringid) {
+ keyring = lookup_user_key(ringid, 1, 0, KEY_WRITE);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error3;
+ }
+ }
+
+ /* instantiate the key and link it into a keyring */
+ ret = key_instantiate_and_link(key, payload, plen, keyring);
+
+ /* keyring may still be NULL here; key_put() is presumably NULL-safe
+ * as the same pattern is used elsewhere in this file — TODO confirm */
+ key_put(keyring);
+ error3:
+ key_put(key);
+ error2:
+ kfree(payload);
+ error:
+ return ret;
+
+} /* end keyctl_instantiate_key() */
+
+/*****************************************************************************/
+/*
+ * negatively instantiate the key with the given timeout (in seconds), and, if
+ * one is given, link the key into the keyring
+ * - implements keyctl(KEYCTL_NEGATE)
+ */
+static long keyctl_negate_key(key_serial_t id,
+ unsigned timeout,
+ key_serial_t ringid)
+{
+ struct key *key, *keyring;
+ long ret;
+
+ /* find the target key (which must be writable) */
+ key = lookup_user_key(id, 0, 1, KEY_WRITE);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error;
+ }
+
+ /* find the destination keyring if present (which must also be
+ * writable) */
+ keyring = NULL;
+ if (ringid) {
+ keyring = lookup_user_key(ringid, 1, 0, KEY_WRITE);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error2;
+ }
+ }
+
+ /* instantiate the key and link it into a keyring */
+ ret = key_negate_and_link(key, timeout, keyring);
+
+ /* keyring may be NULL here (no destination requested) */
+ key_put(keyring);
+ error2:
+ key_put(key);
+ error:
+ return ret;
+
+} /* end keyctl_negate_key() */
+
+/*****************************************************************************/
+/*
+ * the key control system call
+ * - currently invoked through prctl()
+ * - dispatches on 'option' and casts the remaining args to the types the
+ *   individual handlers expect
+ */
+asmlinkage long sys_keyctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5)
+{
+ switch (option) {
+ case KEYCTL_GET_KEYRING_ID:
+ return keyctl_get_keyring_ID((key_serial_t) arg2,
+ (int) arg3);
+
+ case KEYCTL_JOIN_SESSION_KEYRING:
+ return keyctl_join_session_keyring((const char __user *) arg3);
+
+ case KEYCTL_UPDATE:
+ return keyctl_update_key((key_serial_t) arg2,
+ (const void __user *) arg3,
+ (size_t) arg4);
+
+ case KEYCTL_REVOKE:
+ return keyctl_revoke_key((key_serial_t) arg2);
+
+ case KEYCTL_DESCRIBE:
+ /* cast to size_t to match keyctl_describe_key()'s buflen
+ * parameter (was inconsistently cast to unsigned) */
+ return keyctl_describe_key((key_serial_t) arg2,
+ (char __user *) arg3,
+ (size_t) arg4);
+
+ case KEYCTL_CLEAR:
+ return keyctl_keyring_clear((key_serial_t) arg2);
+
+ case KEYCTL_LINK:
+ return keyctl_keyring_link((key_serial_t) arg2,
+ (key_serial_t) arg3);
+
+ case KEYCTL_UNLINK:
+ return keyctl_keyring_unlink((key_serial_t) arg2,
+ (key_serial_t) arg3);
+
+ case KEYCTL_SEARCH:
+ return keyctl_keyring_search((key_serial_t) arg2,
+ (const char __user *) arg3,
+ (const char __user *) arg4,
+ (key_serial_t) arg5);
+
+ case KEYCTL_READ:
+ return keyctl_read_key((key_serial_t) arg2,
+ (char __user *) arg3,
+ (size_t) arg4);
+
+ case KEYCTL_CHOWN:
+ return keyctl_chown_key((key_serial_t) arg2,
+ (uid_t) arg3,
+ (gid_t) arg4);
+
+ case KEYCTL_SETPERM:
+ return keyctl_setperm_key((key_serial_t) arg2,
+ (key_perm_t) arg3);
+
+ case KEYCTL_INSTANTIATE:
+ return keyctl_instantiate_key((key_serial_t) arg2,
+ (const void __user *) arg3,
+ (size_t) arg4,
+ (key_serial_t) arg5);
+
+ case KEYCTL_NEGATE:
+ return keyctl_negate_key((key_serial_t) arg2,
+ (unsigned) arg3,
+ (key_serial_t) arg4);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+} /* end sys_keyctl() */
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
new file mode 100644
index 000000000000..98d4b0b817f7
--- /dev/null
+++ b/security/keys/keyring.c
@@ -0,0 +1,895 @@
+/* keyring.c: keyring handling
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <asm/uaccess.h>
+#include "internal.h"
+
+/*
+ * when plumbing the depths of the key tree, this sets a hard limit on how
+ * deep we're willing to go
+ */
+#define KEYRING_SEARCH_MAX_DEPTH 6
+
+/*
+ * we keep all named keyrings in a hash to speed looking them up
+ */
+#define KEYRING_NAME_HASH_SIZE (1 << 5)
+
+/* buckets are initialised lazily: a bucket whose .next is still NULL has
+ * never been used (see keyring_publish_name() / find_keyring_by_name()) */
+static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE];
+static rwlock_t keyring_name_lock = RW_LOCK_UNLOCKED;
+
+/* hash a keyring name into a bucket index: simple byte sum, masked down to
+ * the table size */
+static inline unsigned keyring_hash(const char *desc)
+{
+ unsigned acc = 0;
+
+ while (*desc)
+ acc += (unsigned char) *desc++;
+
+ return acc & (KEYRING_NAME_HASH_SIZE - 1);
+}
+
+/*
+ * the keyring type definition
+ * - a keyring's payload is a struct keyring_list of subscribed keys
+ */
+static int keyring_instantiate(struct key *keyring,
+ const void *data, size_t datalen);
+static int keyring_duplicate(struct key *keyring, const struct key *source);
+static int keyring_match(const struct key *keyring, const void *criterion);
+static void keyring_destroy(struct key *keyring);
+static void keyring_describe(const struct key *keyring, struct seq_file *m);
+static long keyring_read(const struct key *keyring,
+ char __user *buffer, size_t buflen);
+
+struct key_type key_type_keyring = {
+ .name = "keyring",
+ .def_datalen = sizeof(struct keyring_list),
+ .instantiate = keyring_instantiate,
+ .duplicate = keyring_duplicate,
+ .match = keyring_match,
+ .destroy = keyring_destroy,
+ .describe = keyring_describe,
+ .read = keyring_read,
+};
+
+/*
+ * semaphore to serialise link/link calls to prevent two link calls in parallel
+ * introducing a cycle
+ */
+DECLARE_RWSEM(keyring_serialise_link_sem);
+
+/*****************************************************************************/
+/*
+ * publish the name of a keyring so that it can be found by name (if it has
+ * one)
+ * - anonymous keyrings (NULL description) are not published
+ */
+void keyring_publish_name(struct key *keyring)
+{
+ int bucket;
+
+ if (keyring->description) {
+ bucket = keyring_hash(keyring->description);
+
+ write_lock(&keyring_name_lock);
+
+ /* initialise the bucket head on first use */
+ if (!keyring_name_hash[bucket].next)
+ INIT_LIST_HEAD(&keyring_name_hash[bucket]);
+
+ list_add_tail(&keyring->type_data.link,
+ &keyring_name_hash[bucket]);
+
+ write_unlock(&keyring_name_lock);
+ }
+
+} /* end keyring_publish_name() */
+
+/*****************************************************************************/
+/*
+ * initialise a keyring
+ * - we object if we were given any data
+ * - returns 0 on success or -EINVAL if a payload was supplied
+ */
+static int keyring_instantiate(struct key *keyring,
+ const void *data, size_t datalen)
+{
+ /* keyrings take no instantiation data */
+ if (datalen != 0)
+ return -EINVAL;
+
+ /* make the keyring available by name if it has one */
+ keyring_publish_name(keyring);
+ return 0;
+
+} /* end keyring_instantiate() */
+
+/*****************************************************************************/
+/*
+ * duplicate the list of subscribed keys from a source keyring into this one
+ * - takes an extra reference on every key copied across
+ * - returns 0 on success or -ENOMEM
+ */
+static int keyring_duplicate(struct key *keyring, const struct key *source)
+{
+ struct keyring_list *sklist, *klist;
+ unsigned max;
+ size_t size;
+ int loop, ret;
+
+ /* the list holds key *pointers*, so size everything by
+ * sizeof(struct key *), not sizeof(struct key) — matches the sizing
+ * used by __key_link() */
+ const unsigned limit =
+ (PAGE_SIZE - sizeof(*klist)) / sizeof(struct key *);
+
+ ret = 0;
+ sklist = source->payload.subscriptions;
+
+ if (sklist && sklist->nkeys > 0) {
+ max = sklist->nkeys;
+ BUG_ON(max > limit);
+
+ /* round the capacity up to a multiple of four entries */
+ max = (max + 3) & ~3;
+ if (max > limit)
+ max = limit;
+
+ ret = -ENOMEM;
+ size = sizeof(*klist) + sizeof(struct key *) * max;
+ klist = kmalloc(size, GFP_KERNEL);
+ if (!klist)
+ goto error;
+
+ klist->maxkeys = max;
+ klist->nkeys = sklist->nkeys;
+ memcpy(klist->keys,
+ sklist->keys,
+ sklist->nkeys * sizeof(struct key *));
+
+ /* pin each subscribed key */
+ for (loop = klist->nkeys - 1; loop >= 0; loop--)
+ atomic_inc(&klist->keys[loop]->usage);
+
+ keyring->payload.subscriptions = klist;
+ ret = 0;
+ }
+
+ error:
+ return ret;
+
+} /* end keyring_duplicate() */
+
+/*****************************************************************************/
+/*
+ * match keyrings on their name
+ * - anonymous keyrings (NULL description) never match
+ */
+static int keyring_match(const struct key *keyring, const void *description)
+{
+ if (!keyring->description)
+ return 0;
+
+ return strcmp(keyring->description, description) == 0;
+
+} /* end keyring_match() */
+
+/*****************************************************************************/
+/*
+ * dispose of the data dangling from the corpse of a keyring
+ * - unhashes a named keyring and drops the references on all subscribed keys
+ */
+static void keyring_destroy(struct key *keyring)
+{
+ struct keyring_list *klist;
+ int loop;
+
+ if (keyring->description) {
+ write_lock(&keyring_name_lock);
+ list_del(&keyring->type_data.link);
+ write_unlock(&keyring_name_lock);
+ }
+
+ klist = keyring->payload.subscriptions;
+ if (klist) {
+ for (loop = klist->nkeys - 1; loop >= 0; loop--)
+ key_put(klist->keys[loop]);
+ kfree(klist);
+ }
+
+} /* end keyring_destroy() */
+
+/*****************************************************************************/
+/*
+ * describe the keyring: name (or "[anon]") followed by the key count and
+ * capacity, or ": empty" if nothing is subscribed
+ */
+static void keyring_describe(const struct key *keyring, struct seq_file *m)
+{
+ struct keyring_list *subs;
+
+ seq_puts(m, keyring->description ? keyring->description : "[anon]");
+
+ subs = keyring->payload.subscriptions;
+ if (!subs)
+ seq_puts(m, ": empty");
+ else
+ seq_printf(m, ": %u/%u", subs->nkeys, subs->maxkeys);
+
+} /* end keyring_describe() */
+
+/*****************************************************************************/
+/*
+ * read a list of key IDs from the keyring's contents
+ * - returns the total size of the ID list even if only part (or none) of it
+ *   was copied into the buffer
+ */
+static long keyring_read(const struct key *keyring,
+ char __user *buffer, size_t buflen)
+{
+ struct keyring_list *klist;
+ struct key *key;
+ size_t qty, tmp;
+ int loop, ret;
+
+ ret = 0;
+ klist = keyring->payload.subscriptions;
+
+ if (klist) {
+ /* calculate how much data we could return */
+ qty = klist->nkeys * sizeof(key_serial_t);
+
+ if (buffer && buflen > 0) {
+ if (buflen > qty)
+ buflen = qty;
+
+ /* copy the IDs of the subscribed keys into the
+ * buffer */
+ ret = -EFAULT;
+
+ for (loop = 0; loop < klist->nkeys; loop++) {
+ key = klist->keys[loop];
+
+ /* the final ID may be copied partially if the
+ * buffer isn't serial-aligned in size */
+ tmp = sizeof(key_serial_t);
+ if (tmp > buflen)
+ tmp = buflen;
+
+ if (copy_to_user(buffer,
+ &key->serial,
+ tmp) != 0)
+ goto error;
+
+ buflen -= tmp;
+ if (buflen == 0)
+ break;
+ buffer += tmp;
+ }
+ }
+
+ ret = qty;
+ }
+
+ error:
+ return ret;
+
+} /* end keyring_read() */
+
+/*****************************************************************************/
+/*
+ * allocate a keyring and link into the destination keyring
+ * - returns the new keyring or an ERR_PTR; on instantiation failure the
+ *   partially-built keyring is released before returning the error
+ */
+struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
+ int not_in_quota, struct key *dest)
+{
+ struct key *keyring;
+ int ret;
+
+ keyring = key_alloc(&key_type_keyring, description,
+ uid, gid, KEY_USR_ALL, not_in_quota);
+
+ if (!IS_ERR(keyring)) {
+ ret = key_instantiate_and_link(keyring, NULL, 0, dest);
+ if (ret < 0) {
+ key_put(keyring);
+ keyring = ERR_PTR(ret);
+ }
+ }
+
+ return keyring;
+
+} /* end keyring_alloc() */
+
+/*****************************************************************************/
+/*
+ * search the supplied keyring tree for a key that matches the criterion
+ * - perform a breadth-then-depth search up to the prescribed limit
+ * - we only find keys on which we have search permission
+ * - we use the supplied match function to see if the description (or other
+ *   feature of interest) matches
+ * - we readlock the keyrings as we search down the tree; every keyring on
+ *   the descent stack stays read-locked until it is popped or the search
+ *   finishes
+ * - we return -EAGAIN if we didn't find any matching key
+ * - we return -ENOKEY if we only found negative matching keys
+ */
+struct key *keyring_search_aux(struct key *keyring,
+ struct key_type *type,
+ const void *description,
+ key_match_func_t match)
+{
+ struct {
+ struct key *keyring;
+ int kix;
+ } stack[KEYRING_SEARCH_MAX_DEPTH];
+
+ struct keyring_list *keylist;
+ struct timespec now;
+ struct key *key;
+ long err;
+ int sp, psp, kix;
+
+ key_check(keyring);
+
+ /* top keyring must have search permission to begin the search */
+ key = ERR_PTR(-EACCES);
+ if (!key_permission(keyring, KEY_SEARCH))
+ goto error;
+
+ key = ERR_PTR(-ENOTDIR);
+ if (keyring->type != &key_type_keyring)
+ goto error;
+
+ now = current_kernel_time();
+ err = -EAGAIN;
+ sp = 0;
+
+ /* start processing a new keyring */
+ descend:
+ read_lock(&keyring->lock);
+ if (keyring->flags & KEY_FLAG_REVOKED)
+ goto not_this_keyring;
+
+ keylist = keyring->payload.subscriptions;
+ if (!keylist)
+ goto not_this_keyring;
+
+ /* iterate through the keys in this keyring first */
+ for (kix = 0; kix < keylist->nkeys; kix++) {
+ key = keylist->keys[kix];
+
+ /* ignore keys not of this type */
+ if (key->type != type)
+ continue;
+
+ /* skip revoked keys and expired keys */
+ if (key->flags & KEY_FLAG_REVOKED)
+ continue;
+
+ if (key->expiry && now.tv_sec >= key->expiry)
+ continue;
+
+ /* keys that don't match */
+ if (!match(key, description))
+ continue;
+
+ /* key must have search permissions */
+ if (!key_permission(key, KEY_SEARCH))
+ continue;
+
+ /* we set a different error code if we find a negative key */
+ if (key->flags & KEY_FLAG_NEGATIVE) {
+ err = -ENOKEY;
+ continue;
+ }
+
+ goto found;
+ }
+
+ /* search through the keyrings nested in this one */
+ kix = 0;
+ ascend:
+ while (kix < keylist->nkeys) {
+ key = keylist->keys[kix];
+ if (key->type != &key_type_keyring)
+ goto next;
+
+ /* recursively search nested keyrings
+ * - only search keyrings for which we have search permission
+ */
+ if (sp >= KEYRING_SEARCH_MAX_DEPTH)
+ goto next;
+
+ if (!key_permission(key, KEY_SEARCH))
+ goto next;
+
+ /* evade loops in the keyring tree */
+ /* NOTE(review): this compares stack entries against 'keyring'
+ * (the parent being pushed) rather than 'key' (the child we
+ * are about to enter); the depth limit still bounds the walk,
+ * but verify whether 'key' was intended here */
+ for (psp = 0; psp < sp; psp++)
+ if (stack[psp].keyring == keyring)
+ goto next;
+
+ /* stack the current position */
+ stack[sp].keyring = keyring;
+ stack[sp].kix = kix;
+ sp++;
+
+ /* begin again with the new keyring */
+ keyring = key;
+ goto descend;
+
+ next:
+ kix++;
+ }
+
+ /* the keyring we're looking at was disqualified or didn't contain a
+ * matching key */
+ not_this_keyring:
+ read_unlock(&keyring->lock);
+
+ if (sp > 0) {
+ /* resume the processing of a keyring higher up in the tree */
+ sp--;
+ keyring = stack[sp].keyring;
+ keylist = keyring->payload.subscriptions;
+ kix = stack[sp].kix + 1;
+ goto ascend;
+ }
+
+ key = ERR_PTR(err);
+ goto error;
+
+ /* we found a viable match */
+ found:
+ /* pin the key before any locks are dropped */
+ atomic_inc(&key->usage);
+ read_unlock(&keyring->lock);
+
+ /* unwind the keyring stack */
+ while (sp > 0) {
+ sp--;
+ read_unlock(&stack[sp].keyring->lock);
+ }
+
+ key_check(key);
+ error:
+ return key;
+
+} /* end keyring_search_aux() */
+
+/*****************************************************************************/
+/*
+ * search the supplied keyring tree for a key that matches the criterion
+ * - perform a breadth-then-depth search up to the prescribed limit
+ * - we only find keys on which we have search permission
+ * - we readlock the keyrings as we search down the tree
+ * - we return -EAGAIN if we didn't find any matching key
+ * - we return -ENOKEY if we only found negative matching keys
+ * - convenience wrapper around keyring_search_aux() using the key type's own
+ *   match function
+ */
+struct key *keyring_search(struct key *keyring,
+ struct key_type *type,
+ const char *description)
+{
+ return keyring_search_aux(keyring, type, description, type->match);
+
+} /* end keyring_search() */
+
+EXPORT_SYMBOL(keyring_search);
+
+/*****************************************************************************/
+/*
+ * search the given keyring only (no recursion)
+ * - keyring must be locked by caller
+ * - on success the found key's usage count is incremented and the key is
+ *   returned; otherwise ERR_PTR(-ENOKEY)
+ */
+struct key *__keyring_search_one(struct key *keyring,
+ const struct key_type *ktype,
+ const char *description,
+ key_perm_t perm)
+{
+ struct keyring_list *klist;
+ struct key *key;
+ int loop;
+
+ klist = keyring->payload.subscriptions;
+ if (klist) {
+ for (loop = 0; loop < klist->nkeys; loop++) {
+ key = klist->keys[loop];
+
+ /* a candidate must match on type, description and
+ * permission, and must not be revoked */
+ if (key->type == ktype &&
+ key->type->match(key, description) &&
+ key_permission(key, perm) &&
+ !(key->flags & KEY_FLAG_REVOKED)
+ )
+ goto found;
+ }
+ }
+
+ key = ERR_PTR(-ENOKEY);
+ goto error;
+
+ found:
+ atomic_inc(&key->usage);
+ error:
+ return key;
+
+} /* end __keyring_search_one() */
+
+/*****************************************************************************/
+/*
+ * find a keyring with the specified name
+ * - all named keyrings are searched
+ * - only find keyrings with search permission for the process
+ * - only find keyrings with a serial number greater than the one specified
+ * - returns the keyring with its usage count incremented, or an ERR_PTR
+ */
+struct key *find_keyring_by_name(const char *name, key_serial_t bound)
+{
+ struct key *keyring;
+ int bucket;
+
+ keyring = ERR_PTR(-EINVAL);
+ if (!name)
+ goto error;
+
+ bucket = keyring_hash(name);
+
+ read_lock(&keyring_name_lock);
+
+ /* a NULL .next means this bucket was never initialised, so it can't
+ * hold any keyrings */
+ if (keyring_name_hash[bucket].next) {
+ /* search this hash bucket for a keyring with a matching name
+ * that's readable and that hasn't been revoked */
+ list_for_each_entry(keyring,
+ &keyring_name_hash[bucket],
+ type_data.link
+ ) {
+ if (keyring->flags & KEY_FLAG_REVOKED)
+ continue;
+
+ if (strcmp(keyring->description, name) != 0)
+ continue;
+
+ if (!key_permission(keyring, KEY_SEARCH))
+ continue;
+
+ /* found a potential candidate, but we still need to
+ * check the serial number */
+ if (keyring->serial <= bound)
+ continue;
+
+ /* we've got a match */
+ atomic_inc(&keyring->usage);
+ read_unlock(&keyring_name_lock);
+ goto error;
+ }
+ }
+
+ read_unlock(&keyring_name_lock);
+ keyring = ERR_PTR(-ENOKEY);
+
+ error:
+ return keyring;
+
+} /* end find_keyring_by_name() */
+
+/*****************************************************************************/
+/*
+ * see if a cycle will be created by inserting acyclic tree B in acyclic
+ * tree A at the topmost level (ie: as a direct child of A)
+ * - since we are adding B to A at the top level, checking for cycles should
+ *   just be a matter of seeing if node A is somewhere in tree B
+ * - returns 0 if no cycle, -EDEADLK if a cycle would form, or -ELOOP if
+ *   tree B is deeper than the search limit
+ */
+static int keyring_detect_cycle(struct key *A, struct key *B)
+{
+ struct {
+ struct key *subtree;
+ int kix;
+ } stack[KEYRING_SEARCH_MAX_DEPTH];
+
+ struct keyring_list *keylist;
+ struct key *subtree, *key;
+ int sp, kix, ret;
+
+ ret = -EDEADLK;
+ if (A == B)
+ goto error;
+
+ subtree = B;
+ sp = 0;
+
+ /* start processing a new keyring */
+ descend:
+ read_lock(&subtree->lock);
+ if (subtree->flags & KEY_FLAG_REVOKED)
+ goto not_this_keyring;
+
+ keylist = subtree->payload.subscriptions;
+ if (!keylist)
+ goto not_this_keyring;
+ kix = 0;
+
+ ascend:
+ /* iterate through the remaining keys in this keyring */
+ for (; kix < keylist->nkeys; kix++) {
+ key = keylist->keys[kix];
+
+ if (key == A)
+ goto cycle_detected;
+
+ /* recursively check nested keyrings */
+ if (key->type == &key_type_keyring) {
+ if (sp >= KEYRING_SEARCH_MAX_DEPTH)
+ goto too_deep;
+
+ /* stack the current position */
+ stack[sp].subtree = subtree;
+ stack[sp].kix = kix;
+ sp++;
+
+ /* begin again with the new keyring */
+ subtree = key;
+ goto descend;
+ }
+ }
+
+ /* the keyring we're looking at was disqualified or didn't contain a
+ * matching key */
+ not_this_keyring:
+ read_unlock(&subtree->lock);
+
+ if (sp > 0) {
+ /* resume the checking of a keyring higher up in the tree */
+ sp--;
+ subtree = stack[sp].subtree;
+ keylist = subtree->payload.subscriptions;
+ kix = stack[sp].kix + 1;
+ goto ascend;
+ }
+
+ ret = 0; /* no cycles detected */
+
+ error:
+ return ret;
+
+ too_deep:
+ ret = -ELOOP;
+ goto error_unwind;
+ cycle_detected:
+ ret = -EDEADLK;
+ error_unwind:
+ read_unlock(&subtree->lock);
+
+ /* unwind the keyring stack, releasing the read locks taken on the
+ * way down */
+ while (sp > 0) {
+ sp--;
+ read_unlock(&stack[sp].subtree->lock);
+ }
+
+ goto error;
+
+} /* end keyring_detect_cycle() */
+
+/*****************************************************************************/
+/*
+ * link a key into to a keyring
+ * - must be called with the keyring's semaphore held
+ */
+int __key_link(struct key *keyring, struct key *key)
+{
+ struct keyring_list *klist, *nklist;
+ unsigned max;
+ size_t size;
+ int ret;
+
+ ret = -EKEYREVOKED;
+ if (keyring->flags & KEY_FLAG_REVOKED)
+ goto error;
+
+ ret = -ENOTDIR;
+ if (keyring->type != &key_type_keyring)
+ goto error;
+
+ /* serialise link/link calls to prevent parallel calls causing a
+ * cycle when applied to two keyrings in opposite orders */
+ down_write(&keyring_serialise_link_sem);
+
+ /* check that we aren't going to create a cycle adding one keyring to
+ * another */
+ if (key->type == &key_type_keyring) {
+ ret = keyring_detect_cycle(keyring, key);
+ if (ret < 0)
+ goto error2;
+ }
+
+ /* check that we aren't going to overrun the user's quota */
+ ret = key_payload_reserve(keyring,
+ keyring->datalen + KEYQUOTA_LINK_BYTES);
+ if (ret < 0)
+ goto error2;
+
+ klist = keyring->payload.subscriptions;
+
+ if (klist && klist->nkeys < klist->maxkeys) {
+ /* there's sufficient slack space to add directly */
+ atomic_inc(&key->usage);
+
+ write_lock(&keyring->lock);
+ klist->keys[klist->nkeys++] = key;
+ write_unlock(&keyring->lock);
+
+ ret = 0;
+ }
+ else {
+ /* grow the key list */
+ max = 4;
+ if (klist)
+ max += klist->maxkeys;
+
+ ret = -ENFILE;
+ size = sizeof(*klist) + sizeof(*key) * max;
+ if (size > PAGE_SIZE)
+ goto error3;
+
+ ret = -ENOMEM;
+ nklist = kmalloc(size, GFP_KERNEL);
+ if (!nklist)
+ goto error3;
+ nklist->maxkeys = max;
+ nklist->nkeys = 0;
+
+ if (klist) {
+ nklist->nkeys = klist->nkeys;
+ memcpy(nklist->keys,
+ klist->keys,
+ sizeof(struct key *) * klist->nkeys);
+ }
+
+ /* add the key into the new space */
+ atomic_inc(&key->usage);
+
+ write_lock(&keyring->lock);
+ keyring->payload.subscriptions = nklist;
+ nklist->keys[nklist->nkeys++] = key;
+ write_unlock(&keyring->lock);
+
+ /* dispose of the old keyring list */
+ kfree(klist);
+
+ ret = 0;
+ }
+
+ error2:
+ up_write(&keyring_serialise_link_sem);
+ error:
+ return ret;
+
+ error3:
+ /* undo the quota changes */
+ key_payload_reserve(keyring,
+ keyring->datalen - KEYQUOTA_LINK_BYTES);
+ goto error2;
+
+} /* end __key_link() */
+
+/*****************************************************************************/
+/*
+ * link a key to a keyring
+ */
+int key_link(struct key *keyring, struct key *key)
+{
+ int ret;
+
+ key_check(keyring);
+ key_check(key);
+
+ down_write(&keyring->sem);
+ ret = __key_link(keyring, key);
+ up_write(&keyring->sem);
+
+ return ret;
+
+} /* end key_link() */
+
+EXPORT_SYMBOL(key_link);
+
+/*****************************************************************************/
+/*
+ * unlink the first link to a key from a keyring
+ */
+int key_unlink(struct key *keyring, struct key *key)
+{
+ struct keyring_list *klist;
+ int loop, ret;
+
+ key_check(keyring);
+ key_check(key);
+
+ ret = -ENOTDIR;
+ if (keyring->type != &key_type_keyring)
+ goto error;
+
+ down_write(&keyring->sem);
+
+ klist = keyring->payload.subscriptions;
+ if (klist) {
+ /* search the keyring for the key */
+ for (loop = 0; loop < klist->nkeys; loop++)
+ if (klist->keys[loop] == key)
+ goto key_is_present;
+ }
+
+ up_write(&keyring->sem);
+ ret = -ENOENT;
+ goto error;
+
+ key_is_present:
+ /* adjust the user's quota */
+ key_payload_reserve(keyring,
+ keyring->datalen - KEYQUOTA_LINK_BYTES);
+
+ /* shuffle down the key pointers
+ * - it might be worth shrinking the allocated memory, but that runs
+ * the risk of ENOMEM as we would have to copy
+ */
+ write_lock(&keyring->lock);
+
+ klist->nkeys--;
+ if (loop < klist->nkeys)
+ memcpy(&klist->keys[loop],
+ &klist->keys[loop + 1],
+ (klist->nkeys - loop) * sizeof(struct key *));
+
+ write_unlock(&keyring->lock);
+
+ up_write(&keyring->sem);
+ key_put(key);
+ ret = 0;
+
+ error:
+ return ret;
+
+} /* end key_unlink() */
+
+EXPORT_SYMBOL(key_unlink);
+
+/*****************************************************************************/
+/*
+ * clear the specified process keyring
+ * - implements keyctl(KEYCTL_CLEAR)
+ */
+int keyring_clear(struct key *keyring)
+{
+ struct keyring_list *klist;
+ int loop, ret;
+
+ ret = -ENOTDIR;
+ if (keyring->type == &key_type_keyring) {
+ /* detach the pointer block with the locks held */
+ down_write(&keyring->sem);
+
+ klist = keyring->payload.subscriptions;
+ if (klist) {
+ /* adjust the quota */
+ key_payload_reserve(keyring,
+ sizeof(struct keyring_list));
+
+ write_lock(&keyring->lock);
+ keyring->payload.subscriptions = NULL;
+ write_unlock(&keyring->lock);
+ }
+
+ up_write(&keyring->sem);
+
+ /* free the keys after the locks have been dropped */
+ if (klist) {
+ for (loop = klist->nkeys - 1; loop >= 0; loop--)
+ key_put(klist->keys[loop]);
+
+ kfree(klist);
+ }
+
+ ret = 0;
+ }
+
+ return ret;
+
+} /* end keyring_clear() */
+
+EXPORT_SYMBOL(keyring_clear);
diff --git a/security/keys/proc.c b/security/keys/proc.c
new file mode 100644
index 000000000000..91343b85c39c
--- /dev/null
+++ b/security/keys/proc.c
@@ -0,0 +1,251 @@
+/* proc.c: proc files for key database enumeration
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/errno.h>
+#include "internal.h"
+
+#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
+static int proc_keys_open(struct inode *inode, struct file *file);
+static void *proc_keys_start(struct seq_file *p, loff_t *_pos);
+static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos);
+static void proc_keys_stop(struct seq_file *p, void *v);
+static int proc_keys_show(struct seq_file *m, void *v);
+
+static struct seq_operations proc_keys_ops = {
+ .start = proc_keys_start,
+ .next = proc_keys_next,
+ .stop = proc_keys_stop,
+ .show = proc_keys_show,
+};
+
+static struct file_operations proc_keys_fops = {
+ .open = proc_keys_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif
+
+static int proc_key_users_open(struct inode *inode, struct file *file);
+static void *proc_key_users_start(struct seq_file *p, loff_t *_pos);
+static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos);
+static void proc_key_users_stop(struct seq_file *p, void *v);
+static int proc_key_users_show(struct seq_file *m, void *v);
+
+static struct seq_operations proc_key_users_ops = {
+ .start = proc_key_users_start,
+ .next = proc_key_users_next,
+ .stop = proc_key_users_stop,
+ .show = proc_key_users_show,
+};
+
+static struct file_operations proc_key_users_fops = {
+ .open = proc_key_users_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*****************************************************************************/
+/*
+ * declare the /proc files
+ */
+static int __init key_proc_init(void)
+{
+ struct proc_dir_entry *p;
+
+#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
+ p = create_proc_entry("keys", 0, NULL);
+ if (!p)
+ panic("Cannot create /proc/keys\n");
+
+ p->proc_fops = &proc_keys_fops;
+#endif
+
+ p = create_proc_entry("key-users", 0, NULL);
+ if (!p)
+ panic("Cannot create /proc/key-users\n");
+
+ p->proc_fops = &proc_key_users_fops;
+
+ return 0;
+
+} /* end key_proc_init() */
+
+__initcall(key_proc_init);
+
+/*****************************************************************************/
+/*
+ * implement "/proc/keys" to provide a list of the keys on the system
+ */
+#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
+
+static int proc_keys_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &proc_keys_ops);
+
+}
+
+static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
+{
+ struct rb_node *_p;
+ loff_t pos = *_pos;
+
+ spin_lock(&key_serial_lock);
+
+ _p = rb_first(&key_serial_tree);
+ while (pos > 0 && _p) {
+ pos--;
+ _p = rb_next(_p);
+ }
+
+ return _p;
+
+}
+
+static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
+{
+ (*_pos)++;
+ return rb_next((struct rb_node *) v);
+
+}
+
+static void proc_keys_stop(struct seq_file *p, void *v)
+{
+ spin_unlock(&key_serial_lock);
+}
+
+static int proc_keys_show(struct seq_file *m, void *v)
+{
+ struct rb_node *_p = v;
+ struct key *key = rb_entry(_p, struct key, serial_node);
+ struct timespec now;
+ unsigned long timo;
+ char xbuf[12];
+
+ now = current_kernel_time();
+
+ read_lock(&key->lock);
+
+ /* come up with a suitable timeout value */
+ if (key->expiry == 0) {
+ memcpy(xbuf, "perm", 5);
+ }
+ else if (now.tv_sec >= key->expiry) {
+ memcpy(xbuf, "expd", 5);
+ }
+ else {
+ timo = key->expiry - now.tv_sec;
+
+ if (timo < 60)
+ sprintf(xbuf, "%lus", timo);
+ else if (timo < 60*60)
+ sprintf(xbuf, "%lum", timo / 60);
+ else if (timo < 60*60*24)
+ sprintf(xbuf, "%luh", timo / (60*60));
+ else if (timo < 60*60*24*7)
+ sprintf(xbuf, "%lud", timo / (60*60*24));
+ else
+ sprintf(xbuf, "%luw", timo / (60*60*24*7));
+ }
+
+ seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %06x %5d %5d %-9.9s ",
+ key->serial,
+ key->flags & KEY_FLAG_INSTANTIATED ? 'I' : '-',
+ key->flags & KEY_FLAG_REVOKED ? 'R' : '-',
+ key->flags & KEY_FLAG_DEAD ? 'D' : '-',
+ key->flags & KEY_FLAG_IN_QUOTA ? 'Q' : '-',
+ key->flags & KEY_FLAG_USER_CONSTRUCT ? 'U' : '-',
+ key->flags & KEY_FLAG_NEGATIVE ? 'N' : '-',
+ atomic_read(&key->usage),
+ xbuf,
+ key->perm,
+ key->uid,
+ key->gid,
+ key->type->name);
+
+ if (key->type->describe)
+ key->type->describe(key, m);
+ seq_putc(m, '\n');
+
+ read_unlock(&key->lock);
+
+ return 0;
+
+}
+
+#endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */
+
+/*****************************************************************************/
+/*
+ * implement "/proc/key-users" to provide a list of the key users
+ */
+static int proc_key_users_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &proc_key_users_ops);
+
+}
+
+static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
+{
+ struct rb_node *_p;
+ loff_t pos = *_pos;
+
+ spin_lock(&key_user_lock);
+
+ _p = rb_first(&key_user_tree);
+ while (pos > 0 && _p) {
+ pos--;
+ _p = rb_next(_p);
+ }
+
+ return _p;
+
+}
+
+static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
+{
+ (*_pos)++;
+ return rb_next((struct rb_node *) v);
+
+}
+
+static void proc_key_users_stop(struct seq_file *p, void *v)
+{
+ spin_unlock(&key_user_lock);
+}
+
+static int proc_key_users_show(struct seq_file *m, void *v)
+{
+ struct rb_node *_p = v;
+ struct key_user *user = rb_entry(_p, struct key_user, node);
+
+ seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
+ user->uid,
+ atomic_read(&user->usage),
+ atomic_read(&user->nkeys),
+ atomic_read(&user->nikeys),
+ user->qnkeys,
+ KEYQUOTA_MAX_KEYS,
+ user->qnbytes,
+ KEYQUOTA_MAX_BYTES
+ );
+
+ return 0;
+
+}
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
new file mode 100644
index 000000000000..e5bd7b940550
--- /dev/null
+++ b/security/keys/process_keys.c
@@ -0,0 +1,640 @@
+/* process_keys.c: management of a process's keyrings
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/keyctl.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <asm/uaccess.h>
+#include "internal.h"
+
+/* session keyring create vs join semaphore */
+static DECLARE_MUTEX(key_session_sem);
+
+/* the root user's tracking struct */
+struct key_user root_key_user = {
+ .usage = ATOMIC_INIT(3),
+ .consq = LIST_HEAD_INIT(root_key_user.consq),
+ .lock = SPIN_LOCK_UNLOCKED,
+ .nkeys = ATOMIC_INIT(2),
+ .nikeys = ATOMIC_INIT(2),
+ .uid = 0,
+};
+
+/* the root user's UID keyring */
+struct key root_user_keyring = {
+ .usage = ATOMIC_INIT(1),
+ .serial = 2,
+ .type = &key_type_keyring,
+ .user = &root_key_user,
+ .lock = RW_LOCK_UNLOCKED,
+ .sem = __RWSEM_INITIALIZER(root_user_keyring.sem),
+ .perm = KEY_USR_ALL,
+ .flags = KEY_FLAG_INSTANTIATED,
+ .description = "_uid.0",
+#ifdef KEY_DEBUGGING
+ .magic = KEY_DEBUG_MAGIC,
+#endif
+};
+
+/* the root user's default session keyring */
+struct key root_session_keyring = {
+ .usage = ATOMIC_INIT(1),
+ .serial = 1,
+ .type = &key_type_keyring,
+ .user = &root_key_user,
+ .lock = RW_LOCK_UNLOCKED,
+ .sem = __RWSEM_INITIALIZER(root_session_keyring.sem),
+ .perm = KEY_USR_ALL,
+ .flags = KEY_FLAG_INSTANTIATED,
+ .description = "_uid_ses.0",
+#ifdef KEY_DEBUGGING
+ .magic = KEY_DEBUG_MAGIC,
+#endif
+};
+
+/*****************************************************************************/
+/*
+ * allocate the keyrings to be associated with a UID
+ */
+int alloc_uid_keyring(struct user_struct *user)
+{
+ struct key *uid_keyring, *session_keyring;
+ char buf[20];
+ int ret;
+
+ /* concoct a default session keyring */
+ sprintf(buf, "_uid_ses.%u", user->uid);
+
+ session_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, 0, NULL);
+ if (IS_ERR(session_keyring)) {
+ ret = PTR_ERR(session_keyring);
+ goto error;
+ }
+
+ /* and a UID specific keyring, pointed to by the default session
+ * keyring */
+ sprintf(buf, "_uid.%u", user->uid);
+
+ uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, 0,
+ session_keyring);
+ if (IS_ERR(uid_keyring)) {
+ key_put(session_keyring);
+ ret = PTR_ERR(uid_keyring);
+ goto error;
+ }
+
+ /* install the keyrings */
+ user->uid_keyring = uid_keyring;
+ user->session_keyring = session_keyring;
+ ret = 0;
+
+ error:
+ return ret;
+
+} /* end alloc_uid_keyring() */
+
+/*****************************************************************************/
+/*
+ * deal with the UID changing
+ */
+void switch_uid_keyring(struct user_struct *new_user)
+{
+#if 0 /* do nothing for now */
+ struct key *old;
+
+ /* switch to the new user's session keyring if we were running under
+ * root's default session keyring */
+ if (new_user->uid != 0 &&
+ current->session_keyring == &root_session_keyring
+ ) {
+ atomic_inc(&new_user->session_keyring->usage);
+
+ task_lock(current);
+ old = current->session_keyring;
+ current->session_keyring = new_user->session_keyring;
+ task_unlock(current);
+
+ key_put(old);
+ }
+#endif
+
+} /* end switch_uid_keyring() */
+
+/*****************************************************************************/
+/*
+ * install a fresh thread keyring, discarding the old one
+ */
+int install_thread_keyring(struct task_struct *tsk)
+{
+ struct key *keyring, *old;
+ char buf[20];
+ int ret;
+
+ sprintf(buf, "_tid.%u", tsk->pid);
+
+ keyring = keyring_alloc(buf, tsk->uid, tsk->gid, 1, NULL);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error;
+ }
+
+ task_lock(tsk);
+ old = tsk->thread_keyring;
+ tsk->thread_keyring = keyring;
+ task_unlock(tsk);
+
+ ret = 0;
+
+ key_put(old);
+ error:
+ return ret;
+
+} /* end install_thread_keyring() */
+
+/*****************************************************************************/
+/*
+ * install a fresh process keyring, discarding the old one
+ */
+static int install_process_keyring(struct task_struct *tsk)
+{
+ struct key *keyring, *old;
+ char buf[20];
+ int ret;
+
+ sprintf(buf, "_pid.%u", tsk->tgid);
+
+ keyring = keyring_alloc(buf, tsk->uid, tsk->gid, 1, NULL);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error;
+ }
+
+ task_lock(tsk);
+ old = tsk->process_keyring;
+ tsk->process_keyring = keyring;
+ task_unlock(tsk);
+
+ ret = 0;
+
+ key_put(old);
+ error:
+ return ret;
+
+} /* end install_process_keyring() */
+
+/*****************************************************************************/
+/*
+ * install a session keyring, discarding the old one
+ * - if a keyring is not supplied, an empty one is invented
+ */
+static int install_session_keyring(struct task_struct *tsk,
+ struct key *keyring)
+{
+ struct key *old;
+ char buf[20];
+ int ret;
+
+ /* create an empty session keyring */
+ if (!keyring) {
+ sprintf(buf, "_ses.%u", tsk->tgid);
+
+ keyring = keyring_alloc(buf, tsk->uid, tsk->gid, 1, NULL);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error;
+ }
+ }
+ else {
+ atomic_inc(&keyring->usage);
+ }
+
+ /* install the keyring */
+ task_lock(tsk);
+ old = tsk->session_keyring;
+ tsk->session_keyring = keyring;
+ task_unlock(tsk);
+
+ ret = 0;
+
+ key_put(old);
+ error:
+ return ret;
+
+} /* end install_session_keyring() */
+
+/*****************************************************************************/
+/*
+ * copy the keys for fork
+ */
+int copy_keys(unsigned long clone_flags, struct task_struct *tsk)
+{
+ int ret = 0;
+
+ key_check(tsk->session_keyring);
+ key_check(tsk->process_keyring);
+ key_check(tsk->thread_keyring);
+
+ if (tsk->session_keyring)
+ atomic_inc(&tsk->session_keyring->usage);
+
+ if (tsk->process_keyring) {
+ if (clone_flags & CLONE_THREAD) {
+ atomic_inc(&tsk->process_keyring->usage);
+ }
+ else {
+ tsk->process_keyring = NULL;
+ ret = install_process_keyring(tsk);
+ }
+ }
+
+ tsk->thread_keyring = NULL;
+ return ret;
+
+} /* end copy_keys() */
+
+/*****************************************************************************/
+/*
+ * dispose of keys upon exit
+ */
+void exit_keys(struct task_struct *tsk)
+{
+ key_put(tsk->session_keyring);
+ key_put(tsk->process_keyring);
+ key_put(tsk->thread_keyring);
+
+} /* end exit_keys() */
+
+/*****************************************************************************/
+/*
+ * deal with execve()
+ */
+int exec_keys(struct task_struct *tsk)
+{
+ struct key *old;
+
+ /* newly exec'd tasks don't get a thread keyring */
+ task_lock(tsk);
+ old = tsk->thread_keyring;
+ tsk->thread_keyring = NULL;
+ task_unlock(tsk);
+
+ key_put(old);
+
+ /* newly exec'd tasks get a fresh process keyring */
+ return install_process_keyring(tsk);
+
+} /* end exec_keys() */
+
+/*****************************************************************************/
+/*
+ * deal with SUID programs
+ * - we might want to make this invent a new session keyring
+ */
+int suid_keys(struct task_struct *tsk)
+{
+ return 0;
+
+} /* end suid_keys() */
+
+/*****************************************************************************/
+/*
+ * the filesystem user ID changed
+ */
+void key_fsuid_changed(struct task_struct *tsk)
+{
+ /* update the ownership of the process keyring */
+ if (tsk->process_keyring) {
+ down_write(&tsk->process_keyring->sem);
+ write_lock(&tsk->process_keyring->lock);
+ tsk->process_keyring->uid = tsk->fsuid;
+ write_unlock(&tsk->process_keyring->lock);
+ up_write(&tsk->process_keyring->sem);
+ }
+
+ /* update the ownership of the thread keyring */
+ if (tsk->thread_keyring) {
+ down_write(&tsk->thread_keyring->sem);
+ write_lock(&tsk->thread_keyring->lock);
+ tsk->thread_keyring->uid = tsk->fsuid;
+ write_unlock(&tsk->thread_keyring->lock);
+ up_write(&tsk->thread_keyring->sem);
+ }
+
+} /* end key_fsuid_changed() */
+
+/*****************************************************************************/
+/*
+ * the filesystem group ID changed
+ */
+void key_fsgid_changed(struct task_struct *tsk)
+{
+ /* update the ownership of the process keyring */
+ if (tsk->process_keyring) {
+ down_write(&tsk->process_keyring->sem);
+ write_lock(&tsk->process_keyring->lock);
+ tsk->process_keyring->gid = tsk->fsgid;
+ write_unlock(&tsk->process_keyring->lock);
+ up_write(&tsk->process_keyring->sem);
+ }
+
+ /* update the ownership of the thread keyring */
+ if (tsk->thread_keyring) {
+ down_write(&tsk->thread_keyring->sem);
+ write_lock(&tsk->thread_keyring->lock);
+ tsk->thread_keyring->gid = tsk->fsgid;
+ write_unlock(&tsk->thread_keyring->lock);
+ up_write(&tsk->thread_keyring->sem);
+ }
+
+} /* end key_fsgid_changed() */
+
+/*****************************************************************************/
+/*
+ * search the process keyrings for the first matching key
+ * - we use the supplied match function to see if the description (or other
+ * feature of interest) matches
+ * - we return -EAGAIN if we didn't find any matching key
+ * - we return -ENOKEY if we found only negative matching keys
+ */
+struct key *search_process_keyrings_aux(struct key_type *type,
+ const void *description,
+ key_match_func_t match)
+{
+ struct task_struct *tsk = current;
+ struct key *key, *ret, *err, *session;
+
+ /* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
+ * searchable, but we failed to find a key or we found a negative key;
+ * otherwise we want to return a sample error (probably -EACCES) if
+ * none of the keyrings were searchable
+ *
+ * in terms of priority: success > -ENOKEY > -EAGAIN > other error
+ */
+ key = NULL;
+ ret = NULL;
+ err = ERR_PTR(-EAGAIN);
+
+ /* search the thread keyring first */
+ if (tsk->thread_keyring) {
+ key = keyring_search_aux(tsk->thread_keyring, type,
+ description, match);
+ if (!IS_ERR(key))
+ goto found;
+
+ switch (PTR_ERR(key)) {
+ case -EAGAIN: /* no key */
+ if (ret)
+ break;
+ case -ENOKEY: /* negative key */
+ ret = key;
+ break;
+ default:
+ err = key;
+ break;
+ }
+ }
+
+ /* search the process keyring second */
+ if (tsk->process_keyring) {
+ key = keyring_search_aux(tsk->process_keyring, type,
+ description, match);
+ if (!IS_ERR(key))
+ goto found;
+
+ switch (PTR_ERR(key)) {
+ case -EAGAIN: /* no key */
+ if (ret)
+ break;
+ case -ENOKEY: /* negative key */
+ ret = key;
+ break;
+ default:
+ err = key;
+ break;
+ }
+ }
+
+ /* search the session keyring last */
+ session = tsk->session_keyring;
+ if (!session)
+ session = tsk->user->session_keyring;
+
+ key = keyring_search_aux(session, type,
+ description, match);
+ if (!IS_ERR(key))
+ goto found;
+
+ switch (PTR_ERR(key)) {
+ case -EAGAIN: /* no key */
+ if (ret)
+ break;
+ case -ENOKEY: /* negative key */
+ ret = key;
+ break;
+ default:
+ err = key;
+ break;
+ }
+
+ /* no key - decide on the error we're going to go for */
+ key = ret ? ret : err;
+
+ found:
+ return key;
+
+} /* end search_process_keyrings_aux() */
+
+/*****************************************************************************/
+/*
+ * search the process keyrings for the first matching key
+ * - we return -EAGAIN if we didn't find any matching key
+ * - we return -ENOKEY if we found only negative matching keys
+ */
+struct key *search_process_keyrings(struct key_type *type,
+ const char *description)
+{
+ return search_process_keyrings_aux(type, description, type->match);
+
+} /* end search_process_keyrings() */
+
+/*****************************************************************************/
+/*
+ * lookup a key given a key ID from userspace with a given permissions mask
+ * - don't create special keyrings unless so requested
+ * - partially constructed keys aren't found unless requested
+ */
+struct key *lookup_user_key(key_serial_t id, int create, int partial,
+ key_perm_t perm)
+{
+ struct task_struct *tsk = current;
+ struct key *key;
+ int ret;
+
+ key = ERR_PTR(-ENOKEY);
+
+ switch (id) {
+ case KEY_SPEC_THREAD_KEYRING:
+ if (!tsk->thread_keyring) {
+ if (!create)
+ goto error;
+
+ ret = install_thread_keyring(tsk);
+ if (ret < 0) {
+ key = ERR_PTR(ret);
+ goto error;
+ }
+ }
+
+ key = tsk->thread_keyring;
+ atomic_inc(&key->usage);
+ break;
+
+ case KEY_SPEC_PROCESS_KEYRING:
+ if (!tsk->process_keyring) {
+ if (!create)
+ goto error;
+
+ ret = install_process_keyring(tsk);
+ if (ret < 0) {
+ key = ERR_PTR(ret);
+ goto error;
+ }
+ }
+
+ key = tsk->process_keyring;
+ atomic_inc(&key->usage);
+ break;
+
+ case KEY_SPEC_SESSION_KEYRING:
+ if (!tsk->session_keyring) {
+ /* always install a session keyring upon access if one
+ * doesn't exist yet */
+ ret = install_session_keyring(
+ tsk, tsk->user->session_keyring);
+ if (ret < 0)
+ goto error;
+ }
+
+ key = tsk->session_keyring;
+ atomic_inc(&key->usage);
+ break;
+
+ case KEY_SPEC_USER_KEYRING:
+ key = tsk->user->uid_keyring;
+ atomic_inc(&key->usage);
+ break;
+
+ case KEY_SPEC_USER_SESSION_KEYRING:
+ key = tsk->user->session_keyring;
+ atomic_inc(&key->usage);
+ break;
+
+ case KEY_SPEC_GROUP_KEYRING:
+ /* group keyrings are not yet supported */
+ key = ERR_PTR(-EINVAL);
+ goto error;
+
+ default:
+ key = ERR_PTR(-EINVAL);
+ if (id < 1)
+ goto error;
+
+ key = key_lookup(id);
+ if (IS_ERR(key))
+ goto error;
+ break;
+ }
+
+ /* check the status and permissions */
+ if (perm) {
+ ret = key_validate(key);
+ if (ret < 0)
+ goto invalid_key;
+ }
+
+ ret = -EIO;
+ if (!partial && !(key->flags & KEY_FLAG_INSTANTIATED))
+ goto invalid_key;
+
+ ret = -EACCES;
+ if (!key_permission(key, perm))
+ goto invalid_key;
+
+ error:
+ return key;
+
+ invalid_key:
+ key_put(key);
+ key = ERR_PTR(ret);
+ goto error;
+
+} /* end lookup_user_key() */
+
+/*****************************************************************************/
+/*
+ * join the named keyring as the session keyring if possible, or attempt to
+ * create a new one of that name if not
+ * - if the name is NULL, an empty anonymous keyring is installed instead
+ * - named session keyring joining is done with a semaphore held
+ */
+long join_session_keyring(const char *name)
+{
+ struct task_struct *tsk = current;
+ struct key *keyring;
+ long ret;
+
+ /* if no name is provided, install an anonymous keyring */
+ if (!name) {
+ ret = install_session_keyring(tsk, NULL);
+ if (ret < 0)
+ goto error;
+
+ ret = tsk->session_keyring->serial;
+ goto error;
+ }
+
+ /* allow the user to join or create a named keyring */
+ down(&key_session_sem);
+
+ /* look for an existing keyring of this name */
+ keyring = find_keyring_by_name(name, 0);
+ if (PTR_ERR(keyring) == -ENOKEY) {
+ /* not found - try and create a new one */
+ keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error;
+ }
+ }
+ else if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error2;
+ }
+
+ /* we've got a keyring - now to install it */
+ ret = install_session_keyring(tsk, keyring);
+ if (ret < 0)
+ goto error2;
+
+ key_put(keyring);
+
+ ret = tsk->session_keyring->serial;
+
+ error2:
+ up(&key_session_sem);
+ error:
+ return ret;
+
+} /* end join_session_keyring() */
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
new file mode 100644
index 000000000000..fd6ba06f2503
--- /dev/null
+++ b/security/keys/request_key.c
@@ -0,0 +1,337 @@
+/* request_key.c: request a key from userspace
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kmod.h>
+#include <linux/err.h>
+#include "internal.h"
+
+struct key_construction {
+ struct list_head link; /* link in construction queue */
+ struct key *key; /* key being constructed */
+};
+
+/* when waiting for someone else's keys, you get added to this */
+DECLARE_WAIT_QUEUE_HEAD(request_key_conswq);
+
+/*****************************************************************************/
+/*
+ * request userspace finish the construction of a key
+ * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring> <info>"
+ * - if callout_info is an empty string, it'll be rendered as a "-" instead
+ */
+static int call_request_key(struct key *key,
+ const char *op,
+ const char *callout_info)
+{
+ struct task_struct *tsk = current;
+ char *argv[10], *envp[3], uid_str[12], gid_str[12];
+ char key_str[12], keyring_str[3][12];
+ int i;
+
+ /* record the UID and GID */
+ sprintf(uid_str, "%d", current->fsuid);
+ sprintf(gid_str, "%d", current->fsgid);
+
+ /* we say which key is under construction */
+ sprintf(key_str, "%d", key->serial);
+
+ /* we specify the process's default keyrings */
+ task_lock(current);
+ sprintf(keyring_str[0], "%d",
+ tsk->thread_keyring ? tsk->thread_keyring->serial : 0);
+ sprintf(keyring_str[1], "%d",
+ tsk->process_keyring ? tsk->process_keyring->serial : 0);
+ sprintf(keyring_str[2], "%d",
+ (tsk->session_keyring ?
+ tsk->session_keyring->serial :
+ tsk->user->session_keyring->serial));
+ task_unlock(tsk);
+
+ /* set up a minimal environment */
+ i = 0;
+ envp[i++] = "HOME=/";
+ envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+ envp[i] = NULL;
+
+ /* set up the argument list */
+ i = 0;
+ argv[i++] = "/sbin/request-key";
+ argv[i++] = (char *) op;
+ argv[i++] = key_str;
+ argv[i++] = uid_str;
+ argv[i++] = gid_str;
+ argv[i++] = keyring_str[0];
+ argv[i++] = keyring_str[1];
+ argv[i++] = keyring_str[2];
+ argv[i++] = callout_info[0] ? (char *) callout_info : "-";
+ argv[i] = NULL;
+
+ /* do it */
+ return call_usermodehelper(argv[0], argv, envp, 1);
+
+} /* end call_request_key() */
+
+/*****************************************************************************/
+/*
+ * call out to userspace for the key
+ * - called with the construction sem held, but the sem is dropped here
+ * - we ignore program failure and go on key status instead
+ */
+static struct key *__request_key_construction(struct key_type *type,
+ const char *description,
+ const char *callout_info)
+{
+ struct key_construction cons;
+ struct timespec now;
+ struct key *key;
+ int ret, negative;
+
+ /* create a key and add it to the queue */
+ key = key_alloc(type, description,
+ current->fsuid, current->fsgid, KEY_USR_ALL, 0);
+ if (IS_ERR(key))
+ goto alloc_failed;
+
+ write_lock(&key->lock);
+ key->flags |= KEY_FLAG_USER_CONSTRUCT;
+ write_unlock(&key->lock);
+
+ cons.key = key;
+ list_add_tail(&cons.link, &key->user->consq);
+
+ /* we drop the construction sem here on behalf of the caller */
+ up_write(&key_construction_sem);
+
+ /* make the call */
+ ret = call_request_key(key, "create", callout_info);
+ if (ret < 0)
+ goto request_failed;
+
+ /* if the key wasn't instantiated, then we want to give an error */
+ ret = -ENOKEY;
+ if (!(key->flags & KEY_FLAG_INSTANTIATED))
+ goto request_failed;
+
+ down_write(&key_construction_sem);
+ list_del(&cons.link);
+ up_write(&key_construction_sem);
+
+ /* also give an error if the key was negatively instantiated */
+ check_not_negative:
+ if (key->flags & KEY_FLAG_NEGATIVE) {
+ key_put(key);
+ key = ERR_PTR(-ENOKEY);
+ }
+
+ out:
+ return key;
+
+ request_failed:
+ /* it wasn't instantiated
+ * - remove from construction queue
+ * - mark the key as dead
+ */
+ negative = 0;
+ down_write(&key_construction_sem);
+
+ list_del(&cons.link);
+
+ write_lock(&key->lock);
+ key->flags &= ~KEY_FLAG_USER_CONSTRUCT;
+
+ /* check it didn't get instantiated between the check and the down */
+ if (!(key->flags & KEY_FLAG_INSTANTIATED)) {
+ key->flags |= KEY_FLAG_INSTANTIATED | KEY_FLAG_NEGATIVE;
+ negative = 1;
+ }
+
+ write_unlock(&key->lock);
+ up_write(&key_construction_sem);
+
+ if (!negative)
+ goto check_not_negative; /* surprisingly, the key got
+ * instantiated */
+
+ /* set the timeout and store in the session keyring if we can */
+ now = current_kernel_time();
+ key->expiry = now.tv_sec + key_negative_timeout;
+
+ if (current->session_keyring)
+ key_link(current->session_keyring, key);
+ key_put(key);
+
+ /* notify anyone who was waiting */
+ wake_up_all(&request_key_conswq);
+
+ key = ERR_PTR(ret);
+ goto out;
+
+ alloc_failed:
+ up_write(&key_construction_sem);
+ goto out;
+
+} /* end __request_key_construction() */
+
+/*****************************************************************************/
+/*
+ * call out to userspace to request the key
+ * - we check the construction queue first to see if an appropriate key is
+ * already being constructed by userspace
+ */
+static struct key *request_key_construction(struct key_type *type,
+ const char *description,
+ struct key_user *user,
+ const char *callout_info)
+{
+ struct key_construction *pcons;
+ struct key *key, *ckey;
+
+ DECLARE_WAITQUEUE(myself, current);
+
+ /* see if there's such a key under construction already */
+ down_write(&key_construction_sem);
+
+ list_for_each_entry(pcons, &user->consq, link) {
+ ckey = pcons->key;
+
+ if (ckey->type != type)
+ continue;
+
+ if (type->match(ckey, description))
+ goto found_key_under_construction;
+ }
+
+ /* see about getting userspace to construct the key */
+ key = __request_key_construction(type, description, callout_info);
+ error:
+ return key;
+
+ /* someone else has the same key under construction
+ * - we want to keep an eye on their key
+ */
+ found_key_under_construction:
+ atomic_inc(&ckey->usage);
+ up_write(&key_construction_sem);
+
+ /* wait for the key to be completed one way or another */
+ add_wait_queue(&request_key_conswq, &myself);
+
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!(ckey->flags & KEY_FLAG_USER_CONSTRUCT))
+ break;
+ schedule();
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&request_key_conswq, &myself);
+
+ /* we'll need to search this process's keyrings to see if the key is
+ * now there since we can't automatically assume it's also available
+ * there */
+ key_put(ckey);
+ ckey = NULL;
+
+ key = NULL; /* request a retry */
+ goto error;
+
+} /* end request_key_construction() */
+
+/*****************************************************************************/
+/*
+ * request a key
+ * - search the process's keyrings
+ * - check the list of keys being created or updated
+ * - call out to userspace for a key if requested (supplementary info can be
+ * passed)
+ */
+struct key *request_key(struct key_type *type,
+ const char *description,
+ const char *callout_info)
+{
+ struct key_user *user;
+ struct key *key;
+
+ /* search all the process keyrings for a key */
+ key = search_process_keyrings_aux(type, description, type->match);
+
+ if (PTR_ERR(key) == -EAGAIN) {
+ /* the search failed, but the keyrings were searchable, so we
+ * should consult userspace if we can */
+ key = ERR_PTR(-ENOKEY);
+ if (!callout_info)
+ goto error;
+
+ /* - get hold of the user's construction queue */
+ user = key_user_lookup(current->fsuid);
+ if (IS_ERR(user)) {
+ key = ERR_PTR(PTR_ERR(user));
+ goto error;
+ }
+
+ for (;;) {
+ /* ask userspace (returns NULL if it waited on a key
+ * being constructed) */
+ key = request_key_construction(type, description,
+ user, callout_info);
+ if (key)
+ break;
+
+ /* someone else made the key we want, so we need to
+ * search again as it might now be available to us */
+ key = search_process_keyrings_aux(type, description,
+ type->match);
+ if (PTR_ERR(key) != -EAGAIN)
+ break;
+ }
+
+ key_user_put(user);
+ }
+
+ error:
+ return key;
+
+} /* end request_key() */
+
+EXPORT_SYMBOL(request_key);
+
+/*****************************************************************************/
+/*
+ * validate a key
+ */
+int key_validate(struct key *key)
+{
+ struct timespec now;
+ int ret = 0;
+
+ if (key) {
+ /* check it's still accessible */
+ ret = -EKEYREVOKED;
+ if (key->flags & (KEY_FLAG_REVOKED | KEY_FLAG_DEAD))
+ goto error;
+
+ /* check it hasn't expired */
+ ret = 0;
+ if (key->expiry) {
+ now = current_kernel_time();
+ if (now.tv_sec >= key->expiry)
+ ret = -EKEYEXPIRED;
+ }
+ }
+
+ error:
+ return ret;
+
+} /* end key_validate() */
+
+EXPORT_SYMBOL(key_validate);
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
new file mode 100644
index 000000000000..8d65b3a28129
--- /dev/null
+++ b/security/keys/user_defined.c
@@ -0,0 +1,191 @@
+/* user_defined.c: user defined key type
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <asm/uaccess.h>
+#include "internal.h"
+
+static int user_instantiate(struct key *key, const void *data, size_t datalen);
+static int user_duplicate(struct key *key, const struct key *source);
+static int user_update(struct key *key, const void *data, size_t datalen);
+static int user_match(const struct key *key, const void *criterion);
+static void user_destroy(struct key *key);
+static void user_describe(const struct key *user, struct seq_file *m);
+static long user_read(const struct key *key,
+ char __user *buffer, size_t buflen);
+
+/*
+ * user defined keys take an arbitrary string as the description and an
+ * arbitrary blob of data as the payload
+ *
+ * the payload is limited to 32767 bytes (enforced by user_instantiate() and
+ * user_update() below)
+ */
+struct key_type key_type_user = {
+	.name		= "user",
+	.instantiate	= user_instantiate,
+	.duplicate	= user_duplicate,
+	.update		= user_update,
+	.match		= user_match,
+	.destroy	= user_destroy,
+	.describe	= user_describe,
+	.read		= user_read,
+};
+
+/*****************************************************************************/
+/*
+ * instantiate a user defined key
+ *
+ * - the payload must be non-NULL and between 1 and 32767 bytes
+ * - a private copy of the data is made and attached as key->payload.data
+ * - returns 0 on success, -EINVAL for a bad payload, -ENOMEM if the copy
+ *   can't be allocated, or the error from key_payload_reserve()
+ */
+static int user_instantiate(struct key *key, const void *data, size_t datalen)
+{
+	int ret;
+
+	/* datalen is unsigned, so "<= 0" only rejects zero here */
+	ret = -EINVAL;
+	if (datalen <= 0 || datalen > 32767 || !data)
+		goto error;
+
+	/* reserve space for the payload (presumably charges the key quota -
+	 * see key_payload_reserve()) */
+	ret = key_payload_reserve(key, datalen);
+	if (ret < 0)
+		goto error;
+
+	/* attach the data */
+	ret = -ENOMEM;
+	key->payload.data = kmalloc(datalen, GFP_KERNEL);
+	if (!key->payload.data)
+		goto error;
+
+	memcpy(key->payload.data, data, datalen);
+	ret = 0;
+
+ error:
+	return ret;
+
+} /* end user_instantiate() */
+
+/*****************************************************************************/
+/*
+ * duplicate a user defined key
+ *
+ * - copies the source key's payload into a freshly allocated buffer
+ * - returns 0 on success or -ENOMEM if the allocation fails
+ * - NOTE(review): unlike instantiate/update there is no key_payload_reserve()
+ *   call here - presumably the caller accounts for the quota; confirm
+ */
+static int user_duplicate(struct key *key, const struct key *source)
+{
+	int ret;
+
+	/* just copy the payload */
+	ret = -ENOMEM;
+	key->payload.data = kmalloc(source->datalen, GFP_KERNEL);
+
+	if (key->payload.data) {
+		key->datalen = source->datalen;
+		memcpy(key->payload.data, source->payload.data, source->datalen);
+		ret = 0;
+	}
+
+	return ret;
+
+} /* end user_duplicate() */
+
+/*****************************************************************************/
+/*
+ * update a user defined key
+ *
+ * - the replacement payload is validated and copied BEFORE key->lock is
+ *   taken, so the (possibly sleeping) allocation happens unlocked
+ * - on success the old payload is displaced and freed and the expiry time is
+ *   cleared; on failure the new copy is discarded and the key is untouched
+ * - returns 0 on success, -EINVAL for a bad payload, -ENOMEM if the copy
+ *   can't be allocated, or the error from key_payload_reserve()
+ */
+static int user_update(struct key *key, const void *data, size_t datalen)
+{
+	void *new, *zap;
+	int ret;
+
+	/* datalen is unsigned, so "<= 0" only rejects zero here */
+	ret = -EINVAL;
+	if (datalen <= 0 || datalen > 32767 || !data)
+		goto error;
+
+	/* copy the data */
+	ret = -ENOMEM;
+	new = kmalloc(datalen, GFP_KERNEL);
+	if (!new)
+		goto error;
+
+	memcpy(new, data, datalen);
+
+	/* check the quota and attach the new data */
+	zap = new;
+	write_lock(&key->lock);
+
+	ret = key_payload_reserve(key, datalen);
+
+	if (ret == 0) {
+		/* attach the new data, displacing the old */
+		zap = key->payload.data;
+		key->payload.data = new;
+		key->expiry = 0;
+	}
+
+	/* zap now points at whichever buffer is no longer needed: the old
+	 * payload on success, or the unused new copy on failure */
+	write_unlock(&key->lock);
+	kfree(zap);
+
+ error:
+	return ret;
+
+} /* end user_update() */
+
+/*****************************************************************************/
+/*
+ * match users on their name
+ *
+ * - returns non-zero if the key's description is an exact string match for
+ *   the criterion passed to the keyring search
+ */
+static int user_match(const struct key *key, const void *description)
+{
+	return strcmp(key->description, description) == 0;
+
+} /* end user_match() */
+
+/*****************************************************************************/
+/*
+ * dispose of the data dangling from the corpse of a user
+ *
+ * - frees the payload copy attached by instantiate/update; kfree() of a NULL
+ *   pointer is a no-op, so a key with no payload attached is safe
+ */
+static void user_destroy(struct key *key)
+{
+	kfree(key->payload.data);
+
+} /* end user_destroy() */
+
+/*****************************************************************************/
+/*
+ * describe the user
+ *
+ * - writes "<description>: <payload length>" to the given seq_file
+ */
+static void user_describe(const struct key *key, struct seq_file *m)
+{
+	seq_puts(m, key->description);
+
+	seq_printf(m, ": %u", key->datalen);
+
+} /* end user_describe() */
+
+/*****************************************************************************/
+/*
+ * read the key data
+ *
+ * - copies up to buflen bytes of the payload to the userspace buffer
+ * - returns the FULL payload length even when only a truncated amount (or
+ *   nothing, if buffer is NULL or buflen is 0) was copied, or -EFAULT if the
+ *   copy to userspace faults
+ * - NOTE(review): the payload is read without key->lock held - a concurrent
+ *   user_update() frees the old buffer; confirm callers serialize reads
+ *   against updates
+ */
+static long user_read(const struct key *key,
+		      char __user *buffer, size_t buflen)
+{
+	long ret = key->datalen;
+
+	/* we can return the data as is */
+	if (buffer && buflen > 0) {
+		if (buflen > key->datalen)
+			buflen = key->datalen;
+
+		if (copy_to_user(buffer, key->payload.data, buflen) != 0)
+			ret = -EFAULT;
+	}
+
+	return ret;
+
+} /* end user_read() */
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 349d54dc71c0..40b6911892d4 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -64,6 +64,7 @@
#include <net/ipv6.h>
#include <linux/hugetlb.h>
#include <linux/personality.h>
+#include <linux/sysctl.h>
#include "avc.h"
#include "objsec.h"
@@ -386,13 +387,6 @@ static int try_context_mount(struct super_block *sb, void *data)
break;
case Opt_fscontext:
- if (sbsec->behavior != SECURITY_FS_USE_XATTR) {
- rc = -EINVAL;
- printk(KERN_WARNING "SELinux: "
- "fscontext option is invalid for"
- " this filesystem type\n");
- goto out_free;
- }
if (seen & (Opt_context|Opt_fscontext)) {
rc = -EINVAL;
printk(KERN_WARNING SEL_MOUNT_FAIL_MSG);
@@ -2331,9 +2325,8 @@ static int selinux_inode_removexattr (struct dentry *dentry, char *name)
return -EACCES;
}
-static int selinux_inode_getsecurity(struct dentry *dentry, const char *name, void *buffer, size_t size)
+static int selinux_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size)
{
- struct inode *inode = dentry->d_inode;
struct inode_security_struct *isec = inode->i_security;
char *context;
unsigned len;
@@ -2361,10 +2354,9 @@ static int selinux_inode_getsecurity(struct dentry *dentry, const char *name, vo
return len;
}
-static int selinux_inode_setsecurity(struct dentry *dentry, const char *name,
+static int selinux_inode_setsecurity(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
- struct inode *inode = dentry->d_inode;
struct inode_security_struct *isec = inode->i_security;
u32 newsid;
int rc;
@@ -2383,10 +2375,10 @@ static int selinux_inode_setsecurity(struct dentry *dentry, const char *name,
return 0;
}
-static int selinux_inode_listsecurity(struct dentry *dentry, char *buffer)
+static int selinux_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
{
const int len = sizeof(XATTR_NAME_SELINUX);
- if (buffer)
+ if (buffer && len <= buffer_size)
memcpy(buffer, XATTR_NAME_SELINUX, len);
return len;
}