author     David S. Miller <davem@kernel.bkbits.net>  2003-04-27 11:46:28 -0700
committer  David S. Miller <davem@kernel.bkbits.net>  2003-04-27 11:46:28 -0700
commit     8a0aa9f7c9eb7cd0c5bbb498e5606a3130a766f8 (patch)
tree       9175a7b5265981e9c47c5548b96221e1c0abb8cf
parent     67b818917d89369be612a01061ccd2816d8bd6d8 (diff)
parent     a58e4d1d2909f45b0e7dcb36d307d20e233c12f7 (diff)
Merge davem@nuts.ninka.net:/home/davem/src/BK/net-2.5
into kernel.bkbits.net:/home/davem/net-2.5
-rw-r--r--  arch/arm/Makefile  19
-rw-r--r--  arch/arm/boot/compressed/head-sa1100.S  13
-rw-r--r--  arch/arm/def-configs/shark  533
-rw-r--r--  arch/arm/kernel/process.c  2
-rw-r--r--  arch/arm/kernel/setup.c  121
-rw-r--r--  arch/arm/mach-integrator/cpu.c  5
-rw-r--r--  arch/arm/mach-integrator/irq.c  1
-rw-r--r--  arch/arm/mach-sa1100/assabet.c  2
-rw-r--r--  arch/arm/mm/Makefile  16
-rw-r--r--  arch/arm/mm/cache-v3.S  118
-rw-r--r--  arch/arm/mm/cache-v4.S  120
-rw-r--r--  arch/arm/mm/cache-v4wb.S  196
-rw-r--r--  arch/arm/mm/cache-v4wt.S  170
-rw-r--r--  arch/arm/mm/consistent.c  12
-rw-r--r--  arch/arm/mm/fault-armv.c  6
-rw-r--r--  arch/arm/mm/fault-common.c  6
-rw-r--r--  arch/arm/mm/mm-armv.c  157
-rw-r--r--  arch/arm/mm/proc-arm1020.S  370
-rw-r--r--  arch/arm/mm/proc-arm2_3.S  24
-rw-r--r--  arch/arm/mm/proc-arm6_7.S  39
-rw-r--r--  arch/arm/mm/proc-arm720.S  78
-rw-r--r--  arch/arm/mm/proc-arm920.S  377
-rw-r--r--  arch/arm/mm/proc-arm922.S  378
-rw-r--r--  arch/arm/mm/proc-arm926.S  366
-rw-r--r--  arch/arm/mm/proc-sa110.S  452
-rw-r--r--  arch/arm/mm/proc-sa1100.S  297
-rw-r--r--  arch/arm/mm/proc-syms.c  1
-rw-r--r--  arch/arm/mm/proc-xscale.S  346
-rw-r--r--  arch/arm/vmlinux-armv.lds.in  3
-rw-r--r--  drivers/block/ataflop.c  87
-rw-r--r--  drivers/block/floppy.c  2
-rw-r--r--  drivers/block/floppy98.c  2
-rw-r--r--  drivers/block/genhd.c  11
-rw-r--r--  drivers/block/ioctl.c  2
-rw-r--r--  drivers/ide/legacy/hd98.c  272
-rw-r--r--  drivers/isdn/capi/capifs.c  34
-rw-r--r--  drivers/md/dm-ioctl.c  4
-rw-r--r--  drivers/md/dm-table.c  21
-rw-r--r--  drivers/md/md.c  11
-rw-r--r--  drivers/mtd/devices/blkmtd.c  49
-rw-r--r--  drivers/mtd/maps/iq80321.c  22
-rw-r--r--  drivers/usb/core/inode.c  64
-rw-r--r--  drivers/video/cyber2000fb.c  4
-rw-r--r--  fs/binfmt_misc.c  106
-rw-r--r--  fs/block_dev.c  19
-rw-r--r--  fs/ext3/super.c  12
-rw-r--r--  fs/inode.c  10
-rw-r--r--  fs/jfs/jfs_logmgr.c  11
-rw-r--r--  fs/libfs.c  94
-rw-r--r--  fs/nfsd/nfsctl.c  87
-rw-r--r--  fs/partitions/check.c  15
-rw-r--r--  fs/reiserfs/journal.c  21
-rw-r--r--  include/asm-arm/arch-iop3xx/iop310-irqs.h  80
-rw-r--r--  include/asm-arm/bugs.h  4
-rw-r--r--  include/asm-arm/cpu-multi26.h  7
-rw-r--r--  include/asm-arm/cpu-multi32.h  108
-rw-r--r--  include/asm-arm/cpu-single.h  37
-rw-r--r--  include/asm-arm/hardirq.h  5
-rw-r--r--  include/asm-arm/proc-armv/cache.h  269
-rw-r--r--  include/asm-arm/proc-armv/locks.h  4
-rw-r--r--  include/asm-arm/proc-armv/pgalloc.h  5
-rw-r--r--  include/asm-arm/proc-armv/pgtable.h  20
-rw-r--r--  include/asm-arm/proc-armv/system.h  46
-rw-r--r--  include/asm-arm/proc-armv/tlbflush.h  40
-rw-r--r--  include/asm-arm/procinfo.h  4
-rw-r--r--  include/asm-arm/setup.h  13
-rw-r--r--  include/asm-arm/tlb.h  11
-rw-r--r--  include/linux/fs.h  7
-rw-r--r--  include/linux/genhd.h  5
-rw-r--r--  kernel/ksyms.c  5
-rw-r--r--  kernel/suspend.c  15
-rw-r--r--  net/sunrpc/rpc_pipe.c  38
72 files changed, 2972 insertions, 2939 deletions
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 21da4107722e..09cea076738e 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -31,13 +31,15 @@ apcs-$(CONFIG_CPU_32) :=-mapcs-32
apcs-$(CONFIG_CPU_26) :=-mapcs-26 -mcpu=arm3
# This selects which instruction set is used.
-# Note that GCC is lame - it doesn't numerically define an
-# architecture version macro, but instead defines a whole
-# series of macros.
-arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3
+# Note that GCC does not numerically define an architecture version
+# macro, but instead defines a whole series of macros which makes
+# testing for a specific architecture or later rather impossible.
+#
+# Note - GCC does accept -march=armv5te, but someone messed up the assembler or the
+# gcc specs file - this needs fixing properly - ie in gcc and/or binutils.
+arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 -march=armv5t
arch-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4 -march=armv4
-arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 -march=armv5te
-arch-$(CONFIG_CPU_XSCALE) :=-D__LINUX_ARM_ARCH__=5 -march=armv4 -Wa,-mxscale #-march=armv5te
+arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3
# This selects how we optimise for the processor.
tune-$(CONFIG_CPU_ARM610) :=-mtune=arm610
@@ -48,13 +50,13 @@ tune-$(CONFIG_CPU_ARM922T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM926T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_SA110) :=-mtune=strongarm110
tune-$(CONFIG_CPU_SA1100) :=-mtune=strongarm1100
-tune-$(CONFIG_CPU_XSCALE) :=-mtune=strongarm #-mtune=xscale
+tune-$(CONFIG_CPU_XSCALE) :=-mtune=strongarm -Wa,-mxscale #-mtune=xscale
# Force -mno-fpu to be passed to the assembler. Some versions of gcc don't
# do this with -msoft-float
CFLAGS_BOOT :=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
CFLAGS +=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
-AFLAGS +=$(apcs-y) $(arch-y) -mno-fpu -msoft-float -Wa,-mno-fpu
+AFLAGS +=$(apcs-y) $(arch-y) $(tune-y) -mno-fpu -msoft-float -Wa,-mno-fpu
#Default value
DATAADDR := .
@@ -208,6 +210,7 @@ zi:; $(Q)$(MAKE) $(build)=$(boot) zinstall
)
arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
+ include/asm-arm/.arch include/asm-arm/.proc \
include/config/MARKER
include/asm-$(ARCH)/constants.h: arch/$(ARCH)/kernel/asm-offsets.s
diff --git a/arch/arm/boot/compressed/head-sa1100.S b/arch/arm/boot/compressed/head-sa1100.S
index a3d97910889e..a722f6c858f5 100644
--- a/arch/arm/boot/compressed/head-sa1100.S
+++ b/arch/arm/boot/compressed/head-sa1100.S
@@ -50,6 +50,10 @@ __SA1100_start:
10:
#endif
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ ands r0, r0, #0x0d
+ beq 99f
+
@ Data cache might be active.
@ Be sure to flush kernel binary out of the cache,
@ whatever state it is, before it is turned off.
@@ -68,11 +72,4 @@ __SA1100_start:
bic r0, r0, #0x0d @ clear WB, DC, MMU
bic r0, r0, #0x1000 @ clear Icache
mcr p15, 0, r0, c1, c0, 0
-
-/*
- * Pause for a short time so that we give enough time
- * for the host to start a terminal up.
- */
- mov r0, #0x00200000
-1: subs r0, r0, #1
- bne 1b
+99:
diff --git a/arch/arm/def-configs/shark b/arch/arm/def-configs/shark
index 69ba4f4ea8ce..cd5fc9c90129 100644
--- a/arch/arm/def-configs/shark
+++ b/arch/arm/def-configs/shark
@@ -2,14 +2,9 @@
# Automatically generated make config: don't edit
#
CONFIG_ARM=y
-# CONFIG_EISA is not set
-# CONFIG_SBUS is not set
-# CONFIG_MCA is not set
+CONFIG_MMU=y
CONFIG_UID16=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
-# CONFIG_GENERIC_BUST_SPINLOCK is not set
-# CONFIG_GENERIC_ISA_DMA is not set
#
# Code maturity level options
@@ -19,15 +14,19 @@ CONFIG_EXPERIMENTAL=y
#
# General setup
#
-CONFIG_NET=y
+CONFIG_SWAP=y
CONFIG_SYSVIPC=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
+CONFIG_LOG_BUF_SHIFT=14
#
# Loadable module support
#
CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_OBSOLETE_MODPARM=y
# CONFIG_MODVERSIONS is not set
CONFIG_KMOD=y
@@ -58,112 +57,41 @@ CONFIG_ARCH_SHARK=y
#
# Archimedes/A5000 Implementations (select only ONE)
#
-# CONFIG_ARCH_ARC is not set
-# CONFIG_ARCH_A5K is not set
#
-# Footbridge Implementations
+# CLPS711X/EP721X Implementations
#
-# CONFIG_ARCH_CATS is not set
-# CONFIG_ARCH_PERSONAL_SERVER is not set
-# CONFIG_ARCH_EBSA285_ADDIN is not set
-# CONFIG_ARCH_EBSA285_HOST is not set
-# CONFIG_ARCH_NETWINDER is not set
#
-# SA11x0 Implementations
+# Epxa10db
#
-# CONFIG_SA1100_ASSABET is not set
-# CONFIG_ASSABET_NEPONSET is not set
-# CONFIG_SA1100_ADSBITSY is not set
-# CONFIG_SA1100_BRUTUS is not set
-# CONFIG_SA1100_CERF is not set
-# CONFIG_SA1100_H3100 is not set
-# CONFIG_SA1100_H3600 is not set
-# CONFIG_SA1100_H3800 is not set
-# CONFIG_SA1100_H3XXX is not set
-# CONFIG_SA1100_EXTENEX1 is not set
-# CONFIG_SA1100_FLEXANET is not set
-# CONFIG_SA1100_FREEBIRD is not set
-# CONFIG_SA1100_GRAPHICSCLIENT is not set
-# CONFIG_SA1100_GRAPHICSMASTER is not set
-# CONFIG_SA1100_BADGE4 is not set
-# CONFIG_SA1100_JORNADA720 is not set
-# CONFIG_SA1100_HUW_WEBPANEL is not set
-# CONFIG_SA1100_ITSY is not set
-# CONFIG_SA1100_LART is not set
-# CONFIG_SA1100_NANOENGINE is not set
-# CONFIG_SA1100_OMNIMETER is not set
-# CONFIG_SA1100_PANGOLIN is not set
-# CONFIG_SA1100_PLEB is not set
-# CONFIG_SA1100_PT_SYSTEM3 is not set
-# CONFIG_SA1100_SHANNON is not set
-# CONFIG_SA1100_SHERMAN is not set
-# CONFIG_SA1100_SIMPAD is not set
-# CONFIG_SA1100_PFS168 is not set
-# CONFIG_SA1100_VICTOR is not set
-# CONFIG_SA1100_XP860 is not set
-# CONFIG_SA1100_YOPY is not set
-# CONFIG_SA1100_STORK is not set
-# CONFIG_SA1100_USB is not set
-# CONFIG_SA1100_USB_NETLINK is not set
-# CONFIG_SA1100_USB_CHAR is not set
-# CONFIG_H3600_SLEEVE is not set
#
-# Intel PXA250/210 Implementations
+# Footbridge Implementations
#
-# CONFIG_ARCH_LUBBOCK is not set
-# CONFIG_ARCH_PXA_IDP is not set
#
-# CLPS711X/EP721X Implementations
+# IOP310 Implementation Options
#
-# CONFIG_ARCH_AUTCPU12 is not set
-# CONFIG_ARCH_CDB89712 is not set
-# CONFIG_ARCH_CLEP7312 is not set
-# CONFIG_ARCH_EDB7211 is not set
-# CONFIG_ARCH_P720T is not set
-# CONFIG_ARCH_FORTUNET is not set
-# CONFIG_ARCH_EP7211 is not set
-# CONFIG_ARCH_EP7212 is not set
#
-# IOP310 Implementation Options
+# IOP310 Chipset Features
#
-# CONFIG_ARCH_IQ80310 is not set
#
-# IOP310 Chipset Features
+# Intel PXA250/210 Implementations
+#
+
+#
+# SA11x0 Implementations
#
-# CONFIG_IOP310_AAU is not set
-# CONFIG_IOP310_DMA is not set
-# CONFIG_IOP310_MU is not set
-# CONFIG_IOP310_PMON is not set
-# CONFIG_ARCH_ACORN is not set
-# CONFIG_FOOTBRIDGE is not set
-# CONFIG_FOOTBRIDGE_HOST is not set
-# CONFIG_FOOTBRIDGE_ADDIN is not set
-CONFIG_CPU_32=y
-# CONFIG_CPU_26 is not set
#
# Processor Type
#
-# CONFIG_CPU_32v3 is not set
-CONFIG_CPU_32v4=y
-# CONFIG_CPU_32v5 is not set
-# CONFIG_CPU_ARM610 is not set
-# CONFIG_CPU_ARM710 is not set
-# CONFIG_CPU_ARM720T is not set
-# CONFIG_CPU_ARM920T is not set
-# CONFIG_CPU_ARM922T is not set
-# CONFIG_CPU_ARM926T is not set
-# CONFIG_CPU_ARM1020 is not set
+CONFIG_CPU_32=y
CONFIG_CPU_SA110=y
-# CONFIG_CPU_SA1100 is not set
-# CONFIG_CPU_XSCALE is not set
-# CONFIG_XSCALE_PMU is not set
+CONFIG_CPU_32v4=y
#
# Processor Features
@@ -172,19 +100,16 @@ CONFIG_CPU_SA110=y
#
# General setup
#
-# CONFIG_DISCONTIGMEM is not set
CONFIG_PCI=y
-# CONFIG_PCI_HOST_PLX90X0 is not set
CONFIG_PCI_HOST_VIA82C505=y
CONFIG_ISA=y
CONFIG_ISA_DMA=y
-# CONFIG_FIQ is not set
# CONFIG_ZBOOT_ROM is not set
-CONFIG_ZBOOT_ROM_TEXT=0
-CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_PCI_LEGACY_PROC=y
# CONFIG_PCI_NAMES is not set
# CONFIG_HOTPLUG is not set
-# CONFIG_PCMCIA is not set
#
# At least one math emulation must be selected
@@ -198,7 +123,6 @@ CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
# CONFIG_PM is not set
# CONFIG_PREEMPT is not set
-# CONFIG_APM is not set
# CONFIG_ARTHUR is not set
CONFIG_CMDLINE=""
CONFIG_LEDS=y
@@ -216,11 +140,6 @@ CONFIG_PARPORT_PC_CML1=y
# CONFIG_PARPORT_PC_FIFO is not set
# CONFIG_PARPORT_PC_SUPERIO is not set
# CONFIG_PARPORT_ARC is not set
-# CONFIG_PARPORT_AMIGA is not set
-# CONFIG_PARPORT_MFC3 is not set
-# CONFIG_PARPORT_ATARI is not set
-# CONFIG_PARPORT_GSC is not set
-# CONFIG_PARPORT_SUNBPP is not set
# CONFIG_PARPORT_OTHER is not set
# CONFIG_PARPORT_1284 is not set
@@ -230,11 +149,9 @@ CONFIG_PARPORT_PC_CML1=y
# CONFIG_MTD is not set
#
-# Plug and Play configuration
+# Plug and Play support
#
# CONFIG_PNP is not set
-# CONFIG_ISAPNP is not set
-# CONFIG_PNPBIOS is not set
#
# Block devices
@@ -244,7 +161,6 @@ CONFIG_PARPORT_PC_CML1=y
# CONFIG_PARIDE is not set
# CONFIG_BLK_CPQ_DA is not set
# CONFIG_BLK_CPQ_CISS_DA is not set
-# CONFIG_CISS_SCSI_TAPE is not set
# CONFIG_BLK_DEV_DAC960 is not set
# CONFIG_BLK_DEV_UMEM is not set
CONFIG_BLK_DEV_LOOP=y
@@ -257,13 +173,11 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
# Multi-device support (RAID and LVM)
#
# CONFIG_MD is not set
-# CONFIG_BLK_DEV_MD is not set
-# CONFIG_MD_LINEAR is not set
-# CONFIG_MD_RAID0 is not set
-# CONFIG_MD_RAID1 is not set
-# CONFIG_MD_RAID5 is not set
-# CONFIG_MD_MULTIPATH is not set
-# CONFIG_BLK_DEV_LVM is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
#
# Networking options
@@ -272,8 +186,8 @@ CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
# CONFIG_NETLINK_DEV is not set
# CONFIG_NETFILTER is not set
-CONFIG_FILTER=y
CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
# CONFIG_IP_ADVANCED_ROUTER is not set
@@ -283,25 +197,23 @@ CONFIG_INET=y
# CONFIG_ARPD is not set
# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
# CONFIG_IPV6 is not set
-# CONFIG_ATM is not set
-# CONFIG_VLAN_8021Q is not set
-
-#
-#
-#
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
+# CONFIG_XFRM_USER is not set
#
-# Appletalk devices
+# SCTP Configuration (EXPERIMENTAL)
#
-# CONFIG_DEV_APPLETALK is not set
+CONFIG_IPV6_SCTP__=y
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_LLC is not set
# CONFIG_DECNET is not set
# CONFIG_BRIDGE is not set
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
-# CONFIG_LLC is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
@@ -314,8 +226,9 @@ CONFIG_INET=y
# CONFIG_NET_SCHED is not set
#
-# Network device support
+# Network testing
#
+# CONFIG_NET_PKTGEN is not set
CONFIG_NETDEVICES=y
#
@@ -332,47 +245,43 @@ CONFIG_NETDEVICES=y
# Ethernet (10 or 100Mbit)
#
CONFIG_NET_ETHERNET=y
-# CONFIG_ARM_AM79C961A is not set
-# CONFIG_SUNLANCE is not set
+# CONFIG_MII is not set
# CONFIG_HAPPYMEAL is not set
-# CONFIG_SUNBMAC is not set
-# CONFIG_SUNQE is not set
# CONFIG_SUNGEM is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_LANCE is not set
# CONFIG_NET_VENDOR_SMC is not set
# CONFIG_NET_VENDOR_RACAL is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
# CONFIG_AT1700 is not set
# CONFIG_DEPCA is not set
# CONFIG_HP100 is not set
# CONFIG_NET_ISA is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_AC3200 is not set
# CONFIG_APRICOT is not set
+# CONFIG_B44 is not set
CONFIG_CS89x0=y
# CONFIG_DGRS is not set
# CONFIG_EEPRO100 is not set
# CONFIG_E100 is not set
-# CONFIG_LNE390 is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
# CONFIG_NE2K_PCI is not set
-# CONFIG_NE3210 is not set
-# CONFIG_ES3210 is not set
# CONFIG_8139CP is not set
# CONFIG_8139TOO is not set
-# CONFIG_8139TOO_PIO is not set
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-# CONFIG_8139TOO_8129 is not set
-# CONFIG_8139_NEW_RX_RESET is not set
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
# CONFIG_VIA_RHINE is not set
-# CONFIG_VIA_RHINE_MMIO is not set
# CONFIG_NET_POCKET is not set
#
@@ -381,10 +290,10 @@ CONFIG_CS89x0=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
# CONFIG_E1000 is not set
-# CONFIG_MYRI_SBUS is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
# CONFIG_SK98LIN is not set
# CONFIG_TIGON3 is not set
# CONFIG_FDDI is not set
@@ -399,9 +308,8 @@ CONFIG_CS89x0=y
# CONFIG_NET_RADIO is not set
#
-# Token Ring devices
+# Token Ring devices (depends on LLC=y)
#
-# CONFIG_TR is not set
# CONFIG_NET_FC is not set
# CONFIG_RCPCI is not set
# CONFIG_SHAPER is not set
@@ -412,9 +320,9 @@ CONFIG_CS89x0=y
# CONFIG_WAN is not set
#
-# Tulip family network device support
+# IrDA (infrared) support
#
-# CONFIG_NET_TULIP is not set
+# CONFIG_IRDA is not set
#
# Amateur Radio support
@@ -422,75 +330,32 @@ CONFIG_CS89x0=y
# CONFIG_HAMRADIO is not set
#
-# IrDA (infrared) support
+# ATA/ATAPI/MFM/RLL support
#
-# CONFIG_IRDA is not set
+CONFIG_IDE=y
#
-# ATA/ATAPI/MFM/RLL support
+# IDE, ATA and ATAPI Block devices
#
-CONFIG_IDE=y
CONFIG_BLK_DEV_IDE=y
-# CONFIG_BLK_DEV_HD_IDE is not set
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
# CONFIG_BLK_DEV_HD is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
# CONFIG_IDEDISK_STROKE is not set
-CONFIG_ATAPI=y
CONFIG_BLK_DEV_IDECD=y
-# CONFIG_BLK_DEV_IDETAPE is not set
CONFIG_BLK_DEV_IDEFLOPPY=y
# CONFIG_BLK_DEV_IDESCSI is not set
-# CONFIG_BLK_DEV_IDECS is not set
-
-#
-# ATA host controller support
-#
-# CONFIG_BLK_DEV_RZ1000 is not set
-# CONFIG_BLK_DEV_CMD640 is not set
-# CONFIG_BLK_DEV_CMD640_ENHANCED is not set
-# CONFIG_BLK_DEV_ISAPNP is not set
-
-#
-# PCI host controller support
-#
-# CONFIG_BLK_DEV_OFFBOARD is not set
-# CONFIG_IDEPCI_SHARE_IRQ is not set
-# CONFIG_BLK_DEV_IDEDMA_PCI is not set
-# CONFIG_IDEDMA_PCI_AUTO is not set
-# CONFIG_IDEDMA_ONLYDISK is not set
-# CONFIG_BLK_DEV_IDEDMA is not set
-# CONFIG_BLK_DEV_IDE_TCQ is not set
-# CONFIG_BLK_DEV_IDE_TCQ_DEFAULT is not set
-# CONFIG_IDEDMA_NEW_DRIVE_LISTINGS is not set
-# CONFIG_BLK_DEV_AEC62XX is not set
-# CONFIG_AEC6280_BURST is not set
-# CONFIG_BLK_DEV_ALI15X3 is not set
-# CONFIG_WDC_ALI15X3 is not set
-# CONFIG_BLK_DEV_AMD74XX is not set
-# CONFIG_BLK_DEV_CMD64X is not set
-# CONFIG_BLK_DEV_CY82C693 is not set
-# CONFIG_BLK_DEV_CS5530 is not set
-# CONFIG_BLK_DEV_HPT34X is not set
-# CONFIG_HPT34X_AUTODMA is not set
-# CONFIG_BLK_DEV_HPT366 is not set
-# CONFIG_BLK_DEV_PIIX is not set
-# CONFIG_BLK_DEV_NS87415 is not set
-# CONFIG_BLK_DEV_OPTI621 is not set
-# CONFIG_BLK_DEV_PDC202XX is not set
-# CONFIG_PDC202XX_BURST is not set
-# CONFIG_PDC202XX_FORCE is not set
-# CONFIG_BLK_DEV_SVWKS is not set
-# CONFIG_BLK_DEV_SIS5513 is not set
-# CONFIG_BLK_DEV_TRM290 is not set
-# CONFIG_BLK_DEV_VIA82CXXX is not set
-# CONFIG_BLK_DEV_SL82C105 is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+# CONFIG_BLK_DEV_IDEPCI is not set
# CONFIG_IDE_CHIPSETS is not set
-# CONFIG_IDEDMA_IVB is not set
-# CONFIG_IDEDMA_AUTO is not set
-# CONFIG_BLK_DEV_ATARAID is not set
-# CONFIG_BLK_DEV_ATARAID_PDC is not set
-# CONFIG_BLK_DEV_ATARAID_HPT is not set
#
# SCSI support
@@ -501,12 +366,10 @@ CONFIG_SCSI=m
# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=m
-CONFIG_SD_EXTRA_DEVS=40
CONFIG_CHR_DEV_ST=m
# CONFIG_CHR_DEV_OSST is not set
CONFIG_BLK_DEV_SR=m
# CONFIG_BLK_DEV_SR_VENDOR is not set
-CONFIG_SR_EXTRA_DEVS=2
CONFIG_CHR_DEV_SG=m
#
@@ -525,8 +388,10 @@ CONFIG_CHR_DEV_SG=m
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AHA152X is not set
# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
# CONFIG_SCSI_DPT_I2O is not set
# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_IN2000 is not set
@@ -537,11 +402,11 @@ CONFIG_CHR_DEV_SG=m
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_DTC3280 is not set
# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_DMA is not set
# CONFIG_SCSI_EATA_PIO is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_GDTH is not set
# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
# CONFIG_SCSI_INITIO is not set
# CONFIG_SCSI_INIA100 is not set
# CONFIG_SCSI_PPA is not set
@@ -559,11 +424,11 @@ CONFIG_CHR_DEV_SG=m
# CONFIG_SCSI_QLOGIC_ISP is not set
# CONFIG_SCSI_QLOGIC_FC is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
-# CONFIG_SCSI_SIM710 is not set
# CONFIG_SCSI_SYM53C416 is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_T128 is not set
# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
#
@@ -575,11 +440,6 @@ CONFIG_CHR_DEV_SG=m
# I2O device support
#
# CONFIG_I2O is not set
-# CONFIG_I2O_PCI is not set
-# CONFIG_I2O_BLOCK is not set
-# CONFIG_I2O_LAN is not set
-# CONFIG_I2O_SCSI is not set
-# CONFIG_I2O_PROC is not set
#
# ISDN subsystem
@@ -589,47 +449,55 @@ CONFIG_CHR_DEV_SG=m
#
# Input device support
#
-# CONFIG_INPUT is not set
+CONFIG_INPUT=y
#
# Userland interfaces
#
-# CONFIG_INPUT_KEYBDEV is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_JOYDEV is not set
# CONFIG_INPUT_TSDEV is not set
# CONFIG_INPUT_TSLIBDEV is not set
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set
-# CONFIG_INPUT_UINPUT is not set
#
# Input I/O drivers
#
# CONFIG_GAMEPORT is not set
CONFIG_SOUND_GAMEPORT=y
-# CONFIG_GAMEPORT_NS558 is not set
-# CONFIG_GAMEPORT_L4 is not set
-# CONFIG_GAMEPORT_EMU10K1 is not set
-# CONFIG_GAMEPORT_VORTEX is not set
-# CONFIG_GAMEPORT_FM801 is not set
-# CONFIG_GAMEPORT_CS461x is not set
-# CONFIG_SERIO is not set
-# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
# CONFIG_SERIO_SERPORT is not set
# CONFIG_SERIO_CT82C710 is not set
# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
#
# Input Device Drivers
#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
#
# Character devices
#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
+# CONFIG_VT is not set
# CONFIG_SERIAL_NONSTANDARD is not set
#
@@ -637,33 +505,12 @@ CONFIG_VT_CONSOLE=y
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_SERIAL_8250_CS is not set
# CONFIG_SERIAL_8250_EXTENDED is not set
-# CONFIG_SERIAL_8250_MANY_PORTS is not set
-# CONFIG_SERIAL_8250_SHARE_IRQ is not set
-# CONFIG_SERIAL_8250_DETECT_IRQ is not set
-# CONFIG_SERIAL_8250_MULTIPORT is not set
-# CONFIG_SERIAL_8250_RSA is not set
#
# Non-8250 serial port support
#
-# CONFIG_ATOMWIDE_SERIAL is not set
-# CONFIG_DUALSP_SERIAL is not set
-# CONFIG_SERIAL_ANAKIN is not set
-# CONFIG_SERIAL_ANAKIN_CONSOLE is not set
-# CONFIG_SERIAL_AMBA is not set
-# CONFIG_SERIAL_AMBA_CONSOLE is not set
-# CONFIG_SERIAL_CLPS711X is not set
-# CONFIG_SERIAL_CLPS711X_CONSOLE is not set
-# CONFIG_SERIAL_CLPS711X_OLD_NAME is not set
-# CONFIG_SERIAL_21285 is not set
-# CONFIG_SERIAL_21285_OLD is not set
-# CONFIG_SERIAL_21285_CONSOLE is not set
-# CONFIG_SERIAL_UART00 is not set
-# CONFIG_SERIAL_UART00_CONSOLE is not set
-# CONFIG_SERIAL_SA1100 is not set
-# CONFIG_SERIAL_SA1100_CONSOLE is not set
+# CONFIG_SERIAL_DZ is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
@@ -671,6 +518,7 @@ CONFIG_UNIX98_PTY_COUNT=256
CONFIG_PRINTER=m
# CONFIG_LP_CONSOLE is not set
# CONFIG_PPDEV is not set
+# CONFIG_TIPAR is not set
#
# I2C support
@@ -678,26 +526,30 @@ CONFIG_PRINTER=m
# CONFIG_I2C is not set
#
-# L3 serial bus support
+# I2C Hardware Sensors Mainboard support
#
-# CONFIG_L3 is not set
-# CONFIG_L3_ALGOBIT is not set
-# CONFIG_L3_BIT_SA1100_GPIO is not set
#
-# Other L3 adapters
+# I2C Hardware Sensors Chip support
#
-# CONFIG_L3_SA1111 is not set
-# CONFIG_BIT_SA1100_GPIO is not set
+
+#
+# L3 serial bus support
+#
+# CONFIG_L3 is not set
#
# Mice
#
# CONFIG_BUSMOUSE is not set
-CONFIG_PSMOUSE=y
# CONFIG_QIC02_TAPE is not set
#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
# Watchdog Cards
#
# CONFIG_WATCHDOG is not set
@@ -714,6 +566,7 @@ CONFIG_RTC=y
# CONFIG_AGP is not set
# CONFIG_DRM is not set
# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
#
# Multimedia devices
@@ -723,84 +576,83 @@ CONFIG_RTC=y
#
# File systems
#
-# CONFIG_QUOTA is not set
-# CONFIG_QFMT_V1 is not set
-# CONFIG_QFMT_V2 is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-# CONFIG_ADFS_FS is not set
-# CONFIG_ADFS_FS_RW is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_BFS_FS is not set
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
-CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=y
-# CONFIG_UMSDOS_FS is not set
-CONFIG_VFAT_FS=y
-# CONFIG_EFS_FS is not set
-# CONFIG_JFFS_FS is not set
-# CONFIG_JFFS2_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_TMPFS is not set
-CONFIG_RAMFS=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
# CONFIG_ZISOFS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_JFS_DEBUG is not set
-# CONFIG_JFS_STATISTICS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_VXFS_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
# CONFIG_NTFS_FS is not set
-# CONFIG_NTFS_DEBUG is not set
-# CONFIG_NTFS_RW is not set
-# CONFIG_HPFS_FS is not set
+
+#
+# Pseudo filesystems
+#
CONFIG_PROC_FS=y
CONFIG_DEVFS_FS=y
CONFIG_DEVFS_MOUNT=y
# CONFIG_DEVFS_DEBUG is not set
# CONFIG_DEVPTS_FS is not set
+# CONFIG_TMPFS is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
-# CONFIG_QNX4FS_RW is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_EXT2_FS=y
# CONFIG_SYSV_FS is not set
-# CONFIG_UDF_FS is not set
-# CONFIG_UDF_RW is not set
# CONFIG_UFS_FS is not set
-# CONFIG_UFS_FS_WRITE is not set
#
# Network File Systems
#
-# CONFIG_CODA_FS is not set
-# CONFIG_INTERMEZZO_FS is not set
CONFIG_NFS_FS=y
# CONFIG_NFS_V3 is not set
-# CONFIG_ROOT_NFS is not set
+# CONFIG_NFS_V4 is not set
# CONFIG_NFSD is not set
-# CONFIG_NFSD_V3 is not set
-# CONFIG_NFSD_TCP is not set
-CONFIG_SUNRPC=y
CONFIG_LOCKD=y
# CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_GSS is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
-# CONFIG_NCPFS_PACKET_SIGNING is not set
-# CONFIG_NCPFS_IOCTL_LOCKING is not set
-# CONFIG_NCPFS_STRONG is not set
-# CONFIG_NCPFS_NFS_NS is not set
-# CONFIG_NCPFS_OS2_NS is not set
-# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_NLS is not set
-# CONFIG_NCPFS_EXTRAS is not set
-# CONFIG_ZISOFS_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
+# CONFIG_AFS_FS is not set
#
# Partition Types
@@ -817,11 +669,11 @@ CONFIG_MSDOS_PARTITION=y
# CONFIG_SOLARIS_X86_PARTITION is not set
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
-# CONFIG_SMB_NLS is not set
CONFIG_NLS=y
#
@@ -867,28 +719,18 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_NLS_UTF8 is not set
#
-# Console drivers
-#
-# CONFIG_VGA_CONSOLE is not set
-
-#
-# Frame-buffer support
+# Graphics support
#
CONFIG_FB=y
-CONFIG_DUMMY_CONSOLE=y
-# CONFIG_FB_CLGEN is not set
+# CONFIG_FB_CIRRUS is not set
# CONFIG_FB_PM2 is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_ACORN is not set
-# CONFIG_FB_ANAKIN is not set
-# CONFIG_FB_CLPS711X is not set
-# CONFIG_FB_SA1100 is not set
CONFIG_FB_CYBER2000=y
+# CONFIG_FB_IMSTT is not set
# CONFIG_FB_RIVA is not set
# CONFIG_FB_MATROX is not set
-# CONFIG_FB_ATY is not set
# CONFIG_FB_RADEON is not set
# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
# CONFIG_FB_SIS is not set
# CONFIG_FB_NEOMAGIC is not set
# CONFIG_FB_3DFX is not set
@@ -896,14 +738,11 @@ CONFIG_FB_CYBER2000=y
# CONFIG_FB_TRIDENT is not set
# CONFIG_FB_PM3 is not set
# CONFIG_FB_VIRTUAL is not set
-# CONFIG_FBCON_ADVANCED is not set
-CONFIG_FBCON_CFB8=y
-CONFIG_FBCON_CFB16=y
-CONFIG_FBCON_CFB24=y
-# CONFIG_FBCON_FONTWIDTH8_ONLY is not set
-# CONFIG_FBCON_FONTS is not set
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+# CONFIG_LOGO is not set
#
# Sound
@@ -911,13 +750,17 @@ CONFIG_FONT_8x16=y
CONFIG_SOUND=m
#
+# Advanced Linux Sound Architecture
+#
+# CONFIG_SND is not set
+
+#
# Open Sound System
#
CONFIG_SOUND_PRIME=m
# CONFIG_SOUND_BT878 is not set
# CONFIG_SOUND_CMPCI is not set
# CONFIG_SOUND_EMU10K1 is not set
-# CONFIG_MIDI_EMU10K1 is not set
# CONFIG_SOUND_FUSION is not set
# CONFIG_SOUND_CS4281 is not set
# CONFIG_SOUND_ES1370 is not set
@@ -932,7 +775,6 @@ CONFIG_SOUND_PRIME=m
# CONFIG_SOUND_MSNDCLAS is not set
# CONFIG_SOUND_MSNDPIN is not set
# CONFIG_SOUND_VIA82CXXX is not set
-# CONFIG_MIDI_VIA82CXXX is not set
CONFIG_SOUND_OSS=m
# CONFIG_SOUND_TRACEINIT is not set
# CONFIG_SOUND_DMAP is not set
@@ -950,7 +792,6 @@ CONFIG_SOUND_ADLIB=m
# CONFIG_SOUND_NM256 is not set
# CONFIG_SOUND_MAD16 is not set
# CONFIG_SOUND_PAS is not set
-# CONFIG_PAS_JOYSTICK is not set
# CONFIG_SOUND_PSS is not set
CONFIG_SOUND_SB=m
# CONFIG_SOUND_AWE32_SYNTH is not set
@@ -960,32 +801,22 @@ CONFIG_SOUND_SB=m
# CONFIG_SOUND_OPL3SA1 is not set
# CONFIG_SOUND_OPL3SA2 is not set
# CONFIG_SOUND_YMFPCI is not set
-# CONFIG_SOUND_YMFPCI_LEGACY is not set
# CONFIG_SOUND_UART6850 is not set
# CONFIG_SOUND_AEDSP16 is not set
-# CONFIG_SOUND_WAVEARTIST is not set
-# CONFIG_SOUND_TVMIXER is not set
#
-# Advanced Linux Sound Architecture
+# Misc devices
#
-# CONFIG_SND is not set
#
# Multimedia Capabilities Port drivers
#
# CONFIG_MCP is not set
-# CONFIG_MCP_SA1100 is not set
-# CONFIG_MCP_UCB1200 is not set
-# CONFIG_MCP_UCB1200_AUDIO is not set
-# CONFIG_MCP_UCB1200_TS is not set
#
# Console Switches
#
# CONFIG_SWITCHES is not set
-# CONFIG_SWITCHES_SA1100 is not set
-# CONFIG_SWITCHES_UCB1X00 is not set
#
# USB support
@@ -1004,24 +835,18 @@ CONFIG_FRAME_POINTER=y
CONFIG_DEBUG_USER=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_SLAB is not set
-# CONFIG_MAGIC_SYSRQ is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_WAITQ is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_DEBUG_ERRORS is not set
-# CONFIG_DEBUG_LL is not set
-# CONFIG_DEBUG_DC21285_PORT is not set
-# CONFIG_DEBUG_CLPS711X_UART2 is not set
#
# Security options
#
-CONFIG_SECURITY_CAPABILITIES=y
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
#
# Library routines
#
CONFIG_CRC32=y
-# CONFIG_ZLIB_INFLATE is not set
-# CONFIG_ZLIB_DEFLATE is not set
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index e7bcb5d14214..bf27843cf3dd 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -418,7 +418,7 @@ unsigned long get_wchan(struct task_struct *p)
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack_page = 4096 + (unsigned long)p;
+ stack_page = 4096 + (unsigned long)p->thread_info;
fp = thread_saved_fp(p);
do {
if (fp < stack_page || fp > 4092+stack_page)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 676630ca56f9..8652bd96fead 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -74,6 +74,9 @@ struct cpu_tlb_fns cpu_tlb;
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
+#ifdef MULTI_CACHE
+struct cpu_cache_fns cpu_cache;
+#endif
unsigned char aux_device_present;
char elf_platform[ELF_PLATFORM_SIZE];
@@ -282,6 +285,9 @@ static void __init setup_processor(void)
#ifdef MULTI_USER
cpu_user = *list->user;
#endif
+#ifdef MULTI_CACHE
+ cpu_cache = *list->cache;
+#endif
printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
cpu_name, processor_id, (int)processor_id & 15,
@@ -323,58 +329,77 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
return list;
}
+static void __init early_initrd(char **p)
+{
+ unsigned long start, size;
+
+ start = memparse(*p, p);
+ if (**p == ',') {
+ size = memparse((*p) + 1, p);
+
+ phys_initrd_start = start;
+ phys_initrd_size = size;
+ }
+}
+__early_param("initrd=", early_initrd);
+
/*
- * Initial parsing of the command line. We need to pick out the
- * memory size. We look for mem=size@start, where start and size
- * are "size[KkMm]"
+ * Pick out the memory size. We look for mem=size@start,
+ * where start and size are "size[KkMm]"
*/
-static void __init
-parse_cmdline(struct meminfo *mi, char **cmdline_p, char *from)
+static void __init early_mem(char **p)
+{
+ static int usermem __initdata = 0;
+ unsigned long size, start;
+
+ /*
+ * If the user specifies memory size, we
+ * blow away any automatically generated
+ * size.
+ */
+ if (usermem == 0) {
+ usermem = 1;
+ meminfo.nr_banks = 0;
+ }
+
+ start = PHYS_OFFSET;
+ size = memparse(*p, p);
+ if (**p == '@')
+ start = memparse(*p + 1, p);
+
+ meminfo.bank[meminfo.nr_banks].start = start;
+ meminfo.bank[meminfo.nr_banks].size = size;
+ meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start);
+ meminfo.nr_banks += 1;
+}
+__early_param("mem=", early_mem);
+
+/*
+ * Initial parsing of the command line.
+ */
+static void __init parse_cmdline(char **cmdline_p, char *from)
{
char c = ' ', *to = command_line;
- int usermem = 0, len = 0;
+ int len = 0;
for (;;) {
- if (c == ' ' && !memcmp(from, "mem=", 4)) {
- unsigned long size, start;
-
- if (to != command_line)
- to -= 1;
-
- /*
- * If the user specifies memory size, we
- * blow away any automatically generated
- * size.
- */
- if (usermem == 0) {
- usermem = 1;
- mi->nr_banks = 0;
- }
-
- start = PHYS_OFFSET;
- size = memparse(from + 4, &from);
- if (*from == '@')
- start = memparse(from + 1, &from);
-
- mi->bank[mi->nr_banks].start = start;
- mi->bank[mi->nr_banks].size = size;
- mi->bank[mi->nr_banks].node = PHYS_TO_NID(start);
- mi->nr_banks += 1;
- } else if (c == ' ' && !memcmp(from, "initrd=", 7)) {
- unsigned long start, size;
-
- /*
- * Remove space character
- */
- if (to != command_line)
- to -= 1;
-
- start = memparse(from + 7, &from);
- if (*from == ',') {
- size = memparse(from + 1, &from);
-
- phys_initrd_start = start;
- phys_initrd_size = size;
+ if (c == ' ') {
+ extern struct early_params __early_begin, __early_end;
+ struct early_params *p;
+
+ for (p = &__early_begin; p < &__early_end; p++) {
+ int len = strlen(p->arg);
+
+ if (memcmp(from, p->arg, len) == 0) {
+ if (to != command_line)
+ to -= 1;
+ from += len;
+ p->fn(&from);
+
+ while (*from != ' ' && *from != '\0')
+ from++;
+ break;
+ }
}
}
c = *from++;
@@ -536,6 +561,8 @@ __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
static int __init parse_tag_initrd(const struct tag *tag)
{
+ printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+ "please update your bootloader.\n");
phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
phys_initrd_size = tag->u.initrd.size;
return 0;
@@ -668,7 +695,7 @@ void __init setup_arch(char **cmdline_p)
memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
- parse_cmdline(&meminfo, cmdline_p, from);
+ parse_cmdline(cmdline_p, from);
bootmem_init(&meminfo);
paging_init(&meminfo, mdesc);
request_standard_resources(&meminfo, mdesc);
diff --git a/arch/arm/mach-integrator/cpu.c b/arch/arm/mach-integrator/cpu.c
index 9cfe7399a5dd..991a37e170f4 100644
--- a/arch/arm/mach-integrator/cpu.c
+++ b/arch/arm/mach-integrator/cpu.c
@@ -12,6 +12,7 @@
* CPU support functions
*/
#include <linux/config.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/cpufreq.h>
@@ -173,9 +174,9 @@ static int integrator_set_target(struct cpufreq_policy *policy,
return 0;
}
-static int integrator_cpufreq_init(struct cpufreq *policy)
+static int integrator_cpufreq_init(struct cpufreq_policy *policy)
{
- unsigned long cus_allowed;
+ unsigned long cpus_allowed;
unsigned int cpu = policy->cpu;
u_int cm_osc, cm_stat, mem_freq_khz;
struct vco vco;
diff --git a/arch/arm/mach-integrator/irq.c b/arch/arm/mach-integrator/irq.c
index 8bda29873bfa..496301c4c69e 100644
--- a/arch/arm/mach-integrator/irq.c
+++ b/arch/arm/mach-integrator/irq.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
+#include <linux/list.h>
#include <asm/hardware.h>
#include <asm/irq.h>
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index e57c2ccc10cd..b8312f498f63 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/module.h>
+#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/serial_core.h>
#include <linux/delay.h>
@@ -25,6 +26,7 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 1be14732d61b..747343d21eb7 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -20,16 +20,16 @@ obj-$(CONFIG_DISCONTIGMEM) += discontig.o
p-$(CONFIG_CPU_26) += proc-arm2_3.o
# ARMv3
-p-$(CONFIG_CPU_ARM610) += proc-arm6_7.o tlb-v3.o copypage-v3.o
-p-$(CONFIG_CPU_ARM710) += proc-arm6_7.o tlb-v3.o copypage-v3.o
+p-$(CONFIG_CPU_ARM610) += proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
+p-$(CONFIG_CPU_ARM710) += proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
# ARMv4
-p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o copypage-v4wt.o abort-lv4t.o
-p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o copypage-v4wb.o abort-ev4.o minicache.o
-p-$(CONFIG_CPU_SA1100) += proc-sa110.o tlb-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
+p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o cache-v4.o copypage-v4wt.o abort-lv4t.o
+p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o cache-v4wb.o copypage-v4wb.o abort-ev4.o
+p-$(CONFIG_CPU_SA1100) += proc-sa1100.o tlb-v4wb.o cache-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
# ARMv5
p-$(CONFIG_CPU_ARM926T) += proc-arm926.o tlb-v4wbi.o copypage-v4wb.o abort-ev5tej.o
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
new file mode 100644
index 000000000000..a222e78af7e6
--- /dev/null
+++ b/arch/arm/mm/cache-v3.S
@@ -0,0 +1,118 @@
+/*
+ * linux/arch/arm/mm/cache-v3.S
+ *
+ * Copyright (C) 1997-2002 Russell king
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/hardware.h>
+#include <asm/page.h>
+#include "proc-macros.S"
+
+/*
+ * flush_user_cache_all()
+ *
+ * Invalidate all cache entries in a particular address
+ * space.
+ *
+ * - mm - mm_struct describing address space
+ */
+ENTRY(v3_flush_user_cache_all)
+ /* FALLTHROUGH */
+/*
+ * flush_kern_cache_all()
+ *
+ * Clean and invalidate the entire cache.
+ */
+ENTRY(v3_flush_kern_cache_all)
+ /* FALLTHROUGH */
+
+/*
+ * flush_user_cache_range(start, end, vm_flags)
+ *
+ * Invalidate a range of cache entries in the specified
+ * address space.
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ * - vma - vma_area_struct describing address space
+ */
+ENTRY(v3_flush_user_cache_range)
+ mov ip, #0
+ mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
+ mov pc, lr
+
+/*
+ * coherent_kern_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_coherent_kern_range)
+ mov pc, lr
+
+/*
+ * flush_kern_dcache_page(void *page)
+ *
+ * Ensure no D cache aliasing occurs, either with itself or
+ * the I cache
+ *
+ * - addr - page aligned address
+ */
+ENTRY(v3_flush_kern_dcache_page)
+ /* FALLTHROUGH */
+
+/*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_dma_inv_range)
+ /* FALLTHROUGH */
+
+/*
+ * dma_flush_range(start, end)
+ *
+ * Clean and invalidate the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_dma_flush_range)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
+ /* FALLTHROUGH */
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_dma_clean_range)
+ mov pc, lr
+
+ENTRY(v3_cache_fns)
+ .long v3_flush_kern_cache_all
+ .long v3_flush_user_cache_all
+ .long v3_flush_user_cache_range
+ .long v3_coherent_kern_range
+ .long v3_flush_kern_dcache_page
+ .long v3_dma_inv_range
+ .long v3_dma_clean_range
+ .long v3_dma_flush_range
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
new file mode 100644
index 000000000000..e3dbea3a3e01
--- /dev/null
+++ b/arch/arm/mm/cache-v4.S
@@ -0,0 +1,120 @@
+/*
+ * linux/arch/arm/mm/cache-v4.S
+ *
+ * Copyright (C) 1997-2002 Russell king
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/hardware.h>
+#include <asm/page.h>
+#include "proc-macros.S"
+
+/*
+ * flush_user_cache_all()
+ *
+ * Invalidate all cache entries in a particular address
+ * space.
+ *
+ * - mm - mm_struct describing address space
+ */
+ENTRY(v4_flush_user_cache_all)
+ /* FALLTHROUGH */
+/*
+ * flush_kern_cache_all()
+ *
+ * Clean and invalidate the entire cache.
+ */
+ENTRY(v4_flush_kern_cache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
+ mov pc, lr
+
+/*
+ * flush_user_cache_range(start, end, vma)
+ *
+ * Invalidate a range of cache entries in the specified
+ * address space.
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ * - vma - vma_area_struct describing address space
+ */
+ENTRY(v4_flush_user_cache_range)
+ mov ip, #0
+ mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
+ mov pc, lr
+
+/*
+ * coherent_kern_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4_coherent_kern_range)
+ mov pc, lr
+
+/*
+ * flush_kern_dcache_page(void *page)
+ *
+ * Ensure no D cache aliasing occurs, either with itself or
+ * the I cache
+ *
+ * - addr - page aligned address
+ */
+ENTRY(v4_flush_kern_dcache_page)
+ /* FALLTHROUGH */
+
+/*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4_dma_inv_range)
+ /* FALLTHROUGH */
+
+/*
+ * dma_flush_range(start, end)
+ *
+ * Clean and invalidate the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4_dma_flush_range)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
+ /* FALLTHROUGH */
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4_dma_clean_range)
+ mov pc, lr
+
+ENTRY(v4_cache_fns)
+ .long v4_flush_kern_cache_all
+ .long v4_flush_user_cache_all
+ .long v4_flush_user_cache_range
+ .long v4_coherent_kern_range
+ .long v4_flush_kern_dcache_page
+ .long v4_dma_inv_range
+ .long v4_dma_clean_range
+ .long v4_dma_flush_range
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
new file mode 100644
index 000000000000..883c377cbb87
--- /dev/null
+++ b/arch/arm/mm/cache-v4wb.S
@@ -0,0 +1,196 @@
+/*
+ * linux/arch/arm/mm/cache-v4wb.S
+ *
+ * Copyright (C) 1997-2002 Russell king
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/hardware.h>
+#include <asm/page.h>
+#include "proc-macros.S"
+
+/*
+ * The size of one data cache line.
+ */
+#define CACHE_DLINESIZE 32
+
+/*
+ * The total size of the data cache.
+ */
+#if defined(CONFIG_CPU_SA110)
+# define CACHE_DSIZE 16384
+#elif defined(CONFIG_CPU_SA1100)
+# define CACHE_DSIZE 8192
+#else
+# error Unknown cache size
+#endif
+
+/*
+ * This is the size at which it becomes more efficient to
+ * clean the whole cache, rather than using the individual
+ * cache line maintainence instructions.
+ *
+ * Size Clean (ticks) Dirty (ticks)
+ * 4096 21 20 21 53 55 54
+ * 8192 40 41 40 106 100 102
+ * 16384 77 77 76 140 140 138
+ * 32768 150 149 150 214 216 212 <---
+ * 65536 296 297 296 351 358 361
+ * 131072 591 591 591 656 657 651
+ * Whole 132 136 132 221 217 207 <---
+ */
+#define CACHE_DLIMIT (CACHE_DSIZE * 4)
+
+/*
+ * flush_user_cache_all()
+ *
+ * Clean and invalidate all cache entries in a particular address
+ * space.
+ */
+ENTRY(v4wb_flush_user_cache_all)
+ /* FALLTHROUGH */
+/*
+ * flush_kern_cache_all()
+ *
+ * Clean and invalidate the entire cache.
+ */
+ENTRY(v4wb_flush_kern_cache_all)
+ mov ip, #0
+ mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
+__flush_whole_cache:
+ mov r0, #FLUSH_BASE
+ add r1, r0, #CACHE_DSIZE
+1: ldr r2, [r0], #32
+ cmp r0, r1
+ blo 1b
+ mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
+ mov pc, lr
+
+/*
+ * flush_user_cache_range(start, end, vm_flags)
+ *
+ * Invalidate a range of cache entries in the specified
+ * address space.
+ *
+ * - start - start address (inclusive, page aligned)
+ * - end - end address (exclusive, page aligned)
+ * - vma - vma_area_struct describing address space
+ */
+ENTRY(v4wb_flush_user_cache_range)
+ sub r3, r1, r0 @ calculate total size
+ tst r2, #VM_EXEC @ executable region?
+ mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
+
+ cmp r3, #CACHE_DLIMIT @ total size >= limit?
+ bhs __flush_whole_cache @ flush whole D cache
+
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ tst r2, #VM_EXEC
+ mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
+ mov pc, lr
+
+/*
+ * flush_kern_dcache_page(void *page)
+ *
+ * Ensure no D cache aliasing occurs, either with itself or
+ * the I cache
+ *
+ * - addr - page aligned address
+ */
+ENTRY(v4wb_flush_kern_dcache_page)
+ add r1, r0, #PAGE_SZ
+ /* fall through */
+
+/*
+ * coherent_kern_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4wb_coherent_kern_range)
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mov ip, #0
+ mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov pc, lr
+
+
+/*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4wb_dma_inv_range)
+ tst r0, #CACHE_DLINESIZE - 1
+ bic r0, r0, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
+ tst r1, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
+1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ mov pc, lr
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4wb_dma_clean_range)
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ mov pc, lr
+
+/*
+ * dma_flush_range(start, end)
+ *
+ * Clean and invalidate the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * This is actually the same as v4wb_coherent_kern_range()
+ */
+ .globl v4wb_dma_flush_range
+ .set v4wb_dma_flush_range, v4wb_coherent_kern_range
+
+ENTRY(v4wb_cache_fns)
+ .long v4wb_flush_kern_cache_all
+ .long v4wb_flush_user_cache_all
+ .long v4wb_flush_user_cache_range
+ .long v4wb_coherent_kern_range
+ .long v4wb_flush_kern_dcache_page
+ .long v4wb_dma_inv_range
+ .long v4wb_dma_clean_range
+ .long v4wb_dma_flush_range
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
new file mode 100644
index 000000000000..f1a76d632900
--- /dev/null
+++ b/arch/arm/mm/cache-v4wt.S
@@ -0,0 +1,170 @@
+/*
+ * linux/arch/arm/mm/cache-v4wt.S
+ *
+ * Copyright (C) 1997-2002 Russell king
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * ARMv4 write through cache operations support.
+ *
+ * We assume that the write buffer is not enabled.
+ */
+#include <linux/linkage.h>
+#include <asm/hardware.h>
+#include <asm/page.h>
+#include "proc-macros.S"
+
+/*
+ * The size of one data cache line.
+ */
+#define CACHE_DLINESIZE 32
+
+/*
+ * The number of data cache segments.
+ */
+#define CACHE_DSEGMENTS 8
+
+/*
+ * The number of lines in a cache segment.
+ */
+#define CACHE_DENTRIES 64
+
+/*
+ * This is the size at which it becomes more efficient to
+ * clean the whole cache, rather than using the individual
+ * cache line maintainence instructions.
+ *
+ * *** This needs benchmarking
+ */
+#define CACHE_DLIMIT 16384
+
+/*
+ * flush_user_cache_all()
+ *
+ * Invalidate all cache entries in a particular address
+ * space.
+ */
+ENTRY(v4wt_flush_user_cache_all)
+ /* FALLTHROUGH */
+/*
+ * flush_kern_cache_all()
+ *
+ * Clean and invalidate the entire cache.
+ */
+ENTRY(v4wt_flush_kern_cache_all)
+ mov r2, #VM_EXEC
+ mov ip, #0
+__flush_whole_cache:
+ tst r2, #VM_EXEC
+ mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
+ mov pc, lr
+
+/*
+ * flush_user_cache_range(start, end, vm_flags)
+ *
+ * Clean and invalidate a range of cache entries in the specified
+ * address space.
+ *
+ * - start - start address (inclusive, page aligned)
+ * - end - end address (exclusive, page aligned)
+ * - vma - vma_area_struct describing address space
+ */
+ENTRY(v4wt_flush_user_cache_range)
+ sub r3, r1, r0 @ calculate total size
+ cmp r3, #CACHE_DLIMIT
+ bhs __flush_whole_cache
+
+1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
+ tst r2, #VM_EXEC
+ mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mov pc, lr
+
+/*
+ * coherent_kern_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4wt_coherent_kern_range)
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mov pc, lr
+
+/*
+ * flush_kern_dcache_page(void *page)
+ *
+ * Ensure no D cache aliasing occurs, either with itself or
+ * the I cache
+ *
+ * - addr - page aligned address
+ */
+ENTRY(v4wt_flush_kern_dcache_page)
+ mov r2, #0
+ mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
+ add r1, r0, #PAGE_SZ
+ /* fallthrough */
+
+/*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4wt_dma_inv_range)
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ /* FALLTHROUGH */
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4wt_dma_clean_range)
+ mov pc, lr
+
+/*
+ * dma_flush_range(start, end)
+ *
+ * Clean and invalidate the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ .globl v4wt_dma_flush_range
+ .equ v4wt_dma_flush_range, v4wt_dma_inv_range
+
+ENTRY(v4wt_cache_fns)
+ .long v4wt_flush_kern_cache_all
+ .long v4wt_flush_user_cache_all
+ .long v4wt_flush_user_cache_range
+ .long v4wt_coherent_kern_range
+ .long v4wt_flush_kern_dcache_page
+ .long v4wt_dma_inv_range
+ .long v4wt_dma_clean_range
+ .long v4wt_dma_flush_range
+
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index d67ce3eb8029..265c8fbe868e 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -161,11 +161,11 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle,
/*
* Invalidate any data that might be lurking in the
- * kernel direct-mapped region.
+ * kernel direct-mapped region for device DMA.
*/
{
unsigned long kaddr = (unsigned long)page_address(page);
- invalidate_dcache_range(kaddr, kaddr + size);
+ dmac_inv_range(kaddr, kaddr + size);
}
/*
@@ -330,7 +330,7 @@ static int __init consistent_init(void)
core_initcall(consistent_init);
/*
- * make an area consistent.
+ * make an area consistent for devices.
*/
void consistent_sync(void *vaddr, size_t size, int direction)
{
@@ -339,13 +339,13 @@ void consistent_sync(void *vaddr, size_t size, int direction)
switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */
- invalidate_dcache_range(start, end);
+ dmac_inv_range(start, end);
break;
case DMA_TO_DEVICE: /* writeback only */
- clean_dcache_range(start, end);
+ dmac_clean_range(start, end);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- flush_dcache_range(start, end);
+ dmac_flush_range(start, end);
break;
default:
BUG();
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 940b75d75bb9..8783e805700e 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -184,9 +184,8 @@ void __flush_dcache_page(struct page *page)
{
struct mm_struct *mm = current->active_mm;
struct list_head *l;
- unsigned long kaddr = (unsigned long)page_address(page);
- cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
+ __cpuc_flush_dcache_page(page_address(page));
if (!page->mapping)
return;
@@ -291,10 +290,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
page = pfn_to_page(pfn);
if (page->mapping) {
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
- unsigned long kaddr = (unsigned long)page_address(page);
if (dirty)
- cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
+ __cpuc_flush_dcache_page(page_address(page));
make_coherent(vma, addr, page, dirty);
}
diff --git a/arch/arm/mm/fault-common.c b/arch/arm/mm/fault-common.c
index 3193a6e9b8c0..e7d602c1d45f 100644
--- a/arch/arm/mm/fault-common.c
+++ b/arch/arm/mm/fault-common.c
@@ -12,19 +12,15 @@
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
#include <linux/string.h>
-#include <linux/types.h>
#include <linux/ptrace.h>
-#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "fault.h"
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 33e0237223f9..3017f7b1481f 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -24,30 +24,82 @@
#include <asm/mach/map.h>
+static unsigned int cachepolicy __initdata = PMD_SECT_WB;
+static unsigned int ecc_mask __initdata = 0;
+
+struct cachepolicy {
+ char *policy;
+ unsigned int cr_mask;
+ unsigned int pmd;
+};
+
+static struct cachepolicy cache_policies[] __initdata = {
+ { "uncached", CR1_W|CR1_C, PMD_SECT_UNCACHED },
+ { "buffered", CR1_C, PMD_SECT_BUFFERED },
+ { "writethrough", 0, PMD_SECT_WT },
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ { "writeback", 0, PMD_SECT_WB },
+ { "writealloc", 0, PMD_SECT_WBWA }
+#endif
+};
+
/*
* These are useful for identifing cache coherency
* problems by allowing the cache or the cache and
* writebuffer to be turned off. (Note: the write
* buffer should not be on and the cache off).
*/
-static int __init nocache_setup(char *__unused)
+static void __init early_cachepolicy(char **p)
{
- cr_alignment &= ~CR1_C;
- cr_no_alignment &= ~CR1_C;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
+ int len = strlen(cache_policies[i].policy);
+
+ if (memcmp(*p, cache_policies[i].policy, len) == 0) {
+ cachepolicy = cache_policies[i].pmd;
+ cr_alignment &= ~cache_policies[i].cr_mask;
+ cr_no_alignment &= ~cache_policies[i].cr_mask;
+ *p += len;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(cache_policies))
+ printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
flush_cache_all();
set_cr(cr_alignment);
- return 1;
}
-static int __init nowrite_setup(char *__unused)
+static void __init early_nocache(char **__unused)
{
- cr_alignment &= ~(CR1_W|CR1_C);
- cr_no_alignment &= ~(CR1_W|CR1_C);
- flush_cache_all();
- set_cr(cr_alignment);
- return 1;
+ char *p = "buffered";
+ printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
+ early_cachepolicy(&p);
+}
+
+static void __init early_nowrite(char **__unused)
+{
+ char *p = "uncached";
+ printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
+ early_cachepolicy(&p);
+}
+
+static void __init early_ecc(char **p)
+{
+ if (memcmp(*p, "on", 2) == 0) {
+ ecc_mask = PMD_PROTECTION;
+ *p += 2;
+ } else if (memcmp(*p, "off", 3) == 0) {
+ ecc_mask = 0;
+ *p += 3;
+ }
}
+__early_param("nocache", early_nocache);
+__early_param("nowb", early_nowrite);
+__early_param("cachepolicy=", early_cachepolicy);
+__early_param("ecc=", early_ecc);
+
static int __init noalign_setup(char *__unused)
{
cr_alignment &= ~CR1_A;
@@ -57,8 +109,6 @@ static int __init noalign_setup(char *__unused)
}
__setup("noalign", noalign_setup);
-__setup("nocache", nocache_setup);
-__setup("nowb", nowrite_setup);
#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
@@ -197,7 +247,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
pmdval = __pa(ptep) | prot_l1;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
- cpu_flush_pmd(pmdp);
+ flush_pmd_entry(pmdp);
}
ptep = pte_offset_kernel(pmdp, virt);
@@ -231,32 +281,20 @@ static struct mem_types mem_types[] __initdata = {
.domain = DOMAIN_IO,
},
[MT_CACHECLEAN] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
- .prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
.domain = DOMAIN_KERNEL,
},
[MT_MINICLEAN] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_CACHEABLE,
- .prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
.domain = DOMAIN_KERNEL,
},
[MT_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
L_PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
- .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
.domain = DOMAIN_USER,
},
[MT_MEMORY] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
- L_PTE_EXEC | L_PTE_WRITE,
- .prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
}
@@ -268,37 +306,50 @@ static struct mem_types mem_types[] __initdata = {
static void __init build_mem_type_table(void)
{
int cpu_arch = cpu_architecture();
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- int writethrough = 1;
-#else
- int writethrough = 0;
-#endif
- int writealloc = 0, ecc = 0;
+ const char *policy;
- if (cpu_arch < CPU_ARCH_ARMv5) {
- writealloc = 0;
- ecc = 0;
+ /*
+ * ARMv5 can use ECC memory.
+ */
+ if (cpu_arch == CPU_ARCH_ARMv5) {
+ mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
+ mem_types[MT_MEMORY].prot_sect |= ecc_mask;
+ } else {
mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
+ if (cachepolicy == PMD_SECT_WBWA)
+ cachepolicy = PMD_SECT_WB;
+ ecc_mask = 0;
}
- if (writethrough) {
+ mem_types[MT_MEMORY].prot_sect |= cachepolicy;
+
+ switch (cachepolicy) {
+ default:
+ case PMD_SECT_UNCACHED:
+ policy = "uncached";
+ break;
+ case PMD_SECT_BUFFERED:
+ mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE;
+ policy = "buffered";
+ break;
+ case PMD_SECT_WT:
+ mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
- mem_types[MT_VECTORS].prot_sect |= PMD_SECT_WT;
- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WT;
- } else {
+ policy = "write through";
+ break;
+ case PMD_SECT_WB:
+ mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
- mem_types[MT_VECTORS].prot_sect |= PMD_SECT_WB;
-
- if (writealloc)
- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WBWA;
- else
- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WB;
- }
-
- if (ecc) {
- mem_types[MT_VECTORS].prot_sect |= PMD_PROTECTION;
- mem_types[MT_MEMORY].prot_sect |= PMD_PROTECTION;
+ policy = "write back";
+ break;
+ case PMD_SECT_WBWA:
+ mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
+ policy = "write back, write allocate";
+ break;
}
+ printk("Memory policy: ECC %sabled, Data cache %s\n",
+ ecc_mask ? "en" : "dis", policy);
}
/*
@@ -330,6 +381,14 @@ static void __init create_mapping(struct map_desc *md)
off = md->physical - virt;
length = md->length;
+ if (mem_types[md->type].prot_l1 == 0 &&
+ (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
+ printk(KERN_WARNING "MM: map for 0x%08lx at 0x%08lx can not "
+ "be mapped using pages, ignoring.\n",
+ md->physical, md->virtual);
+ return;
+ }
+
while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
alloc_init_page(virt, virt + off, prot_l1, prot_pte);
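Editorial aside (not part of the patch): the mm-armv.c hunk above replaces the old "nocache"/"nowb" boot flags with early "cachepolicy=" and "ecc=" parameters. A hedged example of how they might appear on an ARM kernel command line; the console and root arguments are placeholders only.

	console=ttyS0 root=/dev/ram cachepolicy=writethrough ecc=on

Per the parsing code shown, cachepolicy= accepts uncached, buffered, writethrough, writeback or writealloc (the last two only when CONFIG_CPU_DCACHE_WRITETHROUGH is not set), and ecc= accepts on or off.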
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index adadfd9fb410..31bca78f8768 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -43,27 +43,29 @@
#define MAX_AREA_SIZE 32768
/*
- * the cache line size of the I and D cache
+ * The size of one data cache line.
*/
-#define DCACHELINESIZE 32
-#define ICACHELINESIZE 32
+#define CACHE_DLINESIZE 32
/*
- * and the page size
+ * The number of data cache segments.
*/
-#define PAGESIZE 4096
+#define CACHE_DSEGMENTS 16
- .text
/*
- * cpu_arm1020_check_bugs()
+ * The number of lines in a cache segment.
*/
-ENTRY(cpu_arm1020_check_bugs)
- mrs ip, cpsr
- bic ip, ip, #PSR_F_BIT
- msr cpsr, ip
- mov pc, lr
+#define CACHE_DENTRIES 64
/*
+ * This is the size at which it becomes more efficient to
+ * clean the whole cache, rather than using the individual
+ * cache line maintenance instructions.
+ */
+#define CACHE_DLIMIT 32768
+
+ .text
+/*
* cpu_arm1020_proc_init()
*/
ENTRY(cpu_arm1020_proc_init)
@@ -114,230 +116,233 @@ ENTRY(cpu_arm1020_do_idle)
/* ================================= CACHE ================================ */
-
+ .align 5
/*
- * cpu_arm1020_cache_clean_invalidate_all()
+ * flush_user_cache_all()
*
- * clean and invalidate all cache lines
+ * Invalidate all cache entries in a particular address
+ * space.
+ */
+ENTRY(arm1020_flush_user_cache_all)
+ /* FALLTHROUGH */
+/*
+ * flush_kern_cache_all()
*
- * Note:
- * 1. we should preserve r0 at all times
+ * Clean and invalidate the entire cache.
*/
- .align 5
-ENTRY(cpu_arm1020_cache_clean_invalidate_all)
- mov r2, #1
-cpu_arm1020_cache_clean_invalidate_all_r2:
+ENTRY(arm1020_flush_kern_cache_all)
+ mov r2, #VM_EXEC
+ mov ip, #0
+__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
- mcr p15, 0, ip, c7, c10, 4
-
- mov r1, #0xf @ 16 segments
-1: mov r3, #0x3F @ 64 entries
-2: mov ip, r3, LSL #26 @ shift up entry
- orr ip, ip, r1, LSL #5 @ shift in/up index
- mcr p15, 0, ip, c7, c14, 2 @ Clean & Inval DCache entry
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
- subs r3, r3, #1
- cmp r3, #0
- bge 2b @ entries 3F to 0
- subs r1, r1, #1
- cmp r1, #0
- bge 1b @ segments 7 to 0
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
+1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
+2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ subs r3, r3, #1 << 26
+ bcs 2b @ entries 63 to 0
+ subs r1, r1, #1 << 5
+ bcs 1b @ segments 15 to 0
#endif
+ tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
- teq r2, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm1020_cache_clean_invalidate_range(start, end, flags)
+ * flush_user_cache_range(start, end, flags)
*
- * clean and invalidate all cache lines associated with this area of memory
+ * Invalidate a range of cache entries in the specified
+ * address space.
*
- * start: Area start address
- * end: Area end address
- * flags: nonzero for I cache as well
+ * - start - start address (inclusive)
+ * - end - end address (exclusive)
+ * - flags - vm_flags for this space
*/
- .align 5
-ENTRY(cpu_arm1020_cache_clean_invalidate_range)
- bic r0, r0, #DCACHELINESIZE - 1
- sub r3, r1, r0
- cmp r3, #MAX_AREA_SIZE
- bgt cpu_arm1020_cache_clean_invalidate_all_r2
- mcr p15, 0, r3, c7, c10, 4
+ENTRY(arm1020_flush_user_cache_range)
+ mov ip, #0
+ sub r3, r1, r0 @ calculate total size
+ cmp r3, #CACHE_DLIMIT
+ bhs __flush_whole_cache
+
#ifndef CONFIG_CPU_DCACHE_DISABLE
-1: mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
- mcr p15, 0, r3, c7, c10, 4 @ drain WB
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
- mcr p15, 0, r3, c7, c10, 4 @ drain WB
- add r0, r0, #DCACHELINESIZE
+ mcr p15, 0, ip, c7, c10, 4
+1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
- blt 1b
+ blo 1b
#endif
-
+ tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
- teq r2, #0
- movne r0, #0
- mcrne p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif
+ mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
-/* ================================ D-CACHE =============================== */
-
/*
- * cpu_arm1020_dcache_invalidate_range(start, end)
+ * coherent_kern_range(start, end)
*
- * throw away all D-cached data in specified region without an obligation
- * to write them back. Note however that we must clean the D-cached entries
- * around the boundaries if the start and/or end address are not cache
- * aligned.
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
*
- * start: virtual start address
- * end: virtual end address
+ * - start - virtual start address
+ * - end - virtual end address
*/
- .align 5
-ENTRY(cpu_arm1020_dcache_invalidate_range)
+ENTRY(arm1020_coherent_kern_range)
+ mov ip, #0
+ bic r0, r0, #CACHE_DLINESIZE - 1
+ mcr p15, 0, ip, c7, c10, 4
+1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
- /* D cache are on */
- tst r0, #DCACHELINESIZE - 1
- bic r0, r0, #DCACHELINESIZE - 1
- mcrne p15, 0, r0, c7, c10, 4
- mcrne p15, 0, r0, c7, c10, 1 @ clean D entry at start
- mcrne p15, 0, r0, c7, c10, 4 @ drain WB
- tst r1, #DCACHELINESIZE - 1
- mcrne p15, 0, r1, c7, c10, 4
- mcrne p15, 0, r1, c7, c10, 1 @ clean D entry at end
- mcrne p15, 0, r1, c7, c10, 4 @ drain WB
-
-1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- add r0, r0, #DCACHELINESIZE
- cmp r0, r1
- blt 1b
-#else
- /* D cache off, but still drain the write buffer */
- mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
+ mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
#endif
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
+#endif
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm1020_dcache_clean_range(start, end)
+ * flush_kern_dcache_page(void *page)
*
- * For the specified virtual address range, ensure that all caches contain
- * clean data, such that peripheral accesses to the physical RAM fetch
- * correct data.
+ * Ensure no D cache aliasing occurs, either with itself or
+ * the I cache
*
- * start: virtual start address
- * end: virtual end address
+ * - page - page aligned address
*/
- .align 5
-ENTRY(cpu_arm1020_dcache_clean_range)
- bic r0, r0, #DCACHELINESIZE - 1
- sub r3, r1, r0
- cmp r3, #MAX_AREA_SIZE
- bgt cpu_arm1020_cache_clean_invalidate_all_r2
- mcr p15, 0, r3, c7, c10, 4
+ENTRY(arm1020_flush_kern_dcache_page)
+ mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
-1: mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
- mcr p15, 0, r3, c7, c10, 4 @ drain WB
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
- mcr p15, 0, r3, c7, c10, 4 @ drain WB
- add r0, r0, #DCACHELINESIZE
+ add r1, r0, #PAGE_SZ
+1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
- blt 1b
+ blo 1b
#endif
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm1020_dcache_clean_page(page)
+ * dma_inv_range(start, end)
*
- * Cleans a single page of dcache so that if we have any future aliased
- * mappings, they will be consistent at the time that they are created.
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
*
- * page: virtual address of page to clean from dcache
+ * - start - virtual start address
+ * - end - virtual end address
*
- * Note:
- * 1. we don't need to flush the write buffer in this case.
- * 2. we don't invalidate the entries since when we write the page
- * out to disk, the entries may get reloaded into the cache.
+ * (same as v4wb)
*/
- .align 5
-ENTRY(cpu_arm1020_dcache_clean_page)
- mov r1, #PAGESIZE
- mcr p15, 0, r0, c7, c10, 4
+ENTRY(arm1020_dma_inv_range)
+ mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry (drain is done by TLB fns)
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
- add r0, r0, #DCACHELINESIZE
- subs r1, r1, #2 * DCACHELINESIZE
- bhi 1b
+ tst r0, #CACHE_DLINESIZE - 1
+ bic r0, r0, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, ip, c7, c10, 4
+ mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcrne p15, 0, ip, c7, c10, 4 @ drain WB
+ tst r1, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, ip, c7, c10, 4
+ mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
+ mcrne p15, 0, ip, c7, c10, 4 @ drain WB
+1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
#endif
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm1020_dcache_clean_entry(addr)
+ * dma_clean_range(start, end)
*
- * Clean the specified entry of any caches such that the MMU
- * translation fetches will obtain correct data.
+ * Clean the specified virtual address range.
*
- * addr: cache-unaligned virtual address
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * (same as v4wb)
*/
- .align 5
-ENTRY(cpu_arm1020_dcache_clean_entry)
- mov r1, #0
- mcr p15, 0, r1, c7, c10, 4
+ENTRY(arm1020_dma_clean_range)
+ mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
- mcr p15, 0, r0, c7, c10, 1 @ clean single D entry
- mcr p15, 0, r1, c7, c10, 4 @ drain WB
-#endif
-#ifndef CONFIG_CPU_ICACHE_DISABLE
- mcr p15, 0, r1, c7, c5, 1 @ invalidate I entry
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
#endif
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
-/* ================================ I-CACHE =============================== */
-
/*
- * cpu_arm1020_icache_invalidate_range(start, end)
+ * dma_flush_range(start, end)
*
- * invalidate a range of virtual addresses from the Icache
+ * Clean and invalidate the specified virtual address range.
*
- * start: virtual start address
- * end: virtual end address
+ * - start - virtual start address
+ * - end - virtual end address
*/
- .align 5
-ENTRY(cpu_arm1020_icache_invalidate_range)
-1: mcr p15, 0, r0, c7, c10, 4
+ENTRY(arm1020_dma_flush_range)
+ mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
- mcr p15, 0, r0, c7, c10, 1 @ Clean D entry
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ Clean D entry
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
-#endif
- add r0, r0, #DCACHELINESIZE
+ bic r0, r0, #CACHE_DLINESIZE - 1
+ mcr p15, 0, ip, c7, c10, 4
+1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
-ENTRY(cpu_arm1020_icache_invalidate_page)
- mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov pc, lr
+
+ENTRY(arm1020_cache_fns)
+ .long arm1020_flush_kern_cache_all
+ .long arm1020_flush_user_cache_all
+ .long arm1020_flush_user_cache_range
+ .long arm1020_coherent_kern_range
+ .long arm1020_flush_kern_dcache_page
+ .long arm1020_dma_inv_range
+ .long arm1020_dma_clean_range
+ .long arm1020_dma_flush_range
+
+ .align 5
+ENTRY(cpu_arm1020_dcache_clean_area)
+#ifndef CONFIG_CPU_DCACHE_DISABLE
+ mov ip, #0
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ add r0, r0, #CACHE_DLINESIZE
+ subs r1, r1, #CACHE_DLINESIZE
+ bhi 1b
+#endif
mov pc, lr
/* =============================== PageTable ============================== */
/*
- * cpu_arm1020_set_pgd(pgd)
+ * cpu_arm1020_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm1020_set_pgd)
+ENTRY(cpu_arm1020_switch_mm)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r3, c7, c10, 4
mov r1, #0xF @ 16 segments
@@ -365,23 +370,6 @@ ENTRY(cpu_arm1020_set_pgd)
mov pc, lr
/*
- * cpu_arm1020_flush_pmd(pmdp)
- *
- * Set a level 1 translation table entry, and clean it out of
- * any caches such that the MMUs can load it correctly.
- *
- * pmdp: pointer to PMD entry
- */
- .align 5
-ENTRY(cpu_arm1020_flush_pmd)
-#ifndef CONFIG_CPU_DCACHE_DISABLE
- mcr p15, 0, r0, c7, c10, 4
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry (drain is done by TLB fns)
-#endif
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
- mov pc, lr
-
-/*
* cpu_arm1020_set_pte(ptep, pte)
*
* Set a PTE and flush it out
@@ -396,7 +384,7 @@ ENTRY(cpu_arm1020_set_pte)
bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL
- tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec?
+ tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
@@ -489,29 +477,12 @@ __arm1020_setup:
.type arm1020_processor_functions, #object
arm1020_processor_functions:
.word v4t_early_abort
- .word cpu_arm1020_check_bugs
.word cpu_arm1020_proc_init
.word cpu_arm1020_proc_fin
.word cpu_arm1020_reset
.word cpu_arm1020_do_idle
-
- /* cache */
- .word cpu_arm1020_cache_clean_invalidate_all
- .word cpu_arm1020_cache_clean_invalidate_range
-
- /* dcache */
- .word cpu_arm1020_dcache_invalidate_range
- .word cpu_arm1020_dcache_clean_range
- .word cpu_arm1020_dcache_clean_page
- .word cpu_arm1020_dcache_clean_entry
-
- /* icache */
- .word cpu_arm1020_icache_invalidate_range
- .word cpu_arm1020_icache_invalidate_page
-
- /* pgtable */
- .word cpu_arm1020_set_pgd
- .word cpu_arm1020_flush_pmd
+ .word cpu_arm1020_dcache_clean_area
+ .word cpu_arm1020_switch_mm
.word cpu_arm1020_set_pte
.size arm1020_processor_functions, . - arm1020_processor_functions
@@ -542,4 +513,5 @@ __arm1020_proc_info:
.long arm1020_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
+ .long arm1020_cache_fns
.size __arm1020_proc_info, . - __arm1020_proc_info
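Editorial aside (not part of the patch): the __flush_whole_cache loop in the arm1020 hunk above walks the D-cache by index, with the entry number in bits [31:26] and the segment in bits [9:5] of the value fed to "mcr p15, 0, r3, c7, c14, 2". A hedged C sketch that enumerates the same index words, purely to make the bit layout explicit; the field placement is read off the assembly, not from any CPU manual.

	#define CACHE_DSEGMENTS	16
	#define CACHE_DENTRIES	64

	static void emit_clean_invalidate_indices(void)
	{
		int seg, ent;

		for (seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
			for (ent = CACHE_DENTRIES - 1; ent >= 0; ent--) {
				unsigned long idx = ((unsigned long)ent << 26) | (seg << 5);
				/* equivalent of: mcr p15, 0, idx, c7, c14, 2 */
				(void)idx;
			}
	}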
diff --git a/arch/arm/mm/proc-arm2_3.S b/arch/arm/mm/proc-arm2_3.S
index 69b584489135..cb5c71e9dc60 100644
--- a/arch/arm/mm/proc-arm2_3.S
+++ b/arch/arm/mm/proc-arm2_3.S
@@ -162,7 +162,7 @@ memc_phys_table_32:
* and inaccessible (0x01f00000).
* Params : r0 = page table pointer
*/
-clear_tables: ldr r1, _arm3_set_pgd - 4
+clear_tables: ldr r1, _arm3_switch_mm - 4
ldr r2, [r1]
sub r1, r0, #256 * 4 @ start of MEMC tables
add r2, r1, r2, lsl #2 @ end of tables
@@ -186,14 +186,16 @@ clear_tables: ldr r1, _arm3_set_pgd - 4
mov pc, lr
/*
- * Function: *_set_pgd(pgd_t *pgd)
+ * Function: *_switch_mm(pgd_t *pgd)
* Params : pgd New page tables/MEMC mapping
* Purpose : update MEMC hardware with new mapping
*/
.word page_nr
-_arm3_set_pgd: mcr p15, 0, r1, c1, c0, 0 @ flush cache
-_arm2_set_pgd: stmfd sp!, {lr}
- ldr r1, _arm3_set_pgd - 4
+_arm3_switch_mm:
+ mcr p15, 0, r1, c1, c0, 0 @ flush cache
+_arm2_switch_mm:
+ stmfd sp!, {lr}
+ ldr r1, _arm3_switch_mm - 4
ldr r2, [r1]
sub r0, r0, #256 * 4 @ start of MEMC tables
add r1, r0, r2, lsl #2 @ end of tables
@@ -273,9 +275,6 @@ _arm2_xchg_4: mov r2, pc
_arm3_xchg_4: swp r0, r0, [r1]
movs pc, lr
-_arm2_3_check_bugs:
- bics pc, lr, #0x04000000 @ Clear FIQ disable bit
-
cpu_arm2_name:
.asciz "ARM 2"
cpu_arm250_name:
@@ -290,28 +289,25 @@ cpu_arm3_name:
*/
.globl arm2_processor_functions
arm2_processor_functions:
- .word _arm2_3_check_bugs
.word _arm2_proc_init
.word _arm2_proc_fin
- .word _arm2_set_pgd
+ .word _arm2_switch_mm
.word _arm2_xchg_1
.word _arm2_xchg_4
.globl arm250_processor_functions
arm250_processor_functions:
- .word _arm2_3_check_bugs
.word _arm2_proc_init
.word _arm2_proc_fin
- .word _arm2_set_pgd
+ .word _arm2_switch_mm
.word _arm3_xchg_1
.word _arm3_xchg_4
.globl arm3_processor_functions
arm3_processor_functions:
- .word _arm2_3_check_bugs
.word _arm3_proc_init
.word _arm3_proc_fin
- .word _arm3_set_pgd
+ .word _arm3_switch_mm
.word _arm3_xchg_1
.word _arm3_xchg_4
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 76b6eed5366c..552332559076 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -188,20 +188,6 @@ Ldata_lateldrpostreg:
addeq r7, r0, r2
b Ldata_saver7
-/*
- * Function: arm6_7_check_bugs (void)
- * : arm6_7_proc_init (void)
- * : arm6_7_proc_fin (void)
- *
- * Notes : This processor does not require these
- */
-ENTRY(cpu_arm6_check_bugs)
-ENTRY(cpu_arm7_check_bugs)
- mrs ip, cpsr
- bic ip, ip, #PSR_F_BIT
- msr cpsr, ip
- mov pc, lr
-
ENTRY(cpu_arm6_proc_init)
ENTRY(cpu_arm7_proc_init)
mov pc, lr
@@ -220,13 +206,13 @@ ENTRY(cpu_arm7_do_idle)
mov pc, lr
/*
- * Function: arm6_7_set_pgd(unsigned long pgd_phys)
+ * Function: arm6_7_switch_mm(unsigned long pgd_phys)
* Params : pgd_phys Physical address of page table
* Purpose : Perform a task switch, saving the old processes state, and restoring
* the new.
*/
-ENTRY(cpu_arm6_set_pgd)
-ENTRY(cpu_arm7_set_pgd)
+ENTRY(cpu_arm6_switch_mm)
+ENTRY(cpu_arm7_switch_mm)
mov r1, #0
mcr p15, 0, r1, c7, c0, 0 @ flush cache
mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
@@ -234,17 +220,6 @@ ENTRY(cpu_arm7_set_pgd)
mov pc, lr
/*
- * Function: arm6_flush_pmd(pmdp)
- *
- * Params : r0 = Address to set
- *
- * Purpose : Set a PMD and flush it out of any WB cache
- */
-ENTRY(cpu_arm6_flush_pmd)
-ENTRY(cpu_arm7_flush_pmd)
- mov pc, lr
-
-/*
* Function: arm6_7_set_pte(pte_t *ptep, pte_t pte)
* Params : r0 = Address to set
* : r1 = value to set
@@ -324,7 +299,6 @@ __arm7_setup: mov r0, #0
.type arm6_processor_functions, #object
ENTRY(arm6_processor_functions)
.word cpu_arm6_data_abort
- .word cpu_arm6_check_bugs
.word cpu_arm6_proc_init
.word cpu_arm6_proc_fin
.word cpu_arm6_reset
@@ -345,8 +319,7 @@ ENTRY(arm6_processor_functions)
.word cpu_arm6_icache_invalidate_page
/* pgtable */
- .word cpu_arm6_set_pgd
- .word cpu_arm6_flush_pmd
+ .word cpu_arm6_switch_mm
.word cpu_arm6_set_pte
.size arm6_processor_functions, . - arm6_processor_functions
@@ -358,7 +331,6 @@ ENTRY(arm6_processor_functions)
.type arm7_processor_functions, #object
ENTRY(arm7_processor_functions)
.word cpu_arm7_data_abort
- .word cpu_arm7_check_bugs
.word cpu_arm7_proc_init
.word cpu_arm7_proc_fin
.word cpu_arm7_reset
@@ -379,8 +351,7 @@ ENTRY(arm7_processor_functions)
.word cpu_arm7_icache_invalidate_page
/* pgtable */
- .word cpu_arm7_set_pgd
- .word cpu_arm7_flush_pmd
+ .word cpu_arm7_switch_mm
.word cpu_arm7_set_pte
.size arm7_processor_functions, . - arm7_processor_functions
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 391dc601d84d..fa6f22d1c169 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -38,47 +38,12 @@
#include <asm/hardware.h>
/*
- * Function: arm720_cache_clean_invalidate_all (void)
- * : arm720_cache_clean_invalidate_page (unsigned long address, int size,
- * int flags)
- *
- * Params : address Area start address
- * : size size of area
- * : flags b0 = I cache as well
- *
- * Purpose : Flush all cache lines
- */
-ENTRY(cpu_arm720_cache_clean_invalidate_all)
-ENTRY(cpu_arm720_cache_clean_invalidate_range)
-ENTRY(cpu_arm720_icache_invalidate_range)
-ENTRY(cpu_arm720_icache_invalidate_page)
-ENTRY(cpu_arm720_dcache_invalidate_range)
- mov r0, #0
- mcr p15, 0, r0, c7, c7, 0 @ flush cache
- mov pc, lr
-
-/*
- * These just expect cache lines to be cleaned. Since we have a writethrough
- * cache, we never have any dirty cachelines to worry about.
- */
-ENTRY(cpu_arm720_dcache_clean_range)
-ENTRY(cpu_arm720_dcache_clean_page)
-ENTRY(cpu_arm720_dcache_clean_entry)
- mov pc, lr
-
-/*
- * Function: arm720_check_bugs (void)
- * : arm720_proc_init (void)
+ * Function: arm720_proc_init (void)
* : arm720_proc_fin (void)
*
* Notes : This processor does not require these
*/
-ENTRY(cpu_arm720_check_bugs)
- mrs ip, cpsr
- bic ip, ip, #PSR_F_BIT
- msr cpsr, ip
- mov pc, lr
-
+ENTRY(cpu_arm720_dcache_clean_area)
ENTRY(cpu_arm720_proc_init)
mov pc, lr
@@ -102,12 +67,12 @@ ENTRY(cpu_arm720_do_idle)
mov pc, lr
/*
- * Function: arm720_set_pgd(unsigned long pgd_phys)
+ * Function: arm720_switch_mm(unsigned long pgd_phys)
* Params : pgd_phys Physical address of page table
* Purpose : Perform a task switch, saving the old process' state and restoring
* the new.
*/
-ENTRY(cpu_arm720_set_pgd)
+ENTRY(cpu_arm720_switch_mm)
mov r1, #0
mcr p15, 0, r1, c7, c7, 0 @ invalidate cache
mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
@@ -115,16 +80,6 @@ ENTRY(cpu_arm720_set_pgd)
mov pc, lr
/*
- * Function: arm720_flush_pmd(pmdp)
- *
- * Params : r0 = Address to set
- *
- * Purpose : Set a PMD and flush it out of any WB cache
- */
-ENTRY(cpu_arm720_flush_pmd)
- mov pc, lr
-
-/*
* Function: arm720_set_pte(pte_t *ptep, pte_t pte)
* Params : r0 = Address to set
* : r1 = value to set
@@ -140,7 +95,7 @@ ENTRY(cpu_arm720_set_pte)
bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL
- tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec?
+ tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
@@ -194,31 +149,13 @@ __arm720_setup: mov r0, #0
.type arm720_processor_functions, #object
ENTRY(arm720_processor_functions)
.word v4t_late_abort
- .word cpu_arm720_check_bugs
.word cpu_arm720_proc_init
.word cpu_arm720_proc_fin
.word cpu_arm720_reset
.word cpu_arm720_do_idle
-
- /* cache */
- .word cpu_arm720_cache_clean_invalidate_all
- .word cpu_arm720_cache_clean_invalidate_range
-
- /* dcache */
- .word cpu_arm720_dcache_invalidate_range
- .word cpu_arm720_dcache_clean_range
- .word cpu_arm720_dcache_clean_page
- .word cpu_arm720_dcache_clean_entry
-
- /* icache */
- .word cpu_arm720_icache_invalidate_range
- .word cpu_arm720_icache_invalidate_page
-
- /* pgtable */
- .word cpu_arm720_set_pgd
- .word cpu_arm720_flush_pmd
+ .word cpu_arm720_dcache_clean_area
+ .word cpu_arm720_switch_mm
.word cpu_arm720_set_pte
-
.size arm720_processor_functions, . - arm720_processor_functions
.type cpu_arch_name, #object
@@ -249,4 +186,5 @@ __arm720_proc_info:
.long arm720_processor_functions
.long v4_tlb_fns
.long v4wt_user_fns
+ .long v4_cache_fns
.size __arm720_proc_info, . - __arm720_proc_info
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index cbeaaf9a0853..678cb15acfda 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -28,41 +28,35 @@
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
+#include <asm/page.h>
+#include "proc-macros.S"
/*
- * This is the maximum size of an area which will be invalidated
- * using the single invalidate entry instructions. Anything larger
- * than this, and we go for the whole cache.
- *
- * This value should be chosen such that we choose the cheapest
- * alternative.
+ * The size of one data cache line.
*/
-#define MAX_AREA_SIZE 16384
+#define CACHE_DLINESIZE 32
/*
- * the cache line size of the I and D cache
+ * The number of data cache segments.
*/
-#define DCACHELINESIZE 32
-#define ICACHELINESIZE 32
+#define CACHE_DSEGMENTS 8
/*
- * and the page size
+ * The number of lines in a cache segment.
*/
-#define PAGESIZE 4096
+#define CACHE_DENTRIES 64
- .text
/*
- * cpu_arm920_check_bugs()
+ * This is the size at which it becomes more efficient to
+ * clean the whole cache, rather than using the individual
+ * cache line maintenance instructions.
*/
-ENTRY(cpu_arm920_check_bugs)
- mrs ip, cpsr
- bic ip, ip, #PSR_F_BIT
- msr cpsr, ip
- mov pc, lr
+#define CACHE_DLIMIT 65536
+
+ .text
/*
* cpu_arm920_proc_init()
*/
@@ -76,7 +70,11 @@ ENTRY(cpu_arm920_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
- bl cpu_arm920_cache_clean_invalidate_all
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ bl arm920_flush_kern_cache_all
+#else
+ bl v4wt_flush_kern_cache_all
+#endif
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
@@ -112,249 +110,207 @@ ENTRY(cpu_arm920_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mov pc, lr
-/* ================================= CACHE ================================ */
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/*
- * cpu_arm920_cache_clean_invalidate_all()
+ * flush_user_cache_all()
*
- * clean and invalidate all cache lines
- *
- * Note:
- * 1. we should preserve r0 at all times
+ * Invalidate all cache entries in a particular address
+ * space.
*/
- .align 5
-ENTRY(cpu_arm920_cache_clean_invalidate_all)
- mov r2, #1
-cpu_arm920_cache_clean_invalidate_all_r2:
- mov ip, #0
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
-#else
+ENTRY(arm920_flush_user_cache_all)
+ /* FALLTHROUGH */
+
/*
- * 'Clean & Invalidate whole DCache'
- * Re-written to use Index Ops.
- * Uses registers r1, r3 and ip
+ * flush_kern_cache_all()
+ *
+ * Clean and invalidate the entire cache.
*/
- mov r1, #7 << 5 @ 8 segments
-1: orr r3, r1, #63 << 26 @ 64 entries
-2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
+ENTRY(arm920_flush_kern_cache_all)
+ mov r2, #VM_EXEC
+ mov ip, #0
+__flush_whole_cache:
+ mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
+1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
+2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5
bcs 1b @ segments 7 to 0
-#endif
- teq r2, #0
+ tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm920_cache_clean_invalidate_range(start, end, flags)
+ * flush_user_cache_range(start, end, flags)
*
- * clean and invalidate all cache lines associated with this area of memory
+ * Invalidate a range of cache entries in the specified
+ * address space.
*
- * start: Area start address
- * end: Area end address
- * flags: nonzero for I cache as well
+ * - start - start address (inclusive)
+ * - end - end address (exclusive)
+ * - flags - vm_flags for address space
*/
- .align 5
-ENTRY(cpu_arm920_cache_clean_invalidate_range)
- bic r0, r0, #DCACHELINESIZE - 1 @ && added by PGM
- bic r1, r1, #DCACHELINESIZE - 1 @ && added by DHM
- sub r3, r1, r0
- cmp r3, #MAX_AREA_SIZE
- bgt cpu_arm920_cache_clean_invalidate_all_r2
-1: teq r2, #0
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #DCACHELINESIZE
-#else
- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
- mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
+ENTRY(arm920_flush_user_cache_range)
+ mov ip, #0
+ sub r3, r1, r0 @ calculate total size
+ cmp r3, #CACHE_DLIMIT
+ bhs __flush_whole_cache
+
+1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
+ tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #DCACHELINESIZE
-#endif
+ add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
- blt 1b
-
- mcr p15, 0, r1, c7, c10, 4 @ drain WB
+ blo 1b
+ tst r2, #VM_EXEC
+ mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
-/* ================================ D-CACHE =============================== */
-
/*
- * cpu_arm920_dcache_invalidate_range(start, end)
+ * coherent_kern_range(start, end)
*
- * throw away all D-cached data in specified region without an obligation
- * to write them back. Note however that we must clean the D-cached entries
- * around the boundaries if the start and/or end address are not cache
- * aligned.
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
*
- * start: virtual start address
- * end: virtual end address
+ * - start - virtual start address
+ * - end - virtual end address
*/
- .align 5
-ENTRY(cpu_arm920_dcache_invalidate_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- tst r0, #DCACHELINESIZE - 1
- mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
- tst r1, #DCACHELINESIZE - 1
- mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
-#endif @ clean D entry
- bic r0, r0, #DCACHELINESIZE - 1
- bic r1, r1, #DCACHELINESIZE - 1
-1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- add r0, r0, #DCACHELINESIZE
+ENTRY(arm920_coherent_kern_range)
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
+ add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
- blt 1b
+ blo 1b
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm920_dcache_clean_range(start, end)
+ * flush_kern_dcache_page(void *page)
*
- * For the specified virtual address range, ensure that all caches contain
- * clean data, such that peripheral accesses to the physical RAM fetch
- * correct data.
+ * Ensure no D cache aliasing occurs, either with itself or
+ * the I cache
*
- * start: virtual start address
- * end: virtual end address
+ * - addr - page aligned address
*/
- .align 5
-ENTRY(cpu_arm920_dcache_clean_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- bic r0, r0, #DCACHELINESIZE - 1
- sub r1, r1, r0
- cmp r1, #MAX_AREA_SIZE
- mov r2, #0
- bgt cpu_arm920_cache_clean_invalidate_all_r2
-
- bic r1, r1, #DCACHELINESIZE -1
- add r1, r1, #DCACHELINESIZE
-
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- subs r1, r1, #DCACHELINESIZE
- bpl 1b
-#endif
- mcr p15, 0, r2, c7, c10, 4 @ drain WB
+ENTRY(arm920_flush_kern_dcache_page)
+ add r1, r0, #PAGE_SZ
+1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm920_dcache_clean_page(page)
+ * dma_inv_range(start, end)
*
- * Cleans a single page of dcache so that if we have any future aliased
- * mappings, they will be consistent at the time that they are created.
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
*
- * page: virtual address of page to clean from dcache
+ * - start - virtual start address
+ * - end - virtual end address
*
- * Note:
- * 1. we don't need to flush the write buffer in this case.
- * 2. we don't invalidate the entries since when we write the page
- * out to disk, the entries may get reloaded into the cache.
+ * (same as v4wb)
*/
- .align 5
-ENTRY(cpu_arm920_dcache_clean_page)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- mov r1, #PAGESIZE
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- subs r1, r1, #2 * DCACHELINESIZE
- bne 1b
-#endif
+ENTRY(arm920_dma_inv_range)
+ tst r0, #CACHE_DLINESIZE - 1
+ bic r0, r0, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
+ tst r1, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
+1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm920_dcache_clean_entry(addr)
+ * dma_clean_range(start, end)
+ *
+ * Clean the specified virtual address range.
*
- * Clean the specified entry of any caches such that the MMU
- * translation fetches will obtain correct data.
+ * - start - virtual start address
+ * - end - virtual end address
*
- * addr: cache-unaligned virtual address
+ * (same as v4wb)
*/
- .align 5
-ENTRY(cpu_arm920_dcache_clean_entry)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
-#endif
+ENTRY(arm920_dma_clean_range)
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
-/* ================================ I-CACHE =============================== */
-
/*
- * cpu_arm920_icache_invalidate_range(start, end)
+ * dma_flush_range(start, end)
*
- * invalidate a range of virtual addresses from the Icache
+ * Clean and invalidate the specified virtual address range.
*
- * This is a little misleading, it is not intended to clean out
- * the i-cache but to make sure that any data written to the
- * range is made consistent. This means that when we execute code
- * in that region, everything works as we expect.
- *
- * This generally means writing back data in the Dcache and
- * write buffer and flushing the Icache over that region
- *
- * start: virtual start address
- * end: virtual end address
- *
- * NOTE: ICACHELINESIZE == DCACHELINESIZE (so we don't need to
- * loop twice, once for i-cache, once for d-cache)
+ * - start - virtual start address
+ * - end - virtual end address
*/
- .align 5
-ENTRY(cpu_arm920_icache_invalidate_range)
- bic r0, r0, #ICACHELINESIZE - 1 @ Safety check
- sub r1, r1, r0
- cmp r1, #MAX_AREA_SIZE
- bgt cpu_arm920_cache_clean_invalidate_all_r2
+ENTRY(arm920_dma_flush_range)
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov pc, lr
- bic r1, r1, #ICACHELINESIZE - 1
- add r1, r1, #ICACHELINESIZE
+ENTRY(arm920_cache_fns)
+ .long arm920_flush_kern_cache_all
+ .long arm920_flush_user_cache_all
+ .long arm920_flush_user_cache_range
+ .long arm920_coherent_kern_range
+ .long arm920_flush_kern_dcache_page
+ .long arm920_dma_inv_range
+ .long arm920_dma_clean_range
+ .long arm920_dma_flush_range
-1: mcr p15, 0, r0, c7, c5, 1 @ Clean I entry
- mcr p15, 0, r0, c7, c10, 1 @ Clean D entry
- add r0, r0, #ICACHELINESIZE
- subs r1, r1, #ICACHELINESIZE
- bne 1b
+#endif
- mov r0, #0
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
- mov pc, lr
-ENTRY(cpu_arm920_icache_invalidate_page)
- mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ENTRY(cpu_arm920_dcache_clean_area)
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ add r0, r0, #CACHE_DLINESIZE
+ subs r1, r1, #CACHE_DLINESIZE
+ bhi 1b
mov pc, lr
/* =============================== PageTable ============================== */
/*
- * cpu_arm920_set_pgd(pgd)
+ * cpu_arm920_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm920_set_pgd)
+ENTRY(cpu_arm920_switch_mm)
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- /* Any reason why we don't use mcr p15, 0, r0, c7, c7, 0 here? --rmk */
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip
- mov r1, #7 << 5 @ 8 segments
-1: orr r3, r1, #63 << 26 @ 64 entries
+ mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
+1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
@@ -368,28 +324,12 @@ ENTRY(cpu_arm920_set_pgd)
mov pc, lr
/*
- * cpu_arm920_flush_pmd(pmdp)
- *
- * Set a level 1 translation table entry, and clean it out of
- * any caches such that the MMUs can load it correctly.
- *
- * pmdp: pointer to PMD entry
- */
- .align 5
-ENTRY(cpu_arm920_flush_pmd)
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
- mov pc, lr
-
-/*
* cpu_arm920_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_arm920_set_pte)
- tst r0, #2048
- streq r0, [r0, -r0] @ BUG_ON
str r1, [r0], #-2048 @ linux version
eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY
@@ -398,7 +338,7 @@ ENTRY(cpu_arm920_set_pte)
bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL
- tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec?
+ tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
@@ -477,31 +417,13 @@ __arm920_setup:
.type arm920_processor_functions, #object
arm920_processor_functions:
.word v4t_early_abort
- .word cpu_arm920_check_bugs
.word cpu_arm920_proc_init
.word cpu_arm920_proc_fin
.word cpu_arm920_reset
.word cpu_arm920_do_idle
-
- /* cache */
- .word cpu_arm920_cache_clean_invalidate_all
- .word cpu_arm920_cache_clean_invalidate_range
-
- /* dcache */
- .word cpu_arm920_dcache_invalidate_range
- .word cpu_arm920_dcache_clean_range
- .word cpu_arm920_dcache_clean_page
- .word cpu_arm920_dcache_clean_entry
-
- /* icache */
- .word cpu_arm920_icache_invalidate_range
- .word cpu_arm920_icache_invalidate_page
-
- /* pgtable */
- .word cpu_arm920_set_pgd
- .word cpu_arm920_flush_pmd
+ .word cpu_arm920_dcache_clean_area
+ .word cpu_arm920_switch_mm
.word cpu_arm920_set_pte
-
.size arm920_processor_functions, . - arm920_processor_functions
.type cpu_arch_name, #object
@@ -530,4 +452,9 @@ __arm920_proc_info:
.long arm920_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ .long arm920_cache_fns
+#else
+ .long v4wt_cache_fns
+#endif
.size __arm920_proc_info, . - __arm920_proc_info
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index f115e476bc7b..c547a7b0b4f1 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -29,41 +29,36 @@
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
+#include <asm/page.h>
+#include "proc-macros.S"
/*
- * This is the maximum size of an area which will be invalidated
- * using the single invalidate entry instructions. Anything larger
- * than this, and we go for the whole cache.
- *
- * This value should be chosen such that we choose the cheapest
- * alternative.
+ * The size of one data cache line.
*/
-#define MAX_AREA_SIZE 8192
+#define CACHE_DLINESIZE 32
/*
- * the cache line size of the I and D cache
+ * The number of data cache segments.
*/
-#define DCACHELINESIZE 32
-#define ICACHELINESIZE 32
+#define CACHE_DSEGMENTS 4
/*
- * and the page size
+ * The number of lines in a cache segment.
*/
-#define PAGESIZE 4096
+#define CACHE_DENTRIES 64
- .text
/*
- * cpu_arm922_check_bugs()
+ * This is the size at which it becomes more efficient to
+ * clean the whole cache, rather than using the individual
+ * cache line maintenance instructions. (I think this should
+ * be 32768).
*/
-ENTRY(cpu_arm922_check_bugs)
- mrs ip, cpsr
- bic ip, ip, #PSR_F_BIT
- msr cpsr, ip
- mov pc, lr
+#define CACHE_DLIMIT 8192
+
+ .text
/*
* cpu_arm922_proc_init()
*/
@@ -77,7 +72,11 @@ ENTRY(cpu_arm922_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
- bl cpu_arm922_cache_clean_invalidate_all
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ bl arm922_flush_kern_cache_all
+#else
+ bl v4wt_flush_kern_cache_all
+#endif
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
@@ -113,249 +112,209 @@ ENTRY(cpu_arm922_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mov pc, lr
-/* ================================= CACHE ================================ */
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/*
- * cpu_arm922_cache_clean_invalidate_all()
- *
- * clean and invalidate all cache lines
+ * flush_user_cache_all()
*
- * Note:
- * 1. we should preserve r0 at all times
+ * Clean and invalidate all cache entries in a particular
+ * address space.
*/
- .align 5
-ENTRY(cpu_arm922_cache_clean_invalidate_all)
- mov r2, #1
-cpu_arm922_cache_clean_invalidate_all_r2:
- mov ip, #0
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
-#else
+ENTRY(arm922_flush_user_cache_all)
+ /* FALLTHROUGH */
+
/*
- * 'Clean & Invalidate whole DCache'
- * Re-written to use Index Ops.
- * Uses registers r1, r3 and ip
+ * flush_kern_cache_all()
+ *
+ * Clean and invalidate the entire cache.
*/
- mov r1, #3 << 5 @ 4 segments
-1: orr r3, r1, #63 << 26 @ 64 entries
-2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
+ENTRY(arm922_flush_kern_cache_all)
+ mov r2, #VM_EXEC
+ mov ip, #0
+__flush_whole_cache:
+ mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 4 segments
+1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
+2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5
bcs 1b @ segments 7 to 0
-#endif
- teq r2, #0
+ tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm922_cache_clean_invalidate_range(start, end, flags)
+ * flush_user_cache_range(start, end, flags)
*
- * clean and invalidate all cache lines associated with this area of memory
+ * Clean and invalidate a range of cache entries in the
+ * specified address range.
*
- * start: Area start address
- * end: Area end address
- * flags: nonzero for I cache as well
+ * - start - start address (inclusive)
+ * - end - end address (exclusive)
+ * - flags - vm_flags describing address space
*/
- .align 5
-ENTRY(cpu_arm922_cache_clean_invalidate_range)
- bic r0, r0, #DCACHELINESIZE - 1 @ && added by PGM
- bic r1, r1, #DCACHELINESIZE - 1 @ && added by DHM
- sub r3, r1, r0
- cmp r3, #MAX_AREA_SIZE
- bgt cpu_arm922_cache_clean_invalidate_all_r2
-1: teq r2, #0
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #DCACHELINESIZE
-#else
- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
- mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
+ENTRY(arm922_flush_user_cache_range)
+ mov ip, #0
+ sub r3, r1, r0 @ calculate total size
+ cmp r3, #CACHE_DLIMIT
+ bhs __flush_whole_cache
+
+1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
+ tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #DCACHELINESIZE
-#endif
+ add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
- blt 1b
-
- mcr p15, 0, r1, c7, c10, 4 @ drain WB
+ blo 1b
+ tst r2, #VM_EXEC
+ mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
-/* ================================ D-CACHE =============================== */
-
/*
- * cpu_arm922_dcache_invalidate_range(start, end)
+ * coherent_kern_range(start, end)
*
- * throw away all D-cached data in specified region without an obligation
- * to write them back. Note however that we must clean the D-cached entries
- * around the boundaries if the start and/or end address are not cache
- * aligned.
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
*
- * start: virtual start address
- * end: virtual end address
+ * - start - virtual start address
+ * - end - virtual end address
*/
- .align 5
-ENTRY(cpu_arm922_dcache_invalidate_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- tst r0, #DCACHELINESIZE - 1
- mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
- tst r1, #DCACHELINESIZE - 1
- mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
-#endif @ clean D entry
- bic r0, r0, #DCACHELINESIZE - 1
- bic r1, r1, #DCACHELINESIZE - 1
-1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- add r0, r0, #DCACHELINESIZE
+ENTRY(arm922_coherent_kern_range)
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
+ add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
- blt 1b
+ blo 1b
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm922_dcache_clean_range(start, end)
+ * flush_kern_dcache_page(void *page)
*
- * For the specified virtual address range, ensure that all caches contain
- * clean data, such that peripheral accesses to the physical RAM fetch
- * correct data.
+ * Ensure no D cache aliasing occurs, either with itself or
+ * the I cache
*
- * start: virtual start address
- * end: virtual end address
+ * - addr - page aligned address
*/
- .align 5
-ENTRY(cpu_arm922_dcache_clean_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- bic r0, r0, #DCACHELINESIZE - 1
- sub r1, r1, r0
- cmp r1, #MAX_AREA_SIZE
- mov r2, #0
- bgt cpu_arm922_cache_clean_invalidate_all_r2
-
- bic r1, r1, #DCACHELINESIZE -1
- add r1, r1, #DCACHELINESIZE
-
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- subs r1, r1, #DCACHELINESIZE
- bpl 1b
-#endif
- mcr p15, 0, r2, c7, c10, 4 @ drain WB
+ENTRY(arm922_flush_kern_dcache_page)
+ add r1, r0, #PAGE_SZ
+1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm922_dcache_clean_page(page)
+ * dma_inv_range(start, end)
*
- * Cleans a single page of dcache so that if we have any future aliased
- * mappings, they will be consistent at the time that they are created.
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
*
- * page: virtual address of page to clean from dcache
+ * - start - virtual start address
+ * - end - virtual end address
*
- * Note:
- * 1. we don't need to flush the write buffer in this case.
- * 2. we don't invalidate the entries since when we write the page
- * out to disk, the entries may get reloaded into the cache.
+ * (same as v4wb)
*/
- .align 5
-ENTRY(cpu_arm922_dcache_clean_page)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- mov r1, #PAGESIZE
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- subs r1, r1, #2 * DCACHELINESIZE
- bne 1b
-#endif
+ENTRY(arm922_dma_inv_range)
+ tst r0, #CACHE_DLINESIZE - 1
+ bic r0, r0, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
+ tst r1, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
+1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm922_dcache_clean_entry(addr)
+ * dma_clean_range(start, end)
+ *
+ * Clean the specified virtual address range.
*
- * Clean the specified entry of any caches such that the MMU
- * translation fetches will obtain correct data.
+ * - start - virtual start address
+ * - end - virtual end address
*
- * addr: cache-unaligned virtual address
+ * (same as v4wb)
*/
- .align 5
-ENTRY(cpu_arm922_dcache_clean_entry)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
-#endif
+ENTRY(arm922_dma_clean_range)
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
-/* ================================ I-CACHE =============================== */
-
/*
- * cpu_arm922_icache_invalidate_range(start, end)
- *
- * invalidate a range of virtual addresses from the Icache
- *
- * This is a little misleading, it is not intended to clean out
- * the i-cache but to make sure that any data written to the
- * range is made consistent. This means that when we execute code
- * in that region, everything works as we expect.
+ * dma_flush_range(start, end)
*
- * This generally means writing back data in the Dcache and
- * write buffer and flushing the Icache over that region
+ * Clean and invalidate the specified virtual address range.
*
- * start: virtual start address
- * end: virtual end address
- *
- * NOTE: ICACHELINESIZE == DCACHELINESIZE (so we don't need to
- * loop twice, once for i-cache, once for d-cache)
+ * - start - virtual start address
+ * - end - virtual end address
*/
- .align 5
-ENTRY(cpu_arm922_icache_invalidate_range)
- bic r0, r0, #ICACHELINESIZE - 1 @ Safety check
- sub r1, r1, r0
- cmp r1, #MAX_AREA_SIZE
- bgt cpu_arm922_cache_clean_invalidate_all_r2
+ENTRY(arm922_dma_flush_range)
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov pc, lr
- bic r1, r1, #ICACHELINESIZE - 1
- add r1, r1, #ICACHELINESIZE
+ENTRY(arm922_cache_fns)
+ .long arm922_flush_kern_cache_all
+ .long arm922_flush_user_cache_all
+ .long arm922_flush_user_cache_range
+ .long arm922_coherent_kern_range
+ .long arm922_flush_kern_dcache_page
+ .long arm922_dma_inv_range
+ .long arm922_dma_clean_range
+ .long arm922_dma_flush_range
-1: mcr p15, 0, r0, c7, c5, 1 @ Clean I entry
- mcr p15, 0, r0, c7, c10, 1 @ Clean D entry
- add r0, r0, #ICACHELINESIZE
- subs r1, r1, #ICACHELINESIZE
- bne 1b
+#endif
- mov r0, #0
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
- mov pc, lr
-ENTRY(cpu_arm922_icache_invalidate_page)
- mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ENTRY(cpu_arm922_dcache_clean_area)
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ add r0, r0, #CACHE_DLINESIZE
+ subs r1, r1, #CACHE_DLINESIZE
+ bhi 1b
+#endif
mov pc, lr
/* =============================== PageTable ============================== */
/*
- * cpu_arm922_set_pgd(pgd)
+ * cpu_arm922_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm922_set_pgd)
+ENTRY(cpu_arm922_switch_mm)
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- /* Any reason why we don't use mcr p15, 0, r0, c7, c7, 0 here? --rmk */
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip
- mov r1, #3 << 5 @ 4 segments
-1: orr r3, r1, #63 << 26 @ 64 entries
+ mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 4 segments
+1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
@@ -369,20 +328,6 @@ ENTRY(cpu_arm922_set_pgd)
mov pc, lr
/*
- * cpu_arm922_flush_pmd(pmdp)
- *
- * Set a level 1 translation table entry, and clean it out of
- * any caches such that the MMUs can load it correctly.
- *
- * pmdp: pointer to PMD entry
- */
- .align 5
-ENTRY(cpu_arm922_flush_pmd)
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
- mov pc, lr
-
-/*
* cpu_arm922_set_pte(ptep, pte)
*
* Set a PTE and flush it out
@@ -397,7 +342,7 @@ ENTRY(cpu_arm922_set_pte)
bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL
- tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec?
+ tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
@@ -476,31 +421,13 @@ __arm922_setup:
.type arm922_processor_functions, #object
arm922_processor_functions:
.word v4t_early_abort
- .word cpu_arm922_check_bugs
.word cpu_arm922_proc_init
.word cpu_arm922_proc_fin
.word cpu_arm922_reset
.word cpu_arm922_do_idle
-
- /* cache */
- .word cpu_arm922_cache_clean_invalidate_all
- .word cpu_arm922_cache_clean_invalidate_range
-
- /* dcache */
- .word cpu_arm922_dcache_invalidate_range
- .word cpu_arm922_dcache_clean_range
- .word cpu_arm922_dcache_clean_page
- .word cpu_arm922_dcache_clean_entry
-
- /* icache */
- .word cpu_arm922_icache_invalidate_range
- .word cpu_arm922_icache_invalidate_page
-
- /* pgtable */
- .word cpu_arm922_set_pgd
- .word cpu_arm922_flush_pmd
+ .word cpu_arm922_dcache_clean_area
+ .word cpu_arm922_switch_mm
.word cpu_arm922_set_pte
-
.size arm922_processor_functions, . - arm922_processor_functions
.type cpu_arch_name, #object
@@ -529,4 +456,9 @@ __arm922_proc_info:
.long arm922_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ .long arm922_cache_fns
+#else
+ .long v4wt_cache_fns
+#endif
.size __arm922_proc_info, . - __arm922_proc_info
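The eight-entry arm922_cache_fns table above (the arm926 and xscale files below build the same kind of table) is what the final .long added to __arm922_proc_info points at; generic code now reaches the cache operations through this vector instead of the deleted per-CPU cpu_*_cache_* entry points. A minimal C sketch of such a vector, with member names inferred from the entry symbols rather than copied from the real header, would look roughly like this:

struct cpu_cache_fns {
	void (*flush_kern_all)(void);                    /* flush_kern_cache_all   */
	void (*flush_user_all)(void);                    /* flush_user_cache_all   */
	void (*flush_user_range)(unsigned long start,
				 unsigned long end,
				 unsigned int vm_flags); /* flush_user_cache_range */
	void (*coherent_kern_range)(unsigned long start,
				    unsigned long end);  /* coherent_kern_range    */
	void (*flush_kern_dcache_page)(void *page);      /* flush_kern_dcache_page */
	void (*dma_inv_range)(unsigned long start, unsigned long end);
	void (*dma_clean_range)(unsigned long start, unsigned long end);
	void (*dma_flush_range)(unsigned long start, unsigned long end);
};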
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index a2cdcdd3d9f3..a445460fca44 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -28,9 +28,10 @@
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
+#include <asm/page.h>
+#include "proc-macros.S"
/*
* This is the maximum size of an area which will be invalidated
@@ -40,30 +41,14 @@
* This value should be chosen such that we choose the cheapest
* alternative.
*/
-#define MAX_AREA_SIZE 16384
+#define CACHE_DLIMIT 16384
/*
* the cache line size of the I and D cache
*/
-#define DCACHELINESIZE 32
-#define ICACHELINESIZE 32
-
-/*
- * and the page size
- */
-#define PAGESIZE 4096
+#define CACHE_DLINESIZE 32
.text
-
-/*
- * cpu_arm926_check_bugs()
- */
-ENTRY(cpu_arm926_check_bugs)
- mrs ip, cpsr
- bic ip, ip, #PSR_F_BIT
- msr cpsr, ip
- mov pc, lr
-
/*
* cpu_arm926_proc_init()
*/
@@ -77,17 +62,17 @@ ENTRY(cpu_arm926_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
- bl cpu_arm926_cache_clean_invalidate_all
+ bl arm926_flush_kern_cache_all
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
- bic r0, r0, #0x1000 @ ...i............
- bic r0, r0, #0x000e @ ............wca.
+ bic r0, r0, #0x1000 @ ...i............
+ bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
/*
* cpu_arm926_reset(loc)
*
- * Perform a soft reset of the system. Put the CPU into the
+ * Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
@@ -100,243 +85,236 @@ ENTRY(cpu_arm926_reset)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
- bic ip, ip, #0x000f @ ............wcam
- bic ip, ip, #0x1100 @ ...i...s........
+ bic ip, ip, #0x000f @ ............wcam
+ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
/*
* cpu_arm926_do_idle()
+ *
+ * Called with IRQs disabled
*/
- .align 5
+ .align 10
ENTRY(cpu_arm926_do_idle)
+ mov r0, #0
+ mrc p15, 0, r1, c1, c0, 0 @ Read control register
+ mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
+ bic r2, r1, #1 << 12
+ mcr p15, 0, r2, c1, c0, 0 @ Disable I cache
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
+ mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
mov pc, lr
-/* ================================= CACHE ================================ */
-
-
/*
- * cpu_arm926_cache_clean_invalidate_all()
+ * flush_user_cache_all()
*
- * clean and invalidate all cache lines
+ * Clean and invalidate all cache entries in a particular
+ * address space.
+ */
+ENTRY(arm926_flush_user_cache_all)
+ /* FALLTHROUGH */
+
+/*
+ * flush_kern_cache_all()
*
- * Note:
- * 1. we should preserve r0 at all times
+ * Clean and invalidate the entire cache.
*/
- .align 5
-ENTRY(cpu_arm926_cache_clean_invalidate_all)
- mov r2, #1
-cpu_arm926_cache_clean_invalidate_all_r2:
+ENTRY(arm926_flush_kern_cache_all)
+ mov r2, #VM_EXEC
mov ip, #0
+__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate
bne 1b
#endif
- teq r2, #0
+ tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm926_cache_clean_invalidate_range(start, end, flags)
- *
- * clean and invalidate all cache lines associated with this area of memory
+ * flush_user_cache_range(start, end, flags)
*
- * This is a little misleading, it is not intended to clean out
- * the i-cache but to make sure that any data written to the
- * range is made consistent. This means that when we execute code
- * in that region, everything works as we expect.
+ * Clean and invalidate a range of cache entries in the
+ * specified address range.
*
- * This generally means writing back data in the Dcache and
- * write buffer and flushing the Icache over that region
- * start: Area start address
- * end: Area end address
- * flags: nonzero for I cache as well
+ * - start - start address (inclusive)
+ * - end - end address (exclusive)
+ * - flags - vm_flags describing address space
*/
- .align 5
-ENTRY(cpu_arm926_cache_clean_invalidate_range)
- bic r0, r0, #DCACHELINESIZE - 1 @ && added by PGM
- bic r1, r1, #DCACHELINESIZE - 1 @ && added by DHM
- sub r3, r1, r0
- cmp r3, #MAX_AREA_SIZE
- bgt cpu_arm926_cache_clean_invalidate_all_r2
-
-1: teq r2, #0
+ENTRY(arm926_flush_user_cache_range)
+ mov ip, #0
+ sub r3, r1, r0 @ calculate total size
+ cmp r3, #CACHE_DLIMIT
+ bgt __flush_whole_cache
+1: tst r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #DCACHELINESIZE
+ add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #DCACHELINESIZE
+ add r0, r0, #CACHE_DLINESIZE
#else
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #DCACHELINESIZE
+ add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #DCACHELINESIZE
+ add r0, r0, #CACHE_DLINESIZE
#endif
-
cmp r0, r1
- blt 1b
-
- mcr p15, 0, r1, c7, c10, 4 @ drain WB
-
+ blo 1b
+ tst r2, #VM_EXEC
+ mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
-/* ================================ D-CACHE =============================== */
-
/*
- * cpu_arm926_dcache_invalidate_range(start, end)
+ * coherent_kern_range(start, end)
*
- * throw away all D-cached data in specified region without an obligation
- * to write them back. Note however that we must clean the D-cached entries
- * around the boundaries if the start and/or end address are not cache
- * aligned.
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
*
- * start: virtual start address
- * end: virtual end address
+ * - start - virtual start address
+ * - end - virtual end address
*/
- .align 5
-ENTRY(cpu_arm926_dcache_invalidate_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- tst r0, #DCACHELINESIZE - 1
- mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
- tst r1, #DCACHELINESIZE - 1
- mcrne p15, 0, r1, c7, c10, 1
-#endif @ clean D entry
- bic r0, r0, #DCACHELINESIZE - 1
- bic r1, r1, #DCACHELINESIZE - 1
-1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- add r0, r0, #DCACHELINESIZE
+ENTRY(arm926_coherent_kern_range)
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
+ add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
- blt 1b
+ blo 1b
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm926_dcache_clean_range(start, end)
+ * flush_kern_dcache_page(void *page)
*
- * For the specified virtual address range, ensure that all caches contain
- * clean data, such that peripheral accesses to the physical RAM fetch
- * correct data.
+ * Ensure no D cache aliasing occurs, either with itself or
+ * the I cache
*
- * start: virtual start address
- * end: virtual end address
+ * - addr - page aligned address
*/
- .align 5
-ENTRY(cpu_arm926_dcache_clean_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- bic r0, r0, #DCACHELINESIZE - 1
- sub r1, r1, r0
- cmp r1, #MAX_AREA_SIZE
- mov r2, #0
- bgt cpu_arm926_cache_clean_invalidate_all_r2
-
- bic r1, r1, #DCACHELINESIZE -1
- add r1, r1, #DCACHELINESIZE
-
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- subs r1, r1, #DCACHELINESIZE
- bpl 1b
-#endif
- mcr p15, 0, r2, c7, c10, 4 @ drain WB
+ENTRY(arm926_flush_kern_dcache_page)
+ add r1, r0, #PAGE_SZ
+1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm926_dcache_clean_page(page)
+ * dma_inv_range(start, end)
*
- * Cleans a single page of dcache so that if we have any future aliased
- * mappings, they will be consistent at the time that they are created.
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
*
- * page: virtual address of page to clean from dcache
+ * - start - virtual start address
+ * - end - virtual end address
*
- * Note:
- * 1. we don't need to flush the write buffer in this case.
- * 2. we don't invalidate the entries since when we write the page
- * out to disk, the entries may get reloaded into the cache.
+ * (same as v4wb)
*/
- .align 5
-ENTRY(cpu_arm926_dcache_clean_page)
+ENTRY(arm926_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- mov r1, #PAGESIZE
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- subs r1, r1, #2 * DCACHELINESIZE
- bne 1b
+ tst r0, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
+ tst r1, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
#endif
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_arm926_dcache_clean_entry(addr)
+ * dma_clean_range(start, end)
+ *
+ * Clean the specified virtual address range.
*
- * Clean the specified entry of any caches such that the MMU
- * translation fetches will obtain correct data.
+ * - start - virtual start address
+ * - end - virtual end address
*
- * addr: cache-unaligned virtual address
+ * (same as v4wb)
*/
- .align 5
-ENTRY(cpu_arm926_dcache_clean_entry)
+ENTRY(arm926_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
-/* ================================ I-CACHE =============================== */
-
/*
- * cpu_arm926_icache_invalidate_range(start, end)
+ * dma_flush_range(start, end)
*
- * invalidate a range of virtual addresses from the Icache
+ * Clean and invalidate the specified virtual address range.
*
- * start: virtual start address
- * end: virtual end address
+ * - start - virtual start address
+ * - end - virtual end address
*/
- .align 5
-ENTRY(cpu_arm926_icache_invalidate_range)
- bic r0, r0, #DCACHELINESIZE - 1 @ Safety check
- sub r1, r1, r0
- cmp r1, #MAX_AREA_SIZE
- bgt cpu_arm926_cache_clean_invalidate_all_r2
-
- bic r1, r1, #DCACHELINESIZE - 1
- add r1, r1, #DCACHELINESIZE
-
-1: mcr p15, 0, r0, c7, c5, 1 @ clean I entries
- add r0, r0, #DCACHELINESIZE
- subs r1, r1, #DCACHELINESIZE
- bne 1b
-
- mov r0, #0
+ENTRY(arm926_dma_flush_range)
+ bic r0, r0, #CACHE_DLINESIZE - 1
+1:
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
+#else
+ mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+#endif
+ add r0, r0, #CACHE_DLINESIZE
+ cmp r0, r1
+ blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
-ENTRY(cpu_arm926_icache_invalidate_page)
- mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ENTRY(arm926_cache_fns)
+ .long arm926_flush_kern_cache_all
+ .long arm926_flush_user_cache_all
+ .long arm926_flush_user_cache_range
+ .long arm926_coherent_kern_range
+ .long arm926_flush_kern_dcache_page
+ .long arm926_dma_inv_range
+ .long arm926_dma_clean_range
+ .long arm926_dma_flush_range
+
+ENTRY(cpu_arm926_dcache_clean_area)
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ add r0, r0, #CACHE_DLINESIZE
+ subs r1, r1, #CACHE_DLINESIZE
+ bhi 1b
+#endif
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
-
/* =============================== PageTable ============================== */
/*
- * cpu_arm926_set_pgd(pgd)
+ * cpu_arm926_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm926_set_pgd)
+ENTRY(cpu_arm926_switch_mm)
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- /* Any reason why we don't use mcr p15, 0, r0, c7, c7, 0 here? --rmk */
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@@ -350,22 +328,6 @@ ENTRY(cpu_arm926_set_pgd)
mov pc, lr
/*
- * cpu_arm926_flush_pmd(pmdp)
- *
- * Set a level 1 translation table entry, and clean it out of
- * any caches such that the MMUs can load it correctly.
- *
- * pmdp: pointer to PMD entry
- */
- .align 5
-ENTRY(cpu_arm926_flush_pmd)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
-#endif
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
- mov pc, lr
-
-/*
* cpu_arm926_set_pte(ptep, pte)
*
* Set a PTE and flush it out
@@ -380,7 +342,7 @@ ENTRY(cpu_arm926_set_pte)
bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL
- tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec?
+ tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
@@ -447,21 +409,21 @@ __arm926_setup:
bic r0, r0, #0x0e00
bic r0, r0, #0x0002
bic r0, r0, #0x000c
- bic r0, r0, #0x1000 @ ...0 000. .... 000.
+ bic r0, r0, #0x1000 @ ...0 000. .... 000.
/*
* Turn on what we want
*/
orr r0, r0, #0x0031
- orr r0, r0, #0x2100 @ ..1. ...1 ..11 ...1
+ orr r0, r0, #0x2100 @ ..1. ...1 ..11 ...1
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
- orr r0, r0, #0x4000 @ .1.. .... .... ....
+ orr r0, r0, #0x4000 @ .1.. .... .... ....
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
- orr r0, r0, #0x0004 @ .... .... .... .1..
+ orr r0, r0, #0x0004 @ .... .... .... .1..
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
- orr r0, r0, #0x1000 @ ...1 .... .... ....
+ orr r0, r0, #0x1000 @ ...1 .... .... ....
#endif
mov pc, lr
@@ -474,31 +436,13 @@ __arm926_setup:
.type arm926_processor_functions, #object
arm926_processor_functions:
.word v5tej_early_abort
- .word cpu_arm926_check_bugs
.word cpu_arm926_proc_init
.word cpu_arm926_proc_fin
.word cpu_arm926_reset
.word cpu_arm926_do_idle
-
- /* cache */
- .word cpu_arm926_cache_clean_invalidate_all
- .word cpu_arm926_cache_clean_invalidate_range
-
- /* dcache */
- .word cpu_arm926_dcache_invalidate_range
- .word cpu_arm926_dcache_clean_range
- .word cpu_arm926_dcache_clean_page
- .word cpu_arm926_dcache_clean_entry
-
- /* icache */
- .word cpu_arm926_icache_invalidate_range
- .word cpu_arm926_icache_invalidate_page
-
- /* pgtable */
- .word cpu_arm926_set_pgd
- .word cpu_arm926_flush_pmd
+ .word cpu_arm926_dcache_clean_area
+ .word cpu_arm926_switch_mm
.word cpu_arm926_set_pte
-
.size arm926_processor_functions, . - arm926_processor_functions
.type cpu_arch_name, #object
@@ -522,10 +466,10 @@ __arm926_proc_info:
b __arm926_setup
.long cpu_arch_name
.long cpu_elf_name
- .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | \
- HWCAP_FAST_MULT | HWCAP_JAVA
+ .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | HWCAP_JAVA
.long cpu_arm926_name
.long arm926_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
+ .long arm926_cache_fns
.size __arm926_proc_info, . - __arm926_proc_info
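arm926_flush_user_cache_range above keeps the old heuristic: if the span is larger than CACHE_DLIMIT (16K here) it is cheaper to clean and invalidate the whole cache than to walk it line by line. Restated in C for the write-back build only; the *_line()/flush_whole_cache()/drain helpers below stand in for the corresponding mcr sequences and are not real kernel functions:

#define CACHE_DLINESIZE	32		/* D-cache line size used by this file  */
#define CACHE_DLIMIT	16384		/* beyond this, flush the whole cache   */
#define VM_EXEC		0x00000004	/* executable mapping flag              */

extern void flush_whole_cache(int exec);			/* __flush_whole_cache */
extern void clean_and_invalidate_dcache_line(unsigned long addr); /* c7, c14, 1 */
extern void invalidate_icache_line(unsigned long addr);		/* c7, c5, 1   */
extern void drain_write_buffer(void);				/* c7, c10, 4  */

static void flush_user_cache_range(unsigned long start, unsigned long end,
				   unsigned int vm_flags)
{
	if (end - start > CACHE_DLIMIT) {
		flush_whole_cache(vm_flags & VM_EXEC);
		return;
	}

	for (; start < end; start += CACHE_DLINESIZE) {
		clean_and_invalidate_dcache_line(start);
		if (vm_flags & VM_EXEC)
			invalidate_icache_line(start);
	}

	if (vm_flags & VM_EXEC)
		drain_write_buffer();
}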
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 7242fe7bfe70..50ae23f081e1 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -10,12 +10,7 @@
* MMU functions for SA110
*
* These are the low level assembler for performing cache and TLB
- * functions on the StrongARM-110, StrongARM-1100 and StrongARM-1110.
- *
- * Note that SA1100 and SA1110 share everything but their name and CPU ID.
- *
- * 12-jun-2000, Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
- * Flush the read buffer at context switches
+ * functions on the StrongARM-110.
*/
#include <linux/linkage.h>
#include <linux/init.h>
@@ -25,71 +20,32 @@
#include <asm/hardware.h>
#include <asm/proc/pgtable.h>
-/* This is the maximum size of an area which will be flushed. If the area
- * is larger than this, then we flush the whole cache
- */
-#define MAX_AREA_SIZE 32768
-
/*
* the cache line size of the I and D cache
*/
#define DCACHELINESIZE 32
-
-/*
- * and the page size
- */
-#define PAGESIZE 4096
-
#define FLUSH_OFFSET 32768
- .macro flush_110_dcache rd, ra, re
- ldr \rd, =flush_base
- ldr \ra, [\rd]
- eor \ra, \ra, #FLUSH_OFFSET
- str \ra, [\rd]
- add \re, \ra, #16384 @ only necessary for 16k
-1001: ldr \rd, [\ra], #DCACHELINESIZE
- teq \re, \ra
- bne 1001b
- .endm
-
- .macro flush_1100_dcache rd, ra, re
- ldr \rd, =flush_base
- ldr \ra, [\rd]
- eor \ra, \ra, #FLUSH_OFFSET
- str \ra, [\rd]
- add \re, \ra, #8192 @ only necessary for 8k
-1001: ldr \rd, [\ra], #DCACHELINESIZE
- teq \re, \ra
- bne 1001b
-#ifdef FLUSH_BASE_MINICACHE
- add \ra, \ra, #FLUSH_BASE_MINICACHE - FLUSH_BASE
- add \re, \ra, #512 @ only 512 bytes
-1002: ldr \rd, [\ra], #DCACHELINESIZE
- teq \re, \ra
- bne 1002b
-#endif
- .endm
-
- .data
-flush_base: .long FLUSH_BASE
- .text
-
-/*
- * cpu_sa110_check_bugs()
- */
-ENTRY(cpu_sa110_check_bugs)
-ENTRY(cpu_sa1100_check_bugs)
- mrs ip, cpsr
- bic ip, ip, #PSR_F_BIT
- msr cpsr, ip
- mov pc, lr
+ .macro flush_110_dcache rd, ra, re
+ ldr \rd, =flush_base
+ ldr \ra, [\rd]
+ eor \ra, \ra, #FLUSH_OFFSET
+ str \ra, [\rd]
+ add \re, \ra, #16384 @ only necessary for 16k
+1001: ldr \rd, [\ra], #DCACHELINESIZE
+ teq \re, \ra
+ bne 1001b
+ .endm
+
+ .data
+flush_base:
+ .long FLUSH_BASE
+ .text
/*
* cpu_sa110_proc_init()
*/
ENTRY(cpu_sa110_proc_init)
-ENTRY(cpu_sa1100_proc_init)
mov r0, #0
mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
mov pc, lr
@@ -101,7 +57,7 @@ ENTRY(cpu_sa110_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
- bl cpu_sa110_cache_clean_invalidate_all @ clean caches
+ bl v4wb_flush_kern_cache_all @ clean caches
1: mov r0, #0
mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
@@ -110,13 +66,6 @@ ENTRY(cpu_sa110_proc_fin)
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
-ENTRY(cpu_sa1100_proc_fin)
- stmfd sp!, {lr}
- mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
- msr cpsr_c, ip
- bl cpu_sa1100_cache_clean_invalidate_all @ clean caches
- b 1b
-
/*
* cpu_sa110_reset(loc)
*
@@ -128,7 +77,6 @@ ENTRY(cpu_sa1100_proc_fin)
*/
.align 5
ENTRY(cpu_sa110_reset)
-ENTRY(cpu_sa1100_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -151,204 +99,25 @@ ENTRY(cpu_sa1100_reset)
* 3 = switch to fast processor clock
*/
.align 5
-idle: mcr p15, 0, r0, c15, c8, 2 @ Wait for interrupt, cache aligned
- mov r0, r0 @ safety
- mov pc, lr
ENTRY(cpu_sa110_do_idle)
- mov ip, #0
- cmp r0, #4
- addcc pc, pc, r0, lsl #2
- mov pc, lr
-
- b idle
- b idle
- b slow_clock
- b fast_clock
-
-fast_clock:
- mcr p15, 0, ip, c15, c1, 2 @ enable clock switching
- mov pc, lr
-
-slow_clock:
mcr p15, 0, ip, c15, c2, 2 @ disable clock switching
ldr r1, =UNCACHEABLE_ADDR @ load from uncacheable loc
ldr r1, [r1, #0] @ force switch to MCLK
- mov pc, lr
-
- .align 5
-ENTRY(cpu_sa1100_do_idle)
- mov r0, r0 @ 4 nop padding
- mov r0, r0
- mov r0, r0
- mov r0, #0
- ldr r1, =UNCACHEABLE_ADDR @ ptr to uncacheable address
- mrs r2, cpsr
- orr r3, r2, #192 @ disallow interrupts
- msr cpsr_c, r3
- @ --- aligned to a cache line
- mcr p15, 0, r0, c15, c2, 2 @ disable clock switching
- ldr r1, [r1, #0] @ force switch to MCLK
- mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt
+ mov r0, r0 @ safety
+ mov r0, r0 @ safety
+ mov r0, r0 @ safety
+ mcr p15, 0, r0, c15, c8, 2 @ Wait for interrupt, cache aligned
+ mov r0, r0 @ safety
+ mov r0, r0 @ safety
mov r0, r0 @ safety
mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
- msr cpsr_c, r2 @ allow interrupts
mov pc, lr
/* ================================= CACHE ================================ */
-
/*
- * cpu_sa110_cache_clean_invalidate_all (void)
- *
- * clean and invalidate all cache lines
- *
- * Note:
- * 1. we should preserve r0 at all times
- */
- .align 5
-ENTRY(cpu_sa110_cache_clean_invalidate_all)
- mov r2, #1
-cpu_sa110_cache_clean_invalidate_all_r2:
- flush_110_dcache r3, ip, r1
- mov ip, #0
- teq r2, #0
- mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
- mov pc, lr
-
- .align 5
-ENTRY(cpu_sa1100_cache_clean_invalidate_all)
- mov r2, #1
-cpu_sa1100_cache_clean_invalidate_all_r2:
- flush_1100_dcache r3, ip, r1
- mov ip, #0
- teq r2, #0
- mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, r1, c9, c0, 0 @ invalidate RB
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
- mov pc, lr
-
-/*
- * cpu_sa110_cache_clean_invalidate_range(start, end, flags)
- *
- * clean and invalidate all cache lines associated with this area of memory
- *
- * start: Area start address
- * end: Area end address
- * flags: nonzero for I cache as well
- */
- .align 5
-ENTRY(cpu_sa110_cache_clean_invalidate_range)
- bic r0, r0, #DCACHELINESIZE - 1
- sub r3, r1, r0
- cmp r3, #MAX_AREA_SIZE
- bhi cpu_sa110_cache_clean_invalidate_all_r2
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- add r0, r0, #DCACHELINESIZE
- cmp r0, r1
- blo 1b
- teq r2, #0
- movne r0, #0
- mcrne p15, 0, r0, c7, c5, 0 @ invalidate I cache
- mov pc, lr
-
-ENTRY(cpu_sa1100_cache_clean_invalidate_range)
- sub r3, r1, r0
- cmp r3, #MAX_AREA_SIZE
- bhi cpu_sa1100_cache_clean_invalidate_all_r2
- b 1b
-
-/* ================================ D-CACHE =============================== */
-
-/*
- * cpu_sa110_dcache_invalidate_range(start, end)
- *
- * throw away all D-cached data in specified region without an obligation
- * to write them back. Note however that we must clean the D-cached entries
- * around the boundaries if the start and/or end address are not cache
- * aligned.
- *
- * start: virtual start address
- * end: virtual end address
- */
- .align 5
-ENTRY(cpu_sa110_dcache_invalidate_range)
-ENTRY(cpu_sa1100_dcache_invalidate_range)
- tst r0, #DCACHELINESIZE - 1
- bic r0, r0, #DCACHELINESIZE - 1
- mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
- tst r1, #DCACHELINESIZE - 1
- mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
-1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- add r0, r0, #DCACHELINESIZE
- cmp r0, r1
- blo 1b
- mov pc, lr
-
-/*
- * cpu_sa110_dcache_clean_range(start, end)
- *
- * For the specified virtual address range, ensure that all caches contain
- * clean data, such that peripheral accesses to the physical RAM fetch
- * correct data.
- *
- * start: virtual start address
- * end: virtual end address
- */
- .align 5
-ENTRY(cpu_sa110_dcache_clean_range)
- bic r0, r0, #DCACHELINESIZE - 1
- sub r1, r1, r0
- cmp r1, #MAX_AREA_SIZE
- mov r2, #0
- bhi cpu_sa110_cache_clean_invalidate_all_r2
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- subs r1, r1, #2 * DCACHELINESIZE
- bpl 1b
- mcr p15, 0, r2, c7, c10, 4 @ drain WB
- mov pc, lr
-
-ENTRY(cpu_sa1100_dcache_clean_range)
- bic r0, r0, #DCACHELINESIZE - 1
- sub r1, r1, r0
- cmp r1, #MAX_AREA_SIZE
- mov r2, #0
- bhi cpu_sa1100_cache_clean_invalidate_all_r2
- b 1b
-
-/*
- * cpu_sa110_clean_dcache_page(page)
- *
- * Cleans a single page of dcache so that if we have any future aliased
- * mappings, they will be consistent at the time that they are created.
- *
- * Note:
- * 1. we don't need to flush the write buffer in this case.
- * 2. we don't invalidate the entries since when we write the page
- * out to disk, the entries may get reloaded into the cache.
- */
- .align 5
-ENTRY(cpu_sa110_dcache_clean_page)
-ENTRY(cpu_sa1100_dcache_clean_page)
- mov r1, #PAGESIZE
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- add r0, r0, #DCACHELINESIZE
- subs r1, r1, #2 * DCACHELINESIZE
- bne 1b
- mov pc, lr
-
-/*
- * cpu_sa110_dcache_clean_entry(addr)
+ * cpu_sa110_dcache_clean_area(addr,sz)
*
* Clean the specified entry of any caches such that the MMU
* translation fetches will obtain correct data.
@@ -356,48 +125,24 @@ ENTRY(cpu_sa1100_dcache_clean_page)
* addr: cache-unaligned virtual address
*/
.align 5
-ENTRY(cpu_sa110_dcache_clean_entry)
-ENTRY(cpu_sa1100_dcache_clean_entry)
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
- mov pc, lr
-
-/* ================================ I-CACHE =============================== */
-
-/*
- * cpu_sa110_icache_invalidate_range(start, end)
- *
- * invalidate a range of virtual addresses from the Icache
- *
- * start: virtual start address
- * end: virtual end address
- */
- .align 5
-ENTRY(cpu_sa110_icache_invalidate_range)
-ENTRY(cpu_sa1100_icache_invalidate_range)
- bic r0, r0, #DCACHELINESIZE - 1
-1: mcr p15, 0, r0, c7, c10, 1 @ Clean D entry
+ENTRY(cpu_sa110_dcache_clean_area)
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
- cmp r0, r1
- blo 1b
- mov r0, #0
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
-ENTRY(cpu_sa110_icache_invalidate_page)
-ENTRY(cpu_sa1100_icache_invalidate_page)
- mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ subs r1, r1, #DCACHELINESIZE
+ bhi 1b
mov pc, lr
/* =============================== PageTable ============================== */
/*
- * cpu_sa110_set_pgd(pgd)
+ * cpu_sa110_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_sa110_set_pgd)
+ENTRY(cpu_sa110_switch_mm)
flush_110_dcache r3, ip, r1
mov r1, #0
mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache
@@ -407,48 +152,12 @@ ENTRY(cpu_sa110_set_pgd)
mov pc, lr
/*
- * cpu_sa1100_set_pgd(pgd)
- *
- * Set the translation base pointer to be as described by pgd.
- *
- * pgd: new page tables
- */
- .align 5
-ENTRY(cpu_sa1100_set_pgd)
- flush_1100_dcache r3, ip, r1
- mov ip, #0
- mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
- mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
- mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
- mov pc, lr
-
-/*
- * cpu_sa110_flush_pmd(pmdp)
- *
- * Set a level 1 translation table entry, and clean it out of
- * any caches such that the MMUs can load it correctly.
- *
- * pmdp: pointer to PMD entry
- */
- .align 5
-ENTRY(cpu_sa110_flush_pmd)
-ENTRY(cpu_sa1100_flush_pmd)
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
- mcr p15, 0, r0, c7, c10, 4 @ drain WB
- mov pc, lr
-
-/*
* cpu_sa110_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_sa110_set_pte)
-ENTRY(cpu_sa1100_set_pte)
- tst r0, #2048
- streq r0, [r0, -r0] @ BUG_ON
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -457,7 +166,7 @@ ENTRY(cpu_sa1100_set_pte)
bic r2, r2, #3
orr r2, r2, #PTE_TYPE_SMALL
- tst r1, #L_PTE_USER | L_PTE_EXEC @ User or Exec?
+ tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
@@ -475,30 +184,16 @@ ENTRY(cpu_sa1100_set_pte)
cpu_sa110_name:
.asciz "StrongARM-110"
-cpu_sa1100_name:
- .asciz "StrongARM-1100"
-cpu_sa1110_name:
- .asciz "StrongARM-1110"
.align
__INIT
-__sa1100_setup: @ Allow read-buffer operations from userland
- mcr p15, 0, r0, c9, c0, 5
- mrc p15, 0, r0, c1, c0 @ get control register v4
- bic r0, r0, #0x0e00 @ ..VI ZFRS BLDP WCAM
- bic r0, r0, #0x0002 @ .... 000. .... ..0.
- orr r0, r0, #0x003d
- orr r0, r0, #0x3100 @ ..11 ...1 ..11 11.1
- b __setup_common
-
__sa110_setup:
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, #0x2e00 @ ..VI ZFRS BLDP WCAM
bic r0, r0, #0x0002 @ ..0. 000. .... ..0.
orr r0, r0, #0x003d
orr r0, r0, #0x1100 @ ...1 ...1 ..11 11.1
-__setup_common:
mov r10, #0
mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4
@@ -518,66 +213,20 @@ __setup_common:
.type sa110_processor_functions, #object
ENTRY(sa110_processor_functions)
.word v4_early_abort
- .word cpu_sa110_check_bugs
.word cpu_sa110_proc_init
.word cpu_sa110_proc_fin
.word cpu_sa110_reset
.word cpu_sa110_do_idle
- /* cache */
- .word cpu_sa110_cache_clean_invalidate_all
- .word cpu_sa110_cache_clean_invalidate_range
-
/* dcache */
- .word cpu_sa110_dcache_invalidate_range
- .word cpu_sa110_dcache_clean_range
- .word cpu_sa110_dcache_clean_page
- .word cpu_sa110_dcache_clean_entry
-
- /* icache */
- .word cpu_sa110_icache_invalidate_range
- .word cpu_sa110_icache_invalidate_page
+ .word cpu_sa110_dcache_clean_area
/* pgtable */
- .word cpu_sa110_set_pgd
- .word cpu_sa110_flush_pmd
+ .word cpu_sa110_switch_mm
.word cpu_sa110_set_pte
.size sa110_processor_functions, . - sa110_processor_functions
-/*
- * SA1100 and SA1110 share the same function calls
- */
- .type sa1100_processor_functions, #object
-ENTRY(sa1100_processor_functions)
- .word v4_early_abort
- .word cpu_sa1100_check_bugs
- .word cpu_sa1100_proc_init
- .word cpu_sa1100_proc_fin
- .word cpu_sa1100_reset
- .word cpu_sa1100_do_idle
-
- /* cache */
- .word cpu_sa1100_cache_clean_invalidate_all
- .word cpu_sa1100_cache_clean_invalidate_range
-
- /* dcache */
- .word cpu_sa1100_dcache_invalidate_range
- .word cpu_sa1100_dcache_clean_range
- .word cpu_sa1100_dcache_clean_page
- .word cpu_sa1100_dcache_clean_entry
-
- /* icache */
- .word cpu_sa1100_icache_invalidate_range
- .word cpu_sa1100_icache_invalidate_page
-
- /* pgtable */
- .word cpu_sa1100_set_pgd
- .word cpu_sa1100_flush_pmd
- .word cpu_sa1100_set_pte
-
- .size sa1100_processor_functions, . - sa1100_processor_functions
-
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4"
@@ -591,7 +240,6 @@ cpu_elf_name:
.section ".proc.info", #alloc, #execinstr
-#ifdef CONFIG_CPU_SA110
.type __sa110_proc_info,#object
__sa110_proc_info:
.long 0x4401a100
@@ -605,37 +253,5 @@ __sa110_proc_info:
.long sa110_processor_functions
.long v4wb_tlb_fns
.long v4wb_user_fns
+ .long v4wb_cache_fns
.size __sa110_proc_info, . - __sa110_proc_info
-#endif
-
-#ifdef CONFIG_CPU_SA1100
- .type __sa1100_proc_info,#object
-__sa1100_proc_info:
- .long 0x4401a110
- .long 0xfffffff0
- .long 0x00000c0e
- b __sa1100_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
- .long cpu_sa1100_name
- .long sa1100_processor_functions
- .long v4wb_tlb_fns
- .long v4_mc_user_fns
- .size __sa1100_proc_info, . - __sa1100_proc_info
-
- .type __sa1110_proc_info,#object
-__sa1110_proc_info:
- .long 0x6901b110
- .long 0xfffffff0
- .long 0x00000c0e
- b __sa1100_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
- .long cpu_sa1110_name
- .long sa1100_processor_functions
- .long v4wb_tlb_fns
- .long v4_mc_user_fns
- .size __sa1110_proc_info, . - __sa1110_proc_info
-#endif
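The old dcache_clean_entry/dcache_clean_range/dcache_clean_page trio collapses into a single dcache_clean_area(addr, size) helper, visible above as cpu_sa110_dcache_clean_area and repeated for the other CPUs in this patch. Its loop is simple enough to restate in C; clean_dcache_line() below stands in for the "mcr p15, 0, rX, c7, c10, 1" instruction and is not a real kernel API:

#define DCACHELINESIZE	32

extern void clean_dcache_line(unsigned long addr);	/* mcr p15, 0, rX, c7, c10, 1 */

static void dcache_clean_area(unsigned long addr, int size)
{
	/* clean one D-cache line per pass until the requested area is covered */
	do {
		clean_dcache_line(addr);
		addr += DCACHELINESIZE;
		size -= DCACHELINESIZE;
	} while (size > 0);
}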
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
new file mode 100644
index 000000000000..28c8425afaf7
--- /dev/null
+++ b/arch/arm/mm/proc-sa1100.S
@@ -0,0 +1,297 @@
+/*
+ * linux/arch/arm/mm/proc-sa1100.S
+ *
+ * Copyright (C) 1997-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * MMU functions for SA1100 and SA1110
+ *
+ * These are the low level assembler for performing cache and TLB
+ * functions on the StrongARM-1100 and StrongARM-1110.
+ *
+ * Note that SA1100 and SA1110 share everything but their name and CPU ID.
+ *
+ * 12-jun-2000, Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
+ * Flush the read buffer at context switches
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+#include <asm/constants.h>
+#include <asm/procinfo.h>
+#include <asm/hardware.h>
+#include <asm/proc/pgtable.h>
+
+/*
+ * the cache line size of the I and D cache
+ */
+#define DCACHELINESIZE 32
+#define FLUSH_OFFSET 32768
+
+ .macro flush_1100_dcache rd, ra, re
+ ldr \rd, =flush_base
+ ldr \ra, [\rd]
+ eor \ra, \ra, #FLUSH_OFFSET
+ str \ra, [\rd]
+ add \re, \ra, #8192 @ only necessary for 8k
+1001: ldr \rd, [\ra], #DCACHELINESIZE
+ teq \re, \ra
+ bne 1001b
+#ifdef FLUSH_BASE_MINICACHE
+ add \ra, \ra, #FLUSH_BASE_MINICACHE - FLUSH_BASE
+ add \re, \ra, #512 @ only 512 bytes
+1002: ldr \rd, [\ra], #DCACHELINESIZE
+ teq \re, \ra
+ bne 1002b
+#endif
+ .endm
+
+ .data
+flush_base:
+ .long FLUSH_BASE
+ .text
+
+ __INIT
+
+/*
+ * cpu_sa1100_proc_init()
+ */
+ENTRY(cpu_sa1100_proc_init)
+ mov r0, #0
+ mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
+ mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
+ mov pc, lr
+
+ .previous
+
+/*
+ * cpu_sa1100_proc_fin()
+ *
+ * Prepare the CPU for reset:
+ * - Disable interrupts
+ * - Clean and turn off caches.
+ */
+ENTRY(cpu_sa1100_proc_fin)
+ stmfd sp!, {lr}
+ mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
+ msr cpsr_c, ip
+ flush_1100_dcache r0, r1, r2 @ clean caches
+ mov r0, #0
+ mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching
+ mrc p15, 0, r0, c1, c0, 0 @ ctrl register
+ bic r0, r0, #0x1000 @ ...i............
+ bic r0, r0, #0x000e @ ............wca.
+ mcr p15, 0, r0, c1, c0, 0 @ disable caches
+ ldmfd sp!, {pc}
+
+/*
+ * cpu_sa1100_reset(loc)
+ *
+ * Perform a soft reset of the system. Put the CPU into the
+ * same state as it would be if it had been reset, and branch
+ * to what would be the reset vector.
+ *
+ * loc: location to jump to for soft reset
+ */
+ .align 5
+ENTRY(cpu_sa1100_reset)
+ mov ip, #0
+ mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+ mrc p15, 0, ip, c1, c0, 0 @ ctrl register
+ bic ip, ip, #0x000f @ ............wcam
+ bic ip, ip, #0x1100 @ ...i...s........
+ mcr p15, 0, ip, c1, c0, 0 @ ctrl register
+ mov pc, r0
+
+/*
+ * cpu_sa1100_do_idle(type)
+ *
+ * Cause the processor to idle
+ *
+ * type: call type:
+ * 0 = slow idle
+ * 1 = fast idle
+ * 2 = switch to slow processor clock
+ * 3 = switch to fast processor clock
+ */
+ .align 5
+ENTRY(cpu_sa1100_do_idle)
+ mov r0, r0 @ 4 nop padding
+ mov r0, r0
+ mov r0, r0
+ mov r0, r0 @ 4 nop padding
+ mov r0, r0
+ mov r0, r0
+ mov r0, #0
+ ldr r1, =UNCACHEABLE_ADDR @ ptr to uncacheable address
+ @ --- aligned to a cache line
+ mcr p15, 0, r0, c15, c2, 2 @ disable clock switching
+ ldr r1, [r1, #0] @ force switch to MCLK
+ mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt
+ mov r0, r0 @ safety
+ mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
+ mov pc, lr
+
+/* ================================= CACHE ================================ */
+
+/*
+ * cpu_sa1100_dcache_clean_area(addr,sz)
+ *
+ * Clean the specified entry of any caches such that the MMU
+ * translation fetches will obtain correct data.
+ *
+ * addr: cache-unaligned virtual address
+ */
+ .align 5
+ENTRY(cpu_sa1100_dcache_clean_area)
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ add r0, r0, #DCACHELINESIZE
+ subs r1, r1, #DCACHELINESIZE
+ bhi 1b
+ mov pc, lr
+
+/* =============================== PageTable ============================== */
+
+/*
+ * cpu_sa1100_switch_mm(pgd)
+ *
+ * Set the translation base pointer to be as described by pgd.
+ *
+ * pgd: new page tables
+ */
+ .align 5
+ENTRY(cpu_sa1100_switch_mm)
+ flush_1100_dcache r3, ip, r1
+ mov ip, #0
+ mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
+ mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+ mov pc, lr
+
+/*
+ * cpu_sa1100_set_pte(ptep, pte)
+ *
+ * Set a PTE and flush it out
+ */
+ .align 5
+ENTRY(cpu_sa1100_set_pte)
+ str r1, [r0], #-2048 @ linux version
+
+ eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
+
+ bic r2, r1, #0xff0
+ bic r2, r2, #3
+ orr r2, r2, #PTE_TYPE_SMALL
+
+ tst r1, #L_PTE_USER @ User?
+ orrne r2, r2, #PTE_SMALL_AP_URO_SRW
+
+ tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
+ orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
+
+ tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
+ movne r2, #0
+
+ str r2, [r0] @ hardware version
+ mov r0, r0
+ mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov pc, lr
+
+
+cpu_sa1100_name:
+ .asciz "StrongARM-1100"
+cpu_sa1110_name:
+ .asciz "StrongARM-1110"
+ .align
+
+ __INIT
+
+__sa1100_setup:
+ mov r10, #0
+ mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
+ mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4
+ mcr p15, 0, r10, c8, c7 @ invalidate I,D TLBs on v4
+ mov r0, #0x1f @ Domains 0, 1 = client
+ mcr p15, 0, r0, c3, c0 @ load domain access register
+ mcr p15, 0, r4, c2, c0 @ load page table pointer
+ mrc p15, 0, r0, c1, c0 @ get control register v4
+ bic r0, r0, #0x0e00 @ ..VI ZFRS BLDP WCAM
+ bic r0, r0, #0x0002 @ .... 000. .... ..0.
+ orr r0, r0, #0x003d
+ orr r0, r0, #0x3100 @ ..11 ...1 ..11 11.1
+ mov pc, lr
+
+ .text
+
+/*
+ * Purpose : Function pointers used to access above functions - all calls
+ * come through these
+ */
+
+/*
+ * SA1100 and SA1110 share the same function calls
+ */
+ .type sa1100_processor_functions, #object
+ENTRY(sa1100_processor_functions)
+ .word v4_early_abort
+ .word cpu_sa1100_proc_init
+ .word cpu_sa1100_proc_fin
+ .word cpu_sa1100_reset
+ .word cpu_sa1100_do_idle
+ .word cpu_sa1100_dcache_clean_area
+ .word cpu_sa1100_switch_mm
+ .word cpu_sa1100_set_pte
+ .size sa1100_processor_functions, . - sa1100_processor_functions
+
+ .type cpu_arch_name, #object
+cpu_arch_name:
+ .asciz "armv4"
+ .size cpu_arch_name, . - cpu_arch_name
+
+ .type cpu_elf_name, #object
+cpu_elf_name:
+ .asciz "v4"
+ .size cpu_elf_name, . - cpu_elf_name
+ .align
+
+ .section ".proc.info", #alloc, #execinstr
+
+ .type __sa1100_proc_info,#object
+__sa1100_proc_info:
+ .long 0x4401a110
+ .long 0xfffffff0
+ .long 0x00000c0e
+ b __sa1100_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
+ .long cpu_sa1100_name
+ .long sa1100_processor_functions
+ .long v4wb_tlb_fns
+ .long v4_mc_user_fns
+ .long v4wb_cache_fns
+ .size __sa1100_proc_info, . - __sa1100_proc_info
+
+ .type __sa1110_proc_info,#object
+__sa1110_proc_info:
+ .long 0x6901b110
+ .long 0xfffffff0
+ .long 0x00000c0e
+ b __sa1100_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
+ .long cpu_sa1110_name
+ .long sa1100_processor_functions
+ .long v4wb_tlb_fns
+ .long v4_mc_user_fns
+ .long v4wb_cache_fns
+ .size __sa1110_proc_info, . - __sa1110_proc_info
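cpu_sa1100_set_pte above stores the Linux view of the PTE at the pointer it is handed and derives the hardware descriptor 2048 bytes below it. Purely as an illustration, the same logic in C; the L_PTE_*/PTE_* constants are the ones named in the assembler (they come from the included pgtable header), and set_hw_pte() is an invented helper:

extern void set_hw_pte(unsigned long *hw_ptep, unsigned long val);	/* invented */

static void sa1100_set_pte(unsigned long *ptep, unsigned long pte)
{
	unsigned long hw;

	ptep[0] = pte;				/* Linux version, at ptep          */

	hw  = pte & ~0xff3UL;			/* keep C+B bits and the address   */
	hw |= PTE_TYPE_SMALL;			/* 4K small page descriptor        */

	if (pte & L_PTE_USER)			/* user mapping: user read-only    */
		hw |= PTE_SMALL_AP_URO_SRW;

	if ((pte & (L_PTE_WRITE | L_PTE_DIRTY)) ==
		   (L_PTE_WRITE | L_PTE_DIRTY))	/* writable only once dirty too    */
		hw |= PTE_SMALL_AP_UNO_SRW;

	if ((pte & (L_PTE_PRESENT | L_PTE_YOUNG)) !=
		   (L_PTE_PRESENT | L_PTE_YOUNG))
		hw = 0;				/* not present+young: force a fault */

	set_hw_pte((unsigned long *)((char *)ptep - 2048), hw);
	/* the assembler then cleans that D-cache line and drains the write buffer */
}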
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 47c5448e619f..697fb05c70c4 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -27,7 +27,6 @@ EXPORT_SYMBOL(cpu_dcache_invalidate_range);
EXPORT_SYMBOL(cpu_icache_invalidate_range);
EXPORT_SYMBOL(cpu_icache_invalidate_page);
EXPORT_SYMBOL(cpu_set_pgd);
-EXPORT_SYMBOL(cpu_flush_pmd);
EXPORT_SYMBOL(cpu_set_pte);
#else
EXPORT_SYMBOL(processor);
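With the cache and PMD entries gone, every processor_functions table in this patch (sa110 and sa1100 above, arm922/arm926/xscale likewise) is reduced to the same eight slots: abort handler, proc_init, proc_fin, reset, do_idle, dcache_clean_area, switch_mm and set_pte. A rough sketch of the vector they fill in; member names and prototypes are assumptions based on the entry symbols, not copied from the kernel headers:

struct processor {
	void (*data_abort)(void);                        /* e.g. v4_early_abort    */
	void (*proc_init)(void);                         /* cpu_sa110_proc_init    */
	void (*proc_fin)(void);                          /* cpu_sa110_proc_fin     */
	void (*reset)(unsigned long addr);               /* cpu_sa110_reset(loc)   */
	void (*do_idle)(void);                           /* cpu_sa110_do_idle      */
	void (*dcache_clean_area)(void *addr, int size); /* replaces the old trio  */
	void (*switch_mm)(unsigned long pgd_phys);       /* was set_pgd            */
	void (*set_pte)(unsigned long *ptep, unsigned long pte);  /* pte_t in real code */
};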
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index ba13ebc13e2e..1c8027a6ea2c 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -23,10 +23,11 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/proc/pgtable.h>
+#include <asm/page.h>
+#include "proc-macros.S"
/*
* This is the maximum size of an area which will be flushed. If the area
@@ -45,11 +46,6 @@
#define CACHESIZE 32768
/*
- * and the page size
- */
-#define PAGESIZE 4096
-
-/*
* Virtual address used to allocate the cache when flushed
*
* This must be an address range which is _never_ used. It should
@@ -112,15 +108,6 @@ clean_addr: .word CLEAN_ADDR
.text
/*
- * cpu_xscale_check_bugs()
- */
-ENTRY(cpu_xscale_check_bugs)
- mrs ip, cpsr
- bic ip, ip, #PSR_F_BIT
- msr cpsr, ip
- mov pc, lr
-
-/*
* cpu_xscale_proc_init()
*
* Nothing too exciting at the moment
@@ -135,11 +122,11 @@ ENTRY(cpu_xscale_proc_fin)
str lr, [sp, #-4]!
mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r0
+ bl xscale_flush_kern_cache_all @ clean caches
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1800 @ ...IZ...........
bic r0, r0, #0x0006 @ .............CA.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
- bl cpu_xscale_cache_clean_invalidate_all @ clean caches
ldr pc, [sp], #4
/*
@@ -168,16 +155,10 @@ ENTRY(cpu_xscale_reset)
mov pc, r0
/*
- * cpu_xscale_do_idle(type)
+ * cpu_xscale_do_idle()
*
* Cause the processor to idle
*
- * type:
- * 0 = slow idle
- * 1 = fast idle
- * 2 = switch to slow processor clock
- * 3 = switch to fast processor clock
- *
* For now we do nothing but go to idle mode for every case
*
* XScale supports clock switching, but using idle mode support
@@ -193,226 +174,179 @@ ENTRY(cpu_xscale_do_idle)
/* ================================= CACHE ================================ */
/*
- * cpu_xscale_cache_clean_invalidate_all (void)
+ * flush_user_cache_all()
*
- * clean and invalidate all cache lines
+ * Invalidate all cache entries in a particular address
+ * space.
+ */
+ENTRY(xscale_flush_user_cache_all)
+ /* FALLTHROUGH */
+
+/*
+ * flush_kern_cache_all()
*
- * Note:
- * 1. We should preserve r0 at all times.
- * 2. Even if this function implies cache "invalidation" by its name,
- * we don't need to actually use explicit invalidation operations
- * since the goal is to discard all valid references from the cache
- * and the cleaning of it already has that effect.
- * 3. Because of 2 above and the fact that kernel space memory is always
- * coherent across task switches there is no need to worry about
- * inconsistencies due to interrupts, ence no irq disabling.
+ * Clean and invalidate the entire cache.
*/
- .align 5
-ENTRY(cpu_xscale_cache_clean_invalidate_all)
- mov r2, #1
-cpu_xscale_cache_clean_invalidate_all_r2:
+ENTRY(xscale_flush_kern_cache_all)
+ mov r2, #VM_EXEC
+ mov ip, #0
+__flush_whole_cache:
clean_d_cache r0, r1
- teq r2, #0
+ tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
- mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
+ mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr
/*
- * cpu_xscale_cache_clean_invalidate_range(start, end, flags)
+ * flush_user_cache_range(start, end, vm_flags)
*
- * clean and invalidate all cache lines associated with this area of memory
+ * Invalidate a range of cache entries in the specified
+ * address space.
*
- * start: Area start address
- * end: Area end address
- * flags: nonzero for I cache as well
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ * - vm_flags - vm_flags describing address space
*/
.align 5
-ENTRY(cpu_xscale_cache_clean_invalidate_range)
- bic r0, r0, #CACHELINESIZE - 1 @ round down to cache line
- sub r3, r1, r0
+ENTRY(xscale_flush_user_cache_range)
+ mov ip, #0
+ sub r3, r1, r0 @ calculate total size
cmp r3, #MAX_AREA_SIZE
- bhi cpu_xscale_cache_clean_invalidate_all_r2
-1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
+ bhs __flush_whole_cache
+
+1: tst r2, #VM_EXEC
+ mcrne p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
+ mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
mcr p15, 0, r0, c7, c6, 1 @ Invalidate D cache line
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
- teq r2, #0
- mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
- moveq pc, lr
- sub r0, r0, r3
-1: mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
- add r0, r0, #CACHELINESIZE
- cmp r0, r1
- blo 1b
- mcr p15, 0, ip, c7, c5, 6 @ Invalidate BTB
+ tst r2, #VM_EXEC
+ mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB
+ mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr
/*
- * cpu_xscale_flush_ram_page(page)
+ * coherent_kern_range(start, end)
*
- * clean all cache lines associated with this memory page
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
*
- * page: page to clean
+ * - start - virtual start address
+ * - end - virtual end address
*/
- .align 5
-ENTRY(cpu_xscale_flush_ram_page)
- mov r1, #PAGESIZE
-1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
- add r0, r0, #CACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
+ENTRY(xscale_coherent_kern_range)
+ bic r0, r0, #CACHELINESIZE - 1
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
- subs r1, r1, #2 * CACHELINESIZE
- bne 1b
- mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
+ cmp r0, r1
+ blo 1b
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
+ mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr
-/* ================================ D-CACHE =============================== */
-
/*
- * cpu_xscale_dcache_invalidate_range(start, end)
+ * flush_kern_dcache_page(void *page)
*
- * throw away all D-cached data in specified region without an obligation
- * to write them back. Note however that on XScale we must clean all
- * entries also due to hardware errata (80200 A0 & A1 only).
+ * Ensure no D cache aliasing occurs, either with itself or
+ * the I cache
*
- * start: virtual start address
- * end: virtual end address
+ * - addr - page aligned address
*/
- .align 5
-ENTRY(cpu_xscale_dcache_invalidate_range)
- mrc p15, 0, r2, c0, c0, 0 @ Read part no.
- eor r2, r2, #0x69000000
- eor r2, r2, #0x00052000 @ 80200 XX part no.
- bics r2, r2, #0x1 @ Clear LSB in revision field
- moveq r2, #0
- beq cpu_xscale_cache_clean_invalidate_range @ An 80200 A0 or A1
-
- tst r0, #CACHELINESIZE - 1
- mcrne p15, 0, r0, c7, c10, 1 @ Clean D cache line
- tst r1, #CACHELINESIZE - 1
- mcrne p15, 0, r1, c7, c10, 1 @ Clean D cache line
- bic r0, r0, #CACHELINESIZE - 1 @ round down to cache line
-1: mcr p15, 0, r0, c7, c6, 1 @ Invalidate D cache line
+ENTRY(xscale_flush_kern_dcache_page)
+ add r1, r0, #PAGE_SZ
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
+ mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr
/*
- * cpu_xscale_dcache_clean_range(start, end)
+ * dma_inv_range(start, end)
*
- * For the specified virtual address range, ensure that all caches contain
- * clean data, such that peripheral accesses to the physical RAM fetch
- * correct data.
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
*
- * start: virtual start address
- * end: virtual end address
+ * - start - virtual start address
+ * - end - virtual end address
*/
- .align 5
-ENTRY(cpu_xscale_dcache_clean_range)
- bic r0, r0, #CACHELINESIZE - 1
- sub r2, r1, r0
- cmp r2, #MAX_AREA_SIZE
- movhi r2, #0
- bhi cpu_xscale_cache_clean_invalidate_all_r2
+ENTRY(xscale_dma_inv_range)
+ mrc p15, 0, r2, c0, c0, 0 @ read ID
+ eor r2, r2, #0x69000000
+ eor r2, r2, #0x00052000
+ bics r2, r2, #1
+ beq xscale_dma_flush_range
-1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
- add r0, r0, #CACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
+ tst r0, #CACHELINESIZE - 1
+ bic r0, r0, #CACHELINESIZE - 1
+ mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
+ tst r1, #CACHELINESIZE - 1
+ mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
+1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
- mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
+ mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr
/*
- * cpu_xscale_clean_dcache_page(page)
+ * dma_clean_range(start, end)
*
- * Cleans a single page of dcache so that if we have any future aliased
- * mappings, they will be consistent at the time that they are created.
+ * Clean the specified virtual address range.
*
- * Note:
- * 1. we don't need to flush the write buffer in this case. [really? -Nico]
- * 2. we don't invalidate the entries since when we write the page
- * out to disk, the entries may get reloaded into the cache.
+ * - start - virtual start address
+ * - end - virtual end address
*/
- .align 5
-ENTRY(cpu_xscale_dcache_clean_page)
- mov r1, #PAGESIZE
-1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
- add r0, r0, #CACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
- add r0, r0, #CACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
- add r0, r0, #CACHELINESIZE
- mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
+ENTRY(xscale_dma_clean_range)
+ bic r0, r0, #CACHELINESIZE - 1
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
- subs r1, r1, #4 * CACHELINESIZE
- bne 1b
- mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
- mov pc, lr
-
-/*
- * cpu_xscale_dcache_clean_entry(addr)
- *
- * Clean the specified entry of any caches such that the MMU
- * translation fetches will obtain correct data.
- *
- * addr: cache-unaligned virtual address
- */
- .align 5
-ENTRY(cpu_xscale_dcache_clean_entry)
- mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
- mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
+ cmp r0, r1
+ blo 1b
+ mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr
-/* ================================ I-CACHE =============================== */
-
/*
- * cpu_xscale_icache_invalidate_range(start, end)
- *
- * invalidate a range of virtual addresses from the Icache
+ * dma_flush_range(start, end)
*
- * start: virtual start address
- * end: virtual end address
+ * Clean and invalidate the specified virtual address range.
*
- * Note: This is vaguely defined as supposed to bring the dcache and the
- * icache in sync by the way this function is used.
+ * - start - virtual start address
+ * - end - virtual end address
*/
- .align 5
-ENTRY(cpu_xscale_icache_invalidate_range)
+ENTRY(xscale_dma_flush_range)
bic r0, r0, #CACHELINESIZE - 1
-1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
- mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
- mcr p15, 0, ip, c7, c5, 6 @ Invalidate BTB
- mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
+ mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr
-/*
- * cpu_xscale_icache_invalidate_page(page)
- *
- * invalidate all Icache lines associated with this area of memory
- *
- * page: page to invalidate
- */
- .align 5
-ENTRY(cpu_xscale_icache_invalidate_page)
- mov r1, #PAGESIZE
-1: mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
- add r0, r0, #CACHELINESIZE
- mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
- add r0, r0, #CACHELINESIZE
- mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
- add r0, r0, #CACHELINESIZE
- mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
+ENTRY(xscale_cache_fns)
+ .long xscale_flush_kern_cache_all
+ .long xscale_flush_user_cache_all
+ .long xscale_flush_user_cache_range
+ .long xscale_coherent_kern_range
+ .long xscale_flush_kern_dcache_page
+ .long xscale_dma_inv_range
+ .long xscale_dma_clean_range
+ .long xscale_dma_flush_range
+
+ENTRY(cpu_xscale_dcache_clean_area)
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
- subs r1, r1, #4 * CACHELINESIZE
- bne 1b
- mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
+ subs r1, r1, #CACHELINESIZE
+ bhi 1b
mov pc, lr
/* ================================ CACHE LOCKING============================
@@ -553,18 +487,17 @@ ENTRY(xscale_dtlb_unlock)
/* =============================== PageTable ============================== */
-#define PMD_CACHE_WRITE_ALLOCATE 0
#define PTE_CACHE_WRITE_ALLOCATE 0
/*
- * cpu_xscale_set_pgd(pgd)
+ * cpu_xscale_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_xscale_set_pgd)
+ENTRY(cpu_xscale_switch_mm)
clean_d_cache r1, r2
mcr p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
@@ -573,21 +506,6 @@ ENTRY(cpu_xscale_set_pgd)
cpwait_ret lr, ip
/*
- * cpu_xscale_flush_pmd(pmdp)
- *
- * Set a level 1 translation table entry, and clean it out of
- * any caches such that the MMUs can load it correctly.
- *
- * pmdp: pointer to PMD entry
- */
- .align 5
-ENTRY(cpu_xscale_flush_pmd)
- mov ip, #0
- mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
- mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
- mov pc, lr
-
-/*
* cpu_xscale_set_pte(ptep, pte)
*
* Set a PTE and flush it out
@@ -603,7 +521,7 @@ ENTRY(cpu_xscale_set_pte)
eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
- tst r3, #L_PTE_USER | L_PTE_EXEC @ User or Exec?
+ tst r3, #L_PTE_USER @ User?
orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
@@ -631,12 +549,10 @@ ENTRY(cpu_xscale_set_pte)
@ Erratum 40: The B bit must be cleared for a user read-only
@ cacheable page.
@
- @ B = B & ~((U|E) & C & ~W)
+ @ B = B & ~(U & C & ~W)
@
- and ip, r1, #L_PTE_USER | L_PTE_EXEC | L_PTE_WRITE | L_PTE_CACHEABLE
+ and ip, r1, #L_PTE_USER | L_PTE_WRITE | L_PTE_CACHEABLE
teq ip, #L_PTE_USER | L_PTE_CACHEABLE
- teqne ip, #L_PTE_EXEC | L_PTE_CACHEABLE
- teqne ip, #L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE
biceq r2, r2, #PTE_BUFFERABLE
tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
@@ -696,30 +612,12 @@ __xscale_setup:
.type xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
.word xscale_abort
- .word cpu_xscale_check_bugs
.word cpu_xscale_proc_init
.word cpu_xscale_proc_fin
.word cpu_xscale_reset
.word cpu_xscale_do_idle
-
- /* cache */
- .word cpu_xscale_cache_clean_invalidate_all
- .word cpu_xscale_cache_clean_invalidate_range
- .word cpu_xscale_flush_ram_page
-
- /* dcache */
- .word cpu_xscale_dcache_invalidate_range
- .word cpu_xscale_dcache_clean_range
- .word cpu_xscale_dcache_clean_page
- .word cpu_xscale_dcache_clean_entry
-
- /* icache */
- .word cpu_xscale_icache_invalidate_range
- .word cpu_xscale_icache_invalidate_page
-
- /* pgtable */
- .word cpu_xscale_set_pgd
- .word cpu_xscale_flush_pmd
+ .word cpu_xscale_dcache_clean_area
+ .word cpu_xscale_switch_mm
.word cpu_xscale_set_pte
.size xscale_processor_functions, . - xscale_processor_functions
@@ -749,6 +647,7 @@ __80200_proc_info:
.long xscale_processor_functions
.long v4wbi_tlb_fns
.long xscale_mc_user_fns
+ .long xscale_cache_fns
.size __80200_proc_info, . - __80200_proc_info
.type __80321_proc_info,#object
@@ -780,6 +679,7 @@ __pxa250_proc_info:
.long xscale_processor_functions
.long v4wbi_tlb_fns
.long xscale_mc_user_fns
+ .long xscale_cache_fns
.size __pxa250_proc_info, . - __pxa250_proc_info
.type __pxa210_proc_info,#object
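
Note: the cache entry points that used to sit inside xscale_processor_functions are now gathered into xscale_cache_fns, a free-standing table that the proc_info records above point at. On the C side that table is read through a struct of function pointers whose members follow the same order as the .long list; the sketch below is an assumption inferred from that list (the struct tag, member names and prototypes are not shown in these hunks):

	struct cpu_cache_fns {
		void (*flush_kern_all)(void);
		void (*flush_user_all)(void);
		void (*flush_user_range)(unsigned long start, unsigned long end,
					 unsigned int flags);
		void (*coherent_kern_range)(unsigned long start, unsigned long end);
		void (*flush_kern_dcache_page)(void *page);
		void (*dma_inv_range)(unsigned long start, unsigned long end);
		void (*dma_clean_range)(unsigned long start, unsigned long end);
		void (*dma_flush_range)(unsigned long start, unsigned long end);
	};
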
diff --git a/arch/arm/vmlinux-armv.lds.in b/arch/arm/vmlinux-armv.lds.in
index 5155359e1a46..d43fee522ecf 100644
--- a/arch/arm/vmlinux-armv.lds.in
+++ b/arch/arm/vmlinux-armv.lds.in
@@ -35,6 +35,9 @@ SECTIONS
__setup_start = .;
*(.init.setup)
__setup_end = .;
+ __early_begin = .;
+ *(__early_param)
+ __early_end = .;
__start___param = .;
*(__param)
__stop___param = .;
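
Note: the new __early_begin/__early_end symbols bracket records emitted into the __early_param section by a registration macro; the ARM setup code walks that window while parsing the kernel command line. None of that C side is visible in this hunk, so the sketch below is only an assumption about its shape (struct name, field names and the handler convention are illustrative):

	struct early_params {
		const char *arg;	/* option prefix, e.g. "initrd=" */
		void (*fn)(char **p);	/* handler; advances *p past the value */
	};

	#define __early_param(name, fn)					\
	static struct early_params __early_##fn				\
	__attribute__((__section__("__early_param"))) = { name, fn }

	/* boot code can then iterate from __early_begin to __early_end and
	 * invoke the handler whose arg prefixes the current option */
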
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 30a47a7aff2d..4301d4b80345 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -237,6 +237,8 @@ static struct atari_floppy_struct {
disk change detection) */
int flags; /* flags */
struct gendisk *disk;
+ int ref;
+ int type;
} unit[FD_MAX_UNITS];
#define UD unit[drive]
@@ -1328,12 +1330,6 @@ static void finish_fdc_done( int dummy )
DPRINT(("finish_fdc() finished\n"));
}
-
-/* Prevent "aliased" accesses. */
-static int fd_ref[4] = { 0,0,0,0 };
-static int fd_device[4] = { 0,0,0,0 };
-
-
/* The detection of disk changes is a dark chapter in Atari history :-(
* Because the "Drive ready" signal isn't present in the Atari
* hardware, one has to rely on the "Write Protect". This works fine,
@@ -1378,7 +1374,7 @@ static int floppy_revalidate(struct gendisk *disk)
if (test_bit(drive, &changed_floppies) ||
test_bit(drive, &fake_change) ||
- unit[drive].disktype == 0) {
+ p->disktype == 0) {
if (UD.flags & FTD_MSG)
printk(KERN_ERR "floppy: clear format %p!\n", UDT);
BufferDrive = -1;
@@ -1445,7 +1441,7 @@ repeat:
floppy = CURRENT->rq_disk->private_data;
drive = floppy - unit;
- type = fd_device[drive];
+ type = floppy->type;
if (!UD.connected) {
/* drive not connected */
@@ -1458,7 +1454,7 @@ repeat:
if (!UDT) {
Probing = 1;
UDT = disk_type + StartDiskType[DriveType];
- set_capacity(unit[drive].disk, UDT->blocks);
+ set_capacity(floppy->disk, UDT->blocks);
UD.autoprobe = 1;
}
}
@@ -1476,7 +1472,7 @@ repeat:
}
type = minor2disktype[type].index;
UDT = &disk_type[type];
- set_capacity(unit[drive].disk, UDT->blocks);
+ set_capacity(floppy->disk, UDT->blocks);
UD.autoprobe = 0;
}
@@ -1522,19 +1518,16 @@ void do_fd_request(request_queue_t * q)
static int fd_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long param)
{
- int drive, type;
- kdev_t device;
struct gendisk *disk = inode->i_bdev->bd_disk;
+ struct atari_floppy_struct *floppy = disk->private_data;
+ int drive = floppy - unit;
+ int type = floppy->type;
struct atari_format_descr fmt_desc;
struct atari_disk_type *dtp;
struct floppy_struct getprm;
int settype;
struct floppy_struct setprm;
- device = inode->i_rdev;
- drive = minor (device);
- type = drive >> 2;
- drive &= 3;
switch (cmd) {
case FDGETPRM:
if (type) {
@@ -1577,7 +1570,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
*/
/* get the parameters from user space */
- if (fd_ref[drive] != 1 && fd_ref[drive] != -1)
+		if (floppy->ref != 1 && floppy->ref != -1)
return -EBUSY;
if (copy_from_user(&setprm, (void *) param, sizeof(setprm)))
return -EFAULT;
@@ -1624,7 +1617,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
printk (KERN_INFO "floppy%d: setting %s %p!\n",
drive, dtp->name, dtp);
UDT = dtp;
- set_capacity(unit[drive].disk, UDT->blocks);
+		set_capacity(floppy->disk, UDT->blocks);
if (cmd == FDDEFPRM) {
/* save settings as permanent default type */
@@ -1670,7 +1663,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
}
UDT = dtp;
- set_capacity(unit[drive].disk, UDT->blocks);
+		set_capacity(floppy->disk, UDT->blocks);
return 0;
case FDMSGON:
@@ -1684,7 +1677,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
case FDFMTBEG:
return 0;
case FDFMTTRK:
- if (fd_ref[drive] != 1 && fd_ref[drive] != -1)
+		if (floppy->ref != 1 && floppy->ref != -1)
return -EBUSY;
if (copy_from_user(&fmt_desc, (void *) param, sizeof(fmt_desc)))
return -EFAULT;
@@ -1693,7 +1686,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
UDT = NULL;
/* MSch: invalidate default_params */
default_params[drive].blocks = 0;
- set_capacity(unit[drive].disk, MAX_DISK_SIZE * 2);
+		set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
case FDFMTEND:
case FDFLUSH:
/* invalidate the buffer track to force a reread */
@@ -1844,23 +1837,22 @@ static void __init config_types( void )
static int floppy_open( struct inode *inode, struct file *filp )
{
- int drive = minor(inode->i_rdev) & 3;
+ struct atari_floppy_struct *p = inode->i_bdev->bd_disk->private_data;
int type = minor(inode->i_rdev) >> 2;
- int old_dev = fd_device[drive];
DPRINT(("fd_open: type=%d\n",type));
- if (fd_ref[drive] && old_dev != type)
+ if (p->ref && p->type != type)
return -EBUSY;
- if (fd_ref[drive] == -1 || (fd_ref[drive] && filp->f_flags & O_EXCL))
+ if (p->ref == -1 || (p->ref && filp->f_flags & O_EXCL))
return -EBUSY;
if (filp->f_flags & O_EXCL)
- fd_ref[drive] = -1;
+ p->ref = -1;
else
- fd_ref[drive]++;
+ p->ref++;
- fd_device[drive] = type;
+ p->type = type;
if (filp->f_flags & O_NDELAY)
return 0;
@@ -1868,28 +1860,29 @@ static int floppy_open( struct inode *inode, struct file *filp )
if (filp->f_mode & 3) {
check_disk_change(inode->i_bdev);
if (filp->f_mode & 2) {
- if (UD.wpstat) {
+ if (p->wpstat) {
+ if (p->ref < 0)
+ p->ref = 0;
+ else
+ p->ref--;
floppy_release(inode, filp);
return -EROFS;
}
}
}
-
return 0;
}
static int floppy_release( struct inode * inode, struct file * filp )
{
- int drive = minor(inode->i_rdev) & 3;
-
- if (fd_ref[drive] < 0)
- fd_ref[drive] = 0;
- else if (!fd_ref[drive]--) {
+ struct atari_floppy_struct *p = inode->i_bdev->bd_disk->private_data;
+ if (p->ref < 0)
+ p->ref = 0;
+ else if (!p->ref--) {
printk(KERN_ERR "floppy_release with fd_ref == 0");
- fd_ref[drive] = 0;
+ p->ref = 0;
}
-
return 0;
}
@@ -1912,7 +1905,7 @@ static struct gendisk *floppy_find(dev_t dev, int *part, void *data)
return get_disk(unit[drive].disk);
}
-int __init atari_floppy_init (void)
+static int __init atari_floppy_init (void)
{
int i;
@@ -2014,18 +2007,7 @@ void __init atari_floppy_setup( char *str, int *ints )
}
}
-#ifdef MODULE
-
-MODULE_LICENSE("GPL");
-
-int init_module (void)
-{
- if (!MACH_IS_ATARI)
- return -ENXIO;
- return atari_floppy_init ();
-}
-
-void cleanup_module (void)
+static void atari_floppy_exit(void)
{
int i;
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
@@ -2039,5 +2021,8 @@ void cleanup_module (void)
del_timer_sync(&fd_timer);
atari_stram_free( DMABuffer );
}
-#endif
+module_init(atari_floppy_init)
+module_exit(atari_floppy_exit)
+
+MODULE_LICENSE("GPL");
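
Note: with the #ifdef MODULE wrapper gone, the same init/exit pair serves both built-in and modular builds via module_init()/module_exit(). The old init_module() also refused to load on non-Atari machines; that probe has to live in atari_floppy_init() itself now, which this hunk does not show, so the following only sketches the intended shape:

	static int __init atari_floppy_init(void)
	{
		if (!MACH_IS_ATARI)		/* formerly done in init_module() */
			return -ENXIO;
		/* ... register major, queue and disks ... */
		return 0;
	}

	static void atari_floppy_exit(void)
	{
		/* ... unregister region, put disks, free DMA buffer ... */
	}

	module_init(atari_floppy_init)
	module_exit(atari_floppy_exit)
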
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e85256befeec..ee016885325c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3885,7 +3885,7 @@ static int floppy_read_block_0(struct gendisk *disk)
struct block_device *bdev;
int ret;
- bdev = bdget(MKDEV(disk->major, disk->first_minor));
+ bdev = bdget_disk(disk, 0);
if (!bdev) {
printk("No block device for %s\n", disk->disk_name);
BUG();
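
Note: bdget_disk(), introduced elsewhere in this patch, takes a gendisk and a partition index and hides the MKDEV(major, first_minor) arithmetic. The ioctl.c hunk further down shows the before/after shape:

	/* before: caller computes the minor by hand */
	bdev = bdget(MKDEV(disk->major, disk->first_minor) + part);

	/* after: let the gendisk supply it */
	bdev = bdget_disk(disk, part);
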
diff --git a/drivers/block/floppy98.c b/drivers/block/floppy98.c
index a01a515f0618..226b0cee8a20 100644
--- a/drivers/block/floppy98.c
+++ b/drivers/block/floppy98.c
@@ -3995,7 +3995,7 @@ static int floppy_read_block_0(struct gendisk *disk)
struct block_device *bdev;
int ret;
- bdev = bdget(MKDEV(disk->major, disk->first_minor));
+ bdev = bdget_disk(disk, 0);
if (!bdev) {
printk("No block device for %s\n", disk->disk_name);
BUG();
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c
index 34d9d4dd91fc..fe922bcc5805 100644
--- a/drivers/block/genhd.c
+++ b/drivers/block/genhd.c
@@ -721,6 +721,17 @@ int bdev_read_only(struct block_device *bdev)
return disk->policy;
}
+int invalidate_partition(struct gendisk *disk, int index)
+{
+ int res = 0;
+ struct block_device *bdev = bdget_disk(disk, index);
+ if (bdev)
+ res = __invalidate_device(bdev, 1);
+ bdput(bdev);
+ return res;
+}
+
EXPORT_SYMBOL(bdev_read_only);
EXPORT_SYMBOL(set_device_ro);
EXPORT_SYMBOL(set_disk_ro);
+EXPORT_SYMBOL(invalidate_partition);
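
Note: invalidate_partition() packages the bdget_disk()/__invalidate_device()/bdput() sequence that callers previously open-coded with invalidate_device() and kdev_t arithmetic. The md.c and fs/partitions/check.c hunks later in this patch use it like this:

	/* drop cached state for every partition, then the whole device,
	 * mirroring the del_gendisk() conversion below */
	int p;

	for (p = disk->minors - 1; p > 0; p--)
		invalidate_partition(disk, p);
	invalidate_partition(disk, 0);
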
diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c
index 3dbd0824319b..04aae2485102 100644
--- a/drivers/block/ioctl.c
+++ b/drivers/block/ioctl.c
@@ -62,7 +62,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg *arg)
if (disk->part[part - 1]->nr_sects == 0)
return -ENXIO;
/* partition in use? Incomplete check for now. */
- bdevp = bdget(MKDEV(disk->major, disk->first_minor) + part);
+ bdevp = bdget_disk(disk, part);
if (!bdevp)
return -ENOMEM;
if (bd_claim(bdevp, &holder) < 0) {
diff --git a/drivers/ide/legacy/hd98.c b/drivers/ide/legacy/hd98.c
index b21c147d9fee..b8399d81327e 100644
--- a/drivers/ide/legacy/hd98.c
+++ b/drivers/ide/legacy/hd98.c
@@ -46,8 +46,6 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-#define MAJOR_NR HD_MAJOR
-#define DEVICE_NR(device) (minor(device)>>6)
#include <linux/blk.h>
#include "io_ports.h"
@@ -100,6 +98,9 @@
#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
static spinlock_t hd_lock = SPIN_LOCK_UNLOCKED;
+static struct request_queue hd_queue;
+
+#define CURRENT elv_next_request(&hd_queue)
#define TIMEOUT_VALUE (6*HZ)
#define HD_DELAY 0
@@ -115,26 +116,24 @@ static spinlock_t hd_lock = SPIN_LOCK_UNLOCKED;
static void recal_intr(void);
static void bad_rw_intr(void);
-static char recalibrate[MAX_HD];
-static char special_op[MAX_HD];
-
static int reset;
static int hd_error;
-#define SUBSECTOR(block) (CURRENT->current_nr_sectors > 0)
-
/*
* This struct defines the HD's and their types.
*/
struct hd_i_struct {
unsigned int head,sect,cyl,wpcom,lzone,ctl;
+ int unit;
+ int recalibrate;
+ int special_op;
};
#ifdef HD_TYPE
-struct hd_i_struct hd_info[] = { HD_TYPE };
+static struct hd_i_struct hd_info[] = { HD_TYPE };
static int NR_HD = ((sizeof (hd_info))/(sizeof (struct hd_i_struct)));
#else
-struct hd_i_struct hd_info[MAX_HD];
+static struct hd_i_struct hd_info[MAX_HD];
static int NR_HD;
#endif
@@ -195,11 +194,11 @@ void __init hd_setup(char *str, int *ints)
static void dump_status (const char *msg, unsigned int stat)
{
- char devc;
-
- devc = CURRENT ? 'a' + DEVICE_NR(CURRENT->rq_dev) : '?';
+ char *name = CURRENT ?
+		CURRENT->rq_disk->disk_name :
+ "hd?";
#ifdef VERBOSE_ERRORS
- printk("hd%c: %s: status=0x%02x { ", devc, msg, stat & 0xff);
+ printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
if (stat & BUSY_STAT) printk("Busy ");
if (stat & READY_STAT) printk("DriveReady ");
if (stat & WRERR_STAT) printk("WriteFault ");
@@ -213,7 +212,7 @@ static void dump_status (const char *msg, unsigned int stat)
hd_error = 0;
} else {
hd_error = inb(HD_ERROR);
- printk("hd%c: %s: error=0x%02x { ", devc, msg, hd_error & 0xff);
+ printk("%s: %s: error=0x%02x { ", name, msg, hd_error & 0xff);
if (hd_error & BBD_ERR) printk("BadSector ");
if (hd_error & ECC_ERR) printk("UncorrectableError ");
if (hd_error & ID_ERR) printk("SectorIdNotFound ");
@@ -230,12 +229,12 @@ static void dump_status (const char *msg, unsigned int stat)
printk("\n");
}
#else
- printk("hd%c: %s: status=0x%02x.\n", devc, msg, stat & 0xff);
+ printk("%s: %s: status=0x%02x.\n", name, msg, stat & 0xff);
if ((stat & ERR_STAT) == 0) {
hd_error = 0;
} else {
hd_error = inb(HD_ERROR);
- printk("hd%c: %s: error=0x%02x.\n", devc, msg, hd_error & 0xff);
+ printk("%s: %s: error=0x%02x.\n", name, msg, hd_error & 0xff);
}
#endif
}
@@ -290,9 +289,13 @@ static int controller_ready(unsigned int drive, unsigned int head)
return 0;
}
-static void hd_out(unsigned int drive,unsigned int nsect,unsigned int sect,
- unsigned int head,unsigned int cyl,unsigned int cmd,
- void (*intr_addr)(void))
+static void hd_out(struct hd_i_struct *disk,
+ unsigned int nsect,
+ unsigned int sect,
+ unsigned int head,
+ unsigned int cyl,
+ unsigned int cmd,
+ void (*intr_addr)(void))
{
unsigned short port;
@@ -302,19 +305,19 @@ static void hd_out(unsigned int drive,unsigned int nsect,unsigned int sect,
#endif
if (reset)
return;
- if (!controller_ready(drive, head)) {
+ if (!controller_ready(disk->unit, head)) {
reset = 1;
return;
}
SET_HANDLER(intr_addr);
- outb(hd_info[drive].ctl,HD_CMD);
+ outb(disk->ctl,HD_CMD);
port=HD_DATA + 2;
- outb(hd_info[drive].wpcom>>2, port); port += 2;
+ outb(disk->wpcom>>2, port); port += 2;
outb(nsect, port); port += 2;
outb(sect, port); port += 2;
outb(cyl, port); port += 2;
outb(cyl>>8, port); port += 2;
- outb(0xA0|(drive<<4)|head, port); port += 2;
+ outb(0xA0|(disk->unit<<4)|head, port); port += 2;
outb(cmd, port);
}
@@ -363,9 +366,10 @@ repeat:
goto repeat;
}
if (++i < NR_HD) {
- special_op[i] = recalibrate[i] = 1;
- hd_out(i,hd_info[i].sect,hd_info[i].sect,hd_info[i].head-1,
- hd_info[i].cyl,WIN_SPECIFY,&reset_hd);
+ struct hd_i_struct *disk = &hd_info[i];
+ disk->special_op = disk->recalibrate = 1;
+ hd_out(disk, disk->sect, disk->sect, disk->head-1,
+ disk->cyl, WIN_SPECIFY, &reset_hd);
if (reset)
goto repeat;
} else
@@ -398,18 +402,19 @@ void unexpected_hd_interrupt(void)
*/
static void bad_rw_intr(void)
{
- int dev;
+ struct request *req = CURRENT;
+ struct hd_i_struct *disk;
- if (!CURRENT)
+ if (!req)
return;
- dev = DEVICE_NR(CURRENT->rq_dev);
- if (++CURRENT->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
- end_request(CURRENT, 0);
- special_op[dev] = recalibrate[dev] = 1;
- } else if (CURRENT->errors % RESET_FREQ == 0)
+ disk = req->rq_disk->private_data;
+ if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
+ end_request(req, 0);
+ disk->special_op = disk->recalibrate = 1;
+ } else if (req->errors % RESET_FREQ == 0)
reset = 1;
- else if ((hd_error & TRK0_ERR) || CURRENT->errors % RECAL_FREQ == 0)
- special_op[dev] = recalibrate[dev] = 1;
+ else if ((hd_error & TRK0_ERR) || req->errors % RECAL_FREQ == 0)
+ disk->special_op = disk->recalibrate = 1;
/* Otherwise just retry */
}
@@ -427,6 +432,7 @@ static inline int wait_DRQ(void)
static void read_intr(void)
{
int i, retries = 100000;
+ struct request *req;
do {
i = (unsigned) inb(HD_STATUS);
@@ -442,19 +448,20 @@ static void read_intr(void)
hd_request();
return;
ok_to_read:
- insw(HD_DATA,CURRENT->buffer,256);
- CURRENT->sector++;
- CURRENT->buffer += 512;
- CURRENT->errors = 0;
- i = --CURRENT->nr_sectors;
- --CURRENT->current_nr_sectors;
+ req = CURRENT;
+ insw(HD_DATA,req->buffer,256);
+ req->sector++;
+ req->buffer += 512;
+ req->errors = 0;
+ i = --req->nr_sectors;
+ --req->current_nr_sectors;
#ifdef DEBUG
- printk("hd%c: read: sector %ld, remaining = %ld, buffer=0x%08lx\n",
- dev+'a', CURRENT->sector, CURRENT->nr_sectors,
- (unsigned long) CURRENT->buffer+512);
+ printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
+ req->rq_disk->disk_name, req->sector, req->nr_sectors,
+ req->buffer+512);
#endif
- if (CURRENT->current_nr_sectors <= 0)
- end_request(CURRENT, 1);
+ if (req->current_nr_sectors <= 0)
+ end_request(req, 1);
if (i > 0) {
SET_HANDLER(&read_intr);
return;
@@ -472,6 +479,7 @@ static void write_intr(void)
{
int i;
int retries = 100000;
+ struct request *req = CURRENT;
do {
i = (unsigned) inb(HD_STATUS);
@@ -479,7 +487,7 @@ static void write_intr(void)
continue;
if (!OK_STATUS(i))
break;
- if ((CURRENT->nr_sectors <= 1) || (i & DRQ_STAT))
+ if ((req->nr_sectors <= 1) || (i & DRQ_STAT))
goto ok_to_write;
} while (--retries > 0);
dump_status("write_intr", i);
@@ -487,15 +495,15 @@ static void write_intr(void)
hd_request();
return;
ok_to_write:
- CURRENT->sector++;
- i = --CURRENT->nr_sectors;
- --CURRENT->current_nr_sectors;
- CURRENT->buffer += 512;
- if (!i || (CURRENT->bio && !SUBSECTOR(i)))
- end_request(CURRENT, 1);
+ req->sector++;
+ i = --req->nr_sectors;
+ --req->current_nr_sectors;
+ req->buffer += 512;
+ if (!i || (req->bio && req->current_nr_sectors < 1))
+ end_request(req, 1);
if (i > 0) {
SET_HANDLER(&write_intr);
- outsw(HD_DATA,CURRENT->buffer,256);
+ outsw(HD_DATA,req->buffer,256);
local_irq_enable();
} else {
#if (HD_DELAY > 0)
@@ -521,8 +529,6 @@ static void recal_intr(void)
*/
static void hd_times_out(unsigned long dummy)
{
- unsigned int dev;
-
do_hd = NULL;
if (!CURRENT)
@@ -531,11 +537,10 @@ static void hd_times_out(unsigned long dummy)
disable_irq(HD_IRQ);
local_irq_enable();
reset = 1;
- dev = DEVICE_NR(CURRENT->rq_dev);
- printk("hd%c: timeout\n", dev+'a');
+ printk("%s: timeout\n", CURRENT->rq_disk->disk_name);
if (++CURRENT->errors >= MAX_ERRORS) {
#ifdef DEBUG
- printk("hd%c: too many errors\n", dev+'a');
+ printk("%s: too many errors\n", CURRENT->rq_disk->disk_name);
#endif
end_request(CURRENT, 0);
}
@@ -544,18 +549,18 @@ static void hd_times_out(unsigned long dummy)
enable_irq(HD_IRQ);
}
-int do_special_op (unsigned int dev)
+int do_special_op(struct hd_i_struct *disk, struct request *req)
{
- if (recalibrate[dev]) {
- recalibrate[dev] = 0;
- hd_out(dev,hd_info[dev].sect,0,0,0,WIN_RESTORE,&recal_intr);
+ if (disk->recalibrate) {
+ disk->recalibrate = 0;
+ hd_out(disk, disk->sect,0,0,0,WIN_RESTORE,&recal_intr);
return reset;
}
- if (hd_info[dev].head > 16) {
- printk ("hd%c: cannot handle device with more than 16 heads - giving up\n", dev+'a');
- end_request(CURRENT, 0);
+ if (disk->head > 16) {
+ printk ("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
+ end_request(req, 0);
}
- special_op[dev] = 0;
+ disk->special_op = 0;
return 1;
}
@@ -571,7 +576,9 @@ int do_special_op (unsigned int dev)
*/
static void hd_request(void)
{
- unsigned int dev, block, nsect, sec, track, head, cyl;
+ unsigned int block, nsect, sec, track, head, cyl;
+ struct hd_i_struct *disk;
+ struct request *req;
if (do_hd)
return;
@@ -583,62 +590,58 @@ repeat:
do_hd = NULL;
return;
}
+ req = CURRENT;
if (reset) {
local_irq_disable();
reset_hd();
return;
}
- dev = DEVICE_NR(CURRENT->rq_dev);
- block = CURRENT->sector;
- nsect = CURRENT->nr_sectors;
- if (dev >= NR_HD) {
- printk("hd: bad disk number: %d\n", dev);
- end_request(CURRENT, 0);
- goto repeat;
- }
- if (block >= get_capacity(hd_gendisk[dev]) ||
- ((block+nsect) > get_capacity(hd_gendisk[dev]))) {
+ disk = req->rq_disk->private_data;
+ block = req->sector;
+ nsect = req->nr_sectors;
+ if (block >= get_capacity(req->rq_disk) ||
+ ((block+nsect) > get_capacity(req->rq_disk))) {
printk("%s: bad access: block=%d, count=%d\n",
- hd_gendisk[dev]->disk_name, block, nsect);
- end_request(CURRENT, 0);
+ req->rq_disk->disk_name, block, nsect);
+ end_request(req, 0);
goto repeat;
}
- if (special_op[dev]) {
- if (do_special_op(dev))
+ if (disk->special_op) {
+ if (do_special_op(disk, req))
goto repeat;
return;
}
- sec = block % hd_info[dev].sect + 1;
- track = block / hd_info[dev].sect;
- head = track % hd_info[dev].head;
- cyl = track / hd_info[dev].head;
+ sec = block % disk->sect + 1;
+ track = block / disk->sect;
+ head = track % disk->head;
+ cyl = track / disk->head;
#ifdef DEBUG
- printk("hd%c: %sing: CHS=%d/%d/%d, sectors=%d, buffer=0x%08lx\n",
- dev+'a', (CURRENT->cmd == READ)?"read":"writ",
- cyl, head, sec, nsect, (unsigned long) CURRENT->buffer);
+ printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n",
+ req->rq_disk->disk_name, (req->cmd == READ)?"read":"writ",
+ cyl, head, sec, nsect, req->buffer);
#endif
- if(CURRENT->flags & REQ_CMD) {
- switch (rq_data_dir(CURRENT)) {
+ if (req->flags & REQ_CMD) {
+ switch (rq_data_dir(req)) {
case READ:
- hd_out(dev,nsect,sec,head,cyl,WIN_READ,&read_intr);
+ hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr);
if (reset)
goto repeat;
break;
case WRITE:
- hd_out(dev,nsect,sec,head,cyl,WIN_WRITE,&write_intr);
+ hd_out(disk,nsect,sec,head,cyl,WIN_WRITE,&write_intr);
if (reset)
goto repeat;
if (wait_DRQ()) {
bad_rw_intr();
goto repeat;
}
- outsw(HD_DATA,CURRENT->buffer,256);
+ outsw(HD_DATA,req->buffer,256);
break;
default:
printk("unknown hd-command\n");
- end_request(CURRENT, 0);
+ end_request(req, 0);
break;
}
}
@@ -654,34 +657,19 @@ static void do_hd_request (request_queue_t * q)
static int hd_ioctl(struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg)
{
+ struct hd_i_struct *disk = inode->i_bdev->bd_disk->private_data;
struct hd_geometry *loc = (struct hd_geometry *) arg;
- int dev = DEVICE_NR(inode->i_rdev);
+ struct hd_geometry g;
- if (dev >= NR_HD)
+ if (cmd != HDIO_GETGEO)
return -EINVAL;
- switch (cmd) {
- case HDIO_GETGEO:
- {
- struct hd_geometry g;
- if (!loc) return -EINVAL;
- g.heads = hd_info[dev].head;
- g.sectors = hd_info[dev].sect;
- g.cylinders = hd_info[dev].cyl;
- g.start = get_start_sect(inode->i_bdev);
- return copy_to_user(loc, &g, sizeof g) ? -EFAULT : 0;
- }
-
- default:
- return -EINVAL;
- }
-}
-
-static int hd_open(struct inode * inode, struct file * filp)
-{
- int target = DEVICE_NR(inode->i_rdev);
- if (target >= NR_HD)
- return -ENODEV;
- return 0;
+ if (!loc)
+ return -EINVAL;
+ g.heads = disk->head;
+ g.sectors = disk->sect;
+ g.cylinders = disk->cyl;
+ g.start = get_start_sect(inode->i_bdev);
+ return copy_to_user(loc, &g, sizeof g) ? -EFAULT : 0;
}
/*
@@ -689,8 +677,6 @@ static int hd_open(struct inode * inode, struct file * filp)
* be forgotten about...
*/
-extern struct block_device_operations hd_fops;
-
static void hd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
void (*handler)(void) = do_hd;
@@ -704,7 +690,6 @@ static void hd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
static struct block_device_operations hd_fops = {
- .open = hd_open,
.ioctl = hd_ioctl,
};
@@ -721,15 +706,15 @@ static struct block_device_operations hd_fops = {
static int __init hd_init(void)
{
int drive;
- if (register_blkdev(MAJOR_NR,"hd",&hd_fops)) {
- printk("hd: unable to get major %d for hard disk\n",MAJOR_NR);
+ if (register_blkdev(HD_MAJOR,"hd")) {
+ printk("hd: unable to get major %d for hard disk\n",HD_MAJOR);
return -1;
}
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_hd_request, &hd_lock);
- blk_queue_max_sectors(BLK_DEFAULT_QUEUE(MAJOR_NR), 255);
+ blk_init_queue(&hd_queue, do_hd_request, &hd_lock);
+ blk_queue_max_sectors(&hd_queue, 255);
init_timer(&device_timer);
device_timer.function = hd_times_out;
- blk_queue_hardsect_size(QUEUE, 512);
+ blk_queue_hardsect_size(&hd_queue, 512);
#ifdef __i386__
if (!NR_HD) {
@@ -768,24 +753,22 @@ static int __init hd_init(void)
goto out;
for (drive=0 ; drive < NR_HD ; drive++) {
- struct gendisk *disk = alloc_disk();
+ struct gendisk *disk = alloc_disk(64);
+ struct hd_i_struct *p = &hd_info[drive];
if (!disk)
goto Enomem;
- disk->major = MAJOR_NR;
+ disk->major = HD_MAJOR;
disk->first_minor = drive << 6;
- disk->minor_shift = 6;
disk->fops = &hd_fops;
sprintf(disk->disk_name, "hd%c", 'a'+drive);
+ disk->private_data = p;
+ set_capacity(disk, p->head * p->sect * p->cyl);
+ disk->queue = &hd_queue;
+ p->unit = drive;
hd_gendisk[drive] = disk;
- }
- for (drive=0 ; drive < NR_HD ; drive++) {
- sector_t size = hd_info[drive].head *
- hd_info[drive].sect * hd_info[drive].cyl;
- set_capacity(hd_gendisk[drive], size);
- printk ("%s: %ldMB, CHS=%d/%d/%d\n",
- hd_gendisk[drive]->disk_name,
- size / 2048, hd_info[drive].cyl,
- hd_info[drive].head, hd_info[drive].sect);
+ printk ("%s: %luMB, CHS=%d/%d/%d\n",
+ disk->disk_name, (unsigned long)get_capacity(disk)/2048,
+ p->cyl, p->head, p->sect);
}
if (request_irq(HD_IRQ, hd_interrupt, SA_INTERRUPT, "hd", NULL)) {
@@ -849,11 +832,8 @@ static int __init hd_init(void)
goto out9;
}
- for(drive=0; drive < NR_HD; drive++) {
- struct hd_i_struct *p = hd_info + drive;
- set_capacity(hd_gendisk[drive], p->head * p->sect * p->cyl);
+ for(drive=0; drive < NR_HD; drive++)
add_disk(hd_gendisk[drive]);
- }
return 0;
out9:
@@ -879,8 +859,8 @@ out1:
NR_HD = 0;
out:
del_timer(&device_timer);
- unregister_blkdev(MAJOR_NR,"hd");
- blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
+ unregister_blkdev(HD_MAJOR,"hd");
+ blk_cleanup_queue(&hd_queue);
return -1;
Enomem:
while (drive--)
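
Note: the hd98 conversion drops the MAJOR_NR/DEVICE_NR minor arithmetic and the global recalibrate[]/special_op[] arrays: each drive's geometry and flags now live in its hd_i_struct, reachable through gendisk->private_data, and the driver owns a dedicated hd_queue instead of BLK_DEFAULT_QUEUE. Condensed from the hunks above (sketch only, not the full function):

	static void hd_request_sketch(void)
	{
		struct request *req = elv_next_request(&hd_queue);
		struct hd_i_struct *disk;

		if (!req)
			return;
		/* private_data was pointed at &hd_info[drive] in hd_init() */
		disk = req->rq_disk->private_data;
		/* geometry (disk->sect, disk->head, disk->cyl) and the
		 * recalibrate/special_op flags are all per-drive now, so no
		 * DEVICE_NR()/minor decoding is needed anywhere in the path */
	}
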
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index b4679f0c2d65..8cdf0e65f9c6 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -153,39 +153,17 @@ static struct file_system_type capifs_fs_type = {
.kill_sb = kill_anon_super,
};
-static spinlock_t entries_lock = SPIN_LOCK_UNLOCKED;
static struct vfsmount *capifs_mnt;
-static int entry_count = 0;
+static int entry_count;
-static struct vfsmount *grab_instance(void)
+static int grab_instance(void)
{
- struct vfsmount *mnt = NULL;
- spin_lock(&entries_lock);
- if (!capifs_mnt) {
- spin_unlock(&entries_lock);
- mnt = kern_mount(&capifs_fs_type);
- if (IS_ERR(mnt))
- return NULL;
- spin_lock(&entries_lock);
- if (!capifs_mnt)
- capifs_mnt = mnt;
- }
- mntget(capifs_mnt);
- entry_count++;
- spin_unlock(&entries_lock);
- mntput(mnt);
- return capifs_mnt;
+ return simple_pin_fs("capifs", &capifs_mnt, &entry_count);
}
static void drop_instance(void)
{
- struct vfsmount *mnt;
- spin_lock(&entries_lock);
- mnt = capifs_mnt;
- if (!--entry_count)
- capifs_mnt = NULL;
- spin_unlock(&entries_lock);
- mntput(mnt);
+ return simple_release_fs(&capifs_mnt, &entry_count);
}
static struct dentry *get_node(int type, int num)
@@ -207,7 +185,7 @@ void capifs_new_ncci(char type, unsigned int num, dev_t device)
struct dentry *dentry;
struct inode *inode;
- if (!grab_instance())
+ if (grab_instance() < 0)
return;
sb = capifs_mnt->mnt_sb;
inode = new_inode(sb);
@@ -232,7 +210,7 @@ void capifs_new_ncci(char type, unsigned int num, dev_t device)
void capifs_free_ncci(char type, unsigned int num)
{
- if (grab_instance()) {
+ if (grab_instance() == 0) {
struct dentry *dentry = get_node(type, num);
if (!IS_ERR(dentry)) {
struct inode *inode = dentry->d_inode;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 96cabf795823..cb667cdf69d1 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -450,11 +450,11 @@ static int __info(struct mapped_device *md, struct dm_ioctl *param)
if (dm_suspended(md))
param->flags |= DM_SUSPEND_FLAG;
- param->dev = MKDEV(disk->major, disk->first_minor);
- bdev = bdget(param->dev);
+ bdev = bdget_disk(disk, 0);
if (!bdev)
return -ENXIO;
+ param->dev = bdev->bd_dev;
param->open_count = bdev->bd_openers;
bdput(bdev);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 1dabc5dde4ce..f624018db4a7 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -345,26 +345,21 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
static int open_dev(struct dm_dev *d, dev_t dev)
{
static char *_claim_ptr = "I belong to device-mapper";
+ struct block_device *bdev;
int r;
if (d->bdev)
BUG();
- d->bdev = bdget(dev);
- if (!d->bdev)
- return -ENOMEM;
-
- r = blkdev_get(d->bdev, d->mode, 0, BDEV_RAW);
+ bdev = open_by_devnum(dev, d->mode, BDEV_RAW);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
+ r = bd_claim(bdev, _claim_ptr);
if (r)
- return r;
-
- r = bd_claim(d->bdev, _claim_ptr);
- if (r) {
- blkdev_put(d->bdev, BDEV_RAW);
- d->bdev = NULL;
- }
-
+ blkdev_put(bdev, BDEV_RAW);
+ else
+ d->bdev = bdev;
return r;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index a1629dbda097..272bef26e781 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1046,12 +1046,9 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
int err = 0;
struct block_device *bdev;
- bdev = bdget(dev);
- if (!bdev)
- return -ENOMEM;
- err = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_RAW);
- if (err)
- return err;
+ bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE, BDEV_RAW);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
err = bd_claim(bdev, rdev);
if (err) {
blkdev_put(bdev, BDEV_RAW);
@@ -1687,7 +1684,7 @@ static int do_md_stop(mddev_t * mddev, int ro)
del_timer_sync(&mddev->safemode_timer);
- invalidate_device(mk_kdev(disk->major, disk->first_minor), 1);
+ invalidate_partition(disk, 0);
if (ro) {
err = -ENXIO;
diff --git a/drivers/mtd/devices/blkmtd.c b/drivers/mtd/devices/blkmtd.c
index bb458defa98a..fe2102690862 100644
--- a/drivers/mtd/devices/blkmtd.c
+++ b/drivers/mtd/devices/blkmtd.c
@@ -1049,17 +1049,10 @@ extern dev_t name_to_dev_t(char *line) __init;
/* Startup */
static int __init init_blkmtd(void)
{
-#ifdef MODULE
- struct file *file = NULL;
- struct inode *inode;
-#endif
-
- int maj, min;
int i, blocksize, blocksize_bits;
loff_t size;
int readonly = 0;
int erase_size = CONFIG_MTD_BLKDEV_ERASESIZE;
- dev_t rdev;
struct block_device *bdev;
int err;
int mode;
@@ -1092,48 +1085,24 @@ static int __init init_blkmtd(void)
mode = (readonly) ? O_RDONLY : O_RDWR;
#ifdef MODULE
-
- file = filp_open(device, mode, 0);
- if(IS_ERR(file)) {
- printk("blkmtd: error, can't open device %s\n", device);
- DEBUG(2, "blkmtd: filp_open returned %ld\n", PTR_ERR(file));
- return 1;
- }
-
- /* determine is this is a block device and if so get its major and minor
- numbers */
- inode = file->f_dentry->d_inode;
- if(!S_ISBLK(inode->i_mode)) {
- printk("blkmtd: %s not a block device\n", device);
- filp_close(file, NULL);
- return 1;
- }
- rdev = inode->i_bdev->bd_dev;
- filp_close(file, NULL);
+ bdev = open_bdev_excl(device, mode, BDEV_RAW, NULL);
#else
- rdev = name_to_dev_t(device);
+ bdev = open_by_devnum(name_to_dev_t(device), FMODE_READ, BDEV_RAW);
#endif
- maj = MAJOR(rdev);
- min = MINOR(rdev);
- DEBUG(1, "blkmtd: found a block device major = %d, minor = %d\n", maj, min);
-
- if(!rdev) {
- printk("blkmtd: bad block device: `%s'\n", device);
+ if (IS_ERR(bdev)){
+ printk("blkmtd: error, can't open device %s\n", device);
+ DEBUG(2, "blkmtd: opening bdev returned %ld\n", PTR_ERR(bdev));
return 1;
}
- if(maj == MTD_BLOCK_MAJOR) {
+ DEBUG(1, "blkmtd: devname = %s\n", bdevname(bdev, b));
+
+ if(MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
printk("blkmtd: attempting to use an MTD device as a block device\n");
+ blkdev_put(bdev, BDEV_RAW);
return 1;
}
- /* get the block device */
- bdev = bdget(rdev);
- err = blkdev_get(bdev, mode, 0, BDEV_RAW);
- if (err)
- return 1;
-
- DEBUG(1, "blkmtd: devname = %s\n", bdevname(bdev, b));
blocksize = BLOCK_SIZE;
blocksize = bs ? bs : block_size(bdev);
diff --git a/drivers/mtd/maps/iq80321.c b/drivers/mtd/maps/iq80321.c
index 0562fc9ab9c2..06f1c9d8e9cd 100644
--- a/drivers/mtd/maps/iq80321.c
+++ b/drivers/mtd/maps/iq80321.c
@@ -67,17 +67,17 @@ static void iq80321_copy_to(struct map_info *map, unsigned long to, const void *
}
static struct map_info iq80321_map = {
- name = "IQ80321 flash",
- size = WINDOW_SIZE,
- buswidth = BUSWIDTH,
- read8 = iq80321_read8,
- read16 = iq80321_read16,
- read32 = iq80321_read32,
- copy_from = iq80321_copy_from,
- write8 = iq80321_write8,
- write16 = iq80321_write16,
- write32 = iq80321_write32,
- copy_to = iq80321_copy_to
+ .name = "IQ80321 flash",
+ .size = WINDOW_SIZE,
+ .buswidth = BUSWIDTH,
+ .read8 = iq80321_read8,
+ .read16 = iq80321_read16,
+ .read32 = iq80321_read32,
+ .copy_from = iq80321_copy_from,
+ .write8 = iq80321_write8,
+ .write16 = iq80321_write16,
+ .write32 = iq80321_write32,
+ .copy_to = iq80321_copy_to
};
static struct mtd_partition iq80321_partitions[4] = {
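
Note: the iq80321_map change is purely syntactic; bare `name = ...` initializers are not accepted in a static initializer, so the structure is rewritten with C99 designated initializers (example_map below is an illustrative name):

	/* C99 designated-initializer form used above; the old GNU
	 * `field: value` spelling also still compiles but is deprecated */
	static struct map_info example_map = {
		.name     = "IQ80321 flash",
		.size     = WINDOW_SIZE,
		.buswidth = BUSWIDTH,
	};
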
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 2dfda57eaebf..53f05b670bb6 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -45,7 +45,6 @@ static struct file_operations default_file_operations;
static struct inode_operations usbfs_dir_inode_operations;
static struct vfsmount *usbdevfs_mount;
static struct vfsmount *usbfs_mount;
-static spinlock_t mount_lock = SPIN_LOCK_UNLOCKED;
static int usbdevfs_mount_count; /* = 0 */
static int usbfs_mount_count; /* = 0 */
@@ -514,69 +513,20 @@ static struct file_system_type usb_fs_type = {
};
/* --------------------------------------------------------------------- */
-static int get_mount (struct file_system_type *fs_type, struct vfsmount **mount, int *mount_count)
-{
- struct vfsmount *mnt;
-
- spin_lock (&mount_lock);
- if (*mount) {
- mntget(*mount);
- ++(*mount_count);
- spin_unlock (&mount_lock);
- goto go_ahead;
- }
-
- spin_unlock (&mount_lock);
- mnt = kern_mount (fs_type);
- if (IS_ERR(mnt)) {
- err ("could not mount the fs...erroring out!\n");
- return -ENODEV;
- }
- spin_lock (&mount_lock);
- if (!*mount) {
- *mount = mnt;
- ++(*mount_count);
- spin_unlock (&mount_lock);
- goto go_ahead;
- }
- mntget(*mount);
- ++(*mount_count);
- spin_unlock (&mount_lock);
- mntput(mnt);
-
-go_ahead:
- dbg("mount_count = %d", *mount_count);
- return 0;
-}
-
-static void put_mount (struct vfsmount **mount, int *mount_count)
-{
- struct vfsmount *mnt;
-
- spin_lock (&mount_lock);
- mnt = *mount;
- --(*mount_count);
- if (!(*mount_count))
- *mount = NULL;
-
- spin_unlock (&mount_lock);
- mntput(mnt);
- dbg("mount_count = %d", *mount_count);
-}
static int create_special_files (void)
{
struct dentry *parent;
- int retval = 0;
+ int retval;
/* create the devices special file */
- retval = get_mount (&usbdevice_fs_type, &usbdevfs_mount, &usbdevfs_mount_count);
+ retval = simple_pin_fs("usbdevfs", &usbdevfs_mount, &usbdevfs_mount_count);
if (retval) {
err ("Unable to get usbdevfs mount");
goto exit;
}
- retval = get_mount (&usb_fs_type, &usbfs_mount, &usbfs_mount_count);
+ retval = simple_pin_fs("usbfs", &usbfs_mount, &usbfs_mount_count);
if (retval) {
err ("Unable to get usbfs mount");
goto error_clean_usbdevfs_mount;
@@ -611,10 +561,10 @@ error_remove_file:
devices_usbfs_dentry = NULL;
error_clean_mounts:
- put_mount (&usbfs_mount, &usbfs_mount_count);
+ simple_release_fs(&usbfs_mount, &usbfs_mount_count);
error_clean_usbdevfs_mount:
- put_mount (&usbdevfs_mount, &usbdevfs_mount_count);
+ simple_release_fs(&usbdevfs_mount, &usbdevfs_mount_count);
exit:
return retval;
@@ -628,8 +578,8 @@ static void remove_special_files (void)
fs_remove_file (devices_usbfs_dentry);
devices_usbdevfs_dentry = NULL;
devices_usbfs_dentry = NULL;
- put_mount (&usbdevfs_mount, &usbdevfs_mount_count);
- put_mount (&usbfs_mount, &usbfs_mount_count);
+ simple_release_fs(&usbdevfs_mount, &usbdevfs_mount_count);
+ simple_release_fs(&usbfs_mount, &usbfs_mount_count);
}
void usbfs_update_special (void)
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 89ca4cd5424a..993a69d81ad5 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -55,6 +55,10 @@
#include <asm/system.h>
#include <asm/uaccess.h>
+#ifdef __arm__
+#include <asm/mach-types.h>
+#endif
+
#include "cyber2000fb.h"
struct cfb_info {
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index c549fd26ed09..d9e70fb1c0e1 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -53,7 +53,7 @@ typedef struct {
static rwlock_t entries_lock __attribute__((unused)) = RW_LOCK_UNLOCKED;
static struct vfsmount *bm_mnt;
-static int entry_count = 0;
+static int entry_count;
/*
* Check if we support the binfmt
@@ -399,19 +399,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
static void bm_clear_inode(struct inode *inode)
{
- Node *e = inode->u.generic_ip;
-
- if (e) {
- struct vfsmount *mnt;
- write_lock(&entries_lock);
- list_del(&e->list);
- mnt = bm_mnt;
- if (!--entry_count)
- bm_mnt = NULL;
- write_unlock(&entries_lock);
- kfree(e);
- mntput(mnt);
- }
+ kfree(inode->u.generic_ip);
}
static void kill_node(Node *e)
@@ -430,6 +418,7 @@ static void kill_node(Node *e)
dentry->d_inode->i_nlink--;
d_drop(dentry);
dput(dentry);
+ simple_release_fs(&bm_mnt, &entry_count);
}
}
@@ -498,8 +487,6 @@ static struct file_operations bm_entry_operations = {
.write = bm_entry_write,
};
-static struct file_system_type bm_fs_type;
-
/* /register */
static ssize_t bm_register_write(struct file *file, const char *buffer,
@@ -507,7 +494,6 @@ static ssize_t bm_register_write(struct file *file, const char *buffer,
{
Node *e;
struct inode *inode;
- struct vfsmount *mnt = NULL;
struct dentry *root, *dentry;
struct super_block *sb = file->f_vfsmnt->mnt_sb;
int err = 0;
@@ -534,32 +520,22 @@ static ssize_t bm_register_write(struct file *file, const char *buffer,
if (!inode)
goto out2;
- write_lock(&entries_lock);
- if (!bm_mnt) {
- write_unlock(&entries_lock);
- mnt = kern_mount(&bm_fs_type);
- if (IS_ERR(mnt)) {
- err = PTR_ERR(mnt);
- iput(inode);
- inode = NULL;
- goto out2;
- }
- write_lock(&entries_lock);
- if (!bm_mnt)
- bm_mnt = mnt;
+ err = simple_pin_fs("binfmt_misc", &bm_mnt, &entry_count);
+ if (err) {
+ iput(inode);
+ inode = NULL;
+ goto out2;
}
- mntget(bm_mnt);
- entry_count++;
e->dentry = dget(dentry);
inode->u.generic_ip = e;
inode->i_fop = &bm_entry_operations;
- d_instantiate(dentry, inode);
+ write_lock(&entries_lock);
+ d_instantiate(dentry, inode);
list_add(&e->list, &entries);
write_unlock(&entries_lock);
- mntput(mnt);
err = 0;
out2:
dput(dentry);
@@ -630,64 +606,20 @@ static struct file_operations bm_status_operations = {
static struct super_operations s_ops = {
.statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
.clear_inode = bm_clear_inode,
};
static int bm_fill_super(struct super_block * sb, void * data, int silent)
{
- struct qstr names[2] = {{.name = "status"}, {.name = "register"}};
- struct inode * inode;
- struct dentry * dentry[3];
- int i;
-
- for (i=0; i<sizeof(names)/sizeof(names[0]); i++) {
- names[i].len = strlen(names[i].name);
- names[i].hash = full_name_hash(names[i].name, names[i].len);
- }
-
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = 0x42494e4d;
- sb->s_op = &s_ops;
-
- inode = bm_get_inode(sb, S_IFDIR | 0755);
- if (!inode)
- return -ENOMEM;
- inode->i_op = &simple_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
- dentry[0] = d_alloc_root(inode);
- if (!dentry[0]) {
- iput(inode);
- return -ENOMEM;
- }
- dentry[1] = d_alloc(dentry[0], &names[0]);
- if (!dentry[1])
- goto out1;
- dentry[2] = d_alloc(dentry[0], &names[1]);
- if (!dentry[2])
- goto out2;
- inode = bm_get_inode(sb, S_IFREG | 0644);
- if (!inode)
- goto out3;
- inode->i_fop = &bm_status_operations;
- d_add(dentry[1], inode);
- inode = bm_get_inode(sb, S_IFREG | 0400);
- if (!inode)
- goto out3;
- inode->i_fop = &bm_register_operations;
- d_add(dentry[2], inode);
-
- sb->s_root = dentry[0];
- return 0;
-
-out3:
- dput(dentry[2]);
-out2:
- dput(dentry[1]);
-out1:
- dput(dentry[0]);
- return -ENOMEM;
+ static struct tree_descr bm_files[] = {
+ [1] = {"status", &bm_status_operations, S_IWUSR|S_IRUGO},
+ [2] = {"register", &bm_register_operations, S_IWUSR},
+ /* last one */ {""}
+ };
+ int err = simple_fill_super(sb, 0x42494e4d, bm_files);
+ if (!err)
+ sb->s_op = &s_ops;
+ return err;
}
static struct super_block *bm_get_sb(struct file_system_type *fs_type,
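
Note: the tree_descr array handed to simple_fill_super() is indexed by the inode number each file should receive, may contain holes (entries with a NULL name are skipped), and ends at the first entry whose name is the empty string, as the simple_fill_super() loop added in fs/libfs.c later in this patch shows. A minimal example (demo_files, demo_status_ops, demo_register_ops and the magic value are illustrative, not from the patch):

	static struct tree_descr demo_files[] = {
		/* [0] left as a hole so inode numbers start at 1 */
		[1] = {"status",   &demo_status_ops,   S_IRUGO},
		[2] = {"register", &demo_register_ops, S_IWUSR},
		/* last one */ {""}
	};

	static int demo_fill_super(struct super_block *sb, void *data, int silent)
	{
		return simple_fill_super(sb, 0x64656d6f, demo_files);
	}
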
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 595d89d64e7e..7b53dd72cb26 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -437,6 +437,23 @@ void bd_release(struct block_device *bdev)
}
/*
+ * Tries to open block device by device number. Use it ONLY if you
+ * really do not have anything better - i.e. when you are behind a
+ * truly sucky interface and all you are given is a device number. _Never_
+ * to be used for internal purposes. If you ever need it - reconsider
+ * your API.
+ */
+struct block_device *open_by_devnum(dev_t dev, unsigned mode, int kind)
+{
+ struct block_device *bdev = bdget(dev);
+ int err = -ENOMEM;
+ int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
+ if (bdev)
+ err = blkdev_get(bdev, mode, flags, kind);
+ return err ? ERR_PTR(err) : bdev;
+}
+
+/*
* This routine checks whether a removable media has been changed,
* and invalidates all buffer-cache-entries in that case. This
* is a relatively slow routine, so we have to try to minimize using
@@ -518,7 +535,7 @@ static int do_open(struct block_device *bdev, struct inode *inode, struct file *
} else {
struct hd_struct *p;
struct block_device *whole;
- whole = bdget(MKDEV(disk->major, disk->first_minor));
+ whole = bdget_disk(disk, 0);
ret = -ENOMEM;
if (!whole)
goto out_first;
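
Note: open_by_devnum() folds the bdget()/blkdev_get() pair and its error handling into one call that returns either an opened block_device or an ERR_PTR. The ext3, JFS, md and dm-table conversions in this patch all take the same shape:

	struct block_device *bdev;

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE, BDEV_FS);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);	/* the failure path needs no bdput()
					   here, as the converted callers show */
	/* ... use bdev ... */
	blkdev_put(bdev, BDEV_FS);
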
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index da7f57d10e87..a4b37b8d9772 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -384,20 +384,16 @@ void ext3_update_dynamic_rev(struct super_block *sb)
static struct block_device *ext3_blkdev_get(dev_t dev)
{
struct block_device *bdev;
- int err = -ENODEV;
char b[BDEVNAME_SIZE];
- bdev = bdget(dev);
- if (bdev == NULL)
- goto fail;
- err = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_FS);
- if (err < 0)
+ bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE, BDEV_FS);
+ if (IS_ERR(bdev))
goto fail;
return bdev;
fail:
- printk(KERN_ERR "EXT3: failed to open journal device %s: %d\n",
- __bdevname(dev, b), err);
+ printk(KERN_ERR "EXT3: failed to open journal device %s: %ld\n",
+ __bdevname(dev, b), PTR_ERR(bdev));
return NULL;
}
diff --git a/fs/inode.c b/fs/inode.c
index 6e9b869fe9d2..9440ffe76b08 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -365,16 +365,6 @@ int __invalidate_device(struct block_device *bdev, int do_sync)
return res;
}
-int invalidate_device(kdev_t dev, int do_sync)
-{
- int res = 0;
- struct block_device *bdev = bdget(kdev_t_to_nr(dev));
- if (bdev)
- res = __invalidate_device(bdev, do_sync);
- bdput(bdev);
- return res;
-}
-
static int can_unuse(struct inode *inode)
{
if (inode->i_state)
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 2d18d1d64875..bd3ca7cf8f3d 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1105,13 +1105,10 @@ int lmLogOpen(struct super_block *sb, struct jfs_log ** logptr)
*/
externalLog:
- if (!(bdev = bdget(JFS_SBI(sb)->logdev))) {
- rc = ENODEV;
- goto free;
- }
-
- if ((rc = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_FS))) {
- rc = -rc;
+ bdev = open_by_devnum(JFS_SBI(sb)->logdev,
+ FMODE_READ|FMODE_WRITE, BDEV_FS);
+ if (IS_ERR(bdev)) {
+ rc = -PTR_ERR(bdev);
goto free;
}
diff --git a/fs/libfs.c b/fs/libfs.c
index ea0942673d3d..860892e92b54 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -4,8 +4,11 @@
*/
#include <linux/pagemap.h>
+#include <linux/mount.h>
#include <linux/vfs.h>
+extern struct vfsmount *do_kern_mount(const char *, int, char *, void *);
+
int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
@@ -332,3 +335,94 @@ int simple_commit_write(struct file *file, struct page *page,
set_page_dirty(page);
return 0;
}
+
+int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files)
+{
+ static struct super_operations s_ops = {statfs:simple_statfs};
+ struct inode *inode;
+ struct dentry *root;
+ struct dentry *dentry;
+ int i;
+
+ s->s_blocksize = PAGE_CACHE_SIZE;
+ s->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ s->s_magic = magic;
+ s->s_op = &s_ops;
+
+ inode = new_inode(s);
+ if (!inode)
+ return -ENOMEM;
+ inode->i_mode = S_IFDIR | 0755;
+ inode->i_uid = inode->i_gid = 0;
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ root = d_alloc_root(inode);
+ if (!root) {
+ iput(inode);
+ return -ENOMEM;
+ }
+ for (i = 0; !files->name || files->name[0]; i++, files++) {
+ struct qstr name;
+ if (!files->name)
+ continue;
+ name.name = files->name;
+ name.len = strlen(name.name);
+ name.hash = full_name_hash(name.name, name.len);
+ dentry = d_alloc(root, &name);
+ if (!dentry)
+ goto out;
+ inode = new_inode(s);
+ if (!inode)
+ goto out;
+ inode->i_mode = S_IFREG | files->mode;
+ inode->i_uid = inode->i_gid = 0;
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_fop = files->ops;
+ inode->i_ino = i;
+ d_add(dentry, inode);
+ }
+ s->s_root = root;
+ return 0;
+out:
+ d_genocide(root);
+ dput(root);
+ return -ENOMEM;
+}
+
+static spinlock_t pin_fs_lock = SPIN_LOCK_UNLOCKED;
+
+int simple_pin_fs(char *name, struct vfsmount **mount, int *count)
+{
+ struct vfsmount *mnt = NULL;
+ spin_lock(&pin_fs_lock);
+ if (unlikely(!*mount)) {
+ spin_unlock(&pin_fs_lock);
+ mnt = do_kern_mount(name, 0, name, NULL);
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
+ spin_lock(&pin_fs_lock);
+ if (!*mount)
+ *mount = mnt;
+ }
+ mntget(*mount);
+ ++*count;
+ spin_unlock(&pin_fs_lock);
+ mntput(mnt);
+ return 0;
+}
+
+void simple_release_fs(struct vfsmount **mount, int *count)
+{
+ struct vfsmount *mnt;
+ spin_lock(&pin_fs_lock);
+ mnt = *mount;
+ if (!--*count)
+ *mount = NULL;
+ spin_unlock(&pin_fs_lock);
+ mntput(mnt);
+}
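
Note: simple_pin_fs()/simple_release_fs() centralize the mount-on-first-use refcounting that capifs, binfmt_misc and usbfs each open-coded around their own spinlock. A minimal sketch of a caller, mirroring the capifs conversion earlier in this patch ("demofs", demo_mnt, demo_count and the demo_* helpers are illustrative names, not part of the patch):

	static struct vfsmount *demo_mnt;	/* stays NULL until first pin */
	static int demo_count;

	static int demo_pin(void)
	{
		/* mounts "demofs" on first use, otherwise bumps the count */
		return simple_pin_fs("demofs", &demo_mnt, &demo_count);
	}

	static void demo_unpin(void)
	{
		/* drops the count; unpins the mount when it hits zero */
		simple_release_fs(&demo_mnt, &demo_count);
	}
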
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index ce3b5d31ab2b..34413fc2df84 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -48,7 +48,6 @@ enum {
NFSD_List,
NFSD_Fh,
NFSD_Threads,
- NFSD_END
};
/*
@@ -204,22 +203,6 @@ static struct file_operations exports_operations = {
.release = exports_release,
};
-/*
- * Description of fs contents.
- */
-static struct { char *name; struct file_operations *ops; int mode; } files[] = {
- [NFSD_Svc] = {".svc", &transaction_ops, S_IWUSR},
- [NFSD_Add] = {".add", &transaction_ops, S_IWUSR},
- [NFSD_Del] = {".del", &transaction_ops, S_IWUSR},
- [NFSD_Export] = {".export", &transaction_ops, S_IWUSR},
- [NFSD_Unexport] = {".unexport", &transaction_ops, S_IWUSR},
- [NFSD_Getfd] = {".getfd", &transaction_ops, S_IWUSR|S_IRUSR},
- [NFSD_Getfs] = {".getfs", &transaction_ops, S_IWUSR|S_IRUSR},
- [NFSD_List] = {"exports", &exports_operations, S_IRUGO},
- [NFSD_Fh] = {"filehandle", &transaction_ops, S_IWUSR|S_IRUSR},
- [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR},
-};
-
/*----------------------------------------------------------------------------*/
/*
* payload - write methods
@@ -431,64 +414,22 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
* populating the filesystem.
*/
-static struct super_operations s_ops = {
- .statfs = simple_statfs,
-};
-
static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
{
- struct inode *inode;
- struct dentry *root;
- struct dentry *dentry;
- int i;
-
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = 0x6e667364;
- sb->s_op = &s_ops;
-
- inode = new_inode(sb);
- if (!inode)
- return -ENOMEM;
- inode->i_mode = S_IFDIR | 0755;
- inode->i_uid = inode->i_gid = 0;
- inode->i_blksize = PAGE_CACHE_SIZE;
- inode->i_blocks = 0;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- inode->i_op = &simple_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
- root = d_alloc_root(inode);
- if (!root) {
- iput(inode);
- return -ENOMEM;
- }
- for (i = NFSD_Svc; i < NFSD_END; i++) {
- struct qstr name;
- name.name = files[i].name;
- name.len = strlen(name.name);
- name.hash = full_name_hash(name.name, name.len);
- dentry = d_alloc(root, &name);
- if (!dentry)
- goto out;
- inode = new_inode(sb);
- if (!inode)
- goto out;
- inode->i_mode = S_IFREG | files[i].mode;
- inode->i_uid = inode->i_gid = 0;
- inode->i_blksize = PAGE_CACHE_SIZE;
- inode->i_blocks = 0;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- inode->i_fop = files[i].ops;
- inode->i_ino = i;
- d_add(dentry, inode);
- }
- sb->s_root = root;
- return 0;
-
-out:
- d_genocide(root);
- dput(root);
- return -ENOMEM;
+ static struct tree_descr nfsd_files[] = {
+ [NFSD_Svc] = {".svc", &transaction_ops, S_IWUSR},
+ [NFSD_Add] = {".add", &transaction_ops, S_IWUSR},
+ [NFSD_Del] = {".del", &transaction_ops, S_IWUSR},
+ [NFSD_Export] = {".export", &transaction_ops, S_IWUSR},
+ [NFSD_Unexport] = {".unexport", &transaction_ops, S_IWUSR},
+ [NFSD_Getfd] = {".getfd", &transaction_ops, S_IWUSR|S_IRUSR},
+ [NFSD_Getfs] = {".getfs", &transaction_ops, S_IWUSR|S_IRUSR},
+ [NFSD_List] = {"exports", &exports_operations, S_IRUGO},
+ [NFSD_Fh] = {"filehandle", &transaction_ops, S_IWUSR|S_IRUSR},
+ [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR},
+ /* last one */ {""}
+ };
+ return simple_fill_super(sb, 0x6e667364, nfsd_files);
}
static struct super_block *nfsd_get_sb(struct file_system_type *fs_type,
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index f185f0284dc3..a8530c42211f 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -311,7 +311,7 @@ void register_disk(struct gendisk *disk)
if (!get_capacity(disk))
return;
- bdev = bdget(MKDEV(disk->major, disk->first_minor));
+ bdev = bdget_disk(disk, 0);
if (blkdev_get(bdev, FMODE_READ, 0, BDEV_RAW) < 0)
return;
state = check_partition(disk, bdev);
@@ -336,13 +336,12 @@ void register_disk(struct gendisk *disk)
int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
{
- kdev_t dev = to_kdev_t(bdev->bd_dev);
struct parsed_partitions *state;
int p, res;
if (bdev->bd_part_count)
return -EBUSY;
- res = invalidate_device(dev, 1);
+ res = invalidate_partition(disk, 0);
if (res)
return res;
bdev->bd_invalidated = 0;
@@ -391,18 +390,14 @@ fail:
void del_gendisk(struct gendisk *disk)
{
- int max_p = disk->minors;
- kdev_t devp;
int p;
/* invalidate stuff */
- for (p = max_p - 1; p > 0; p--) {
- devp = mk_kdev(disk->major,disk->first_minor + p);
- invalidate_device(devp, 1);
+ for (p = disk->minors - 1; p > 0; p--) {
+ invalidate_partition(disk, p);
delete_partition(disk, p);
}
- devp = mk_kdev(disk->major,disk->first_minor);
- invalidate_device(devp, 1);
+ invalidate_partition(disk, 0);
disk->capacity = 0;
disk->flags &= ~GENHD_FL_UP;
unlink_gendisk(disk);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 986a877f0bf4..2c6b2708515c 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1911,21 +1911,16 @@ static int journal_init_dev( struct super_block *super,
/* there is no "jdev" option and journal is on separate device */
if( ( !jdev_name || !jdev_name[ 0 ] ) ) {
- journal -> j_dev_bd = bdget(jdev);
- if( journal -> j_dev_bd )
- result = blkdev_get( journal -> j_dev_bd,
- blkdev_mode, 0,
- BDEV_FS );
- else
- result = -ENOMEM;
- if( result != 0 )
+ journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode, BDEV_FS);
+ if (IS_ERR(journal->j_dev_bd)) {
+ result = PTR_ERR(journal->j_dev_bd);
+ journal->j_dev_bd = NULL;
printk( "sh-458: journal_init_dev: cannot init journal device\n '%s': %i",
- bdevname(journal->j_dev_bd, b), result );
-
- else if (jdev != super->s_dev) {
+ __bdevname(jdev, b), result );
+ return result;
+ } else if (jdev != super->s_dev)
set_blocksize(journal->j_dev_bd, super->s_blocksize);
- }
- return result;
+ return 0;
}
journal -> j_dev_file = filp_open( jdev_name, 0, 0 );
diff --git a/include/asm-arm/arch-iop3xx/iop310-irqs.h b/include/asm-arm/arch-iop3xx/iop310-irqs.h
new file mode 100644
index 000000000000..f468a285832f
--- /dev/null
+++ b/include/asm-arm/arch-iop3xx/iop310-irqs.h
@@ -0,0 +1,80 @@
+/*
+ * linux/include/asm-arm/arch-iop310/irqs.h
+ *
+ * Author: Nicolas Pitre
+ * Copyright: (C) 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 06/13/01: Added 80310 on-chip interrupt sources <dsaxena@mvista.com>
+ *
+ */
+#include <linux/config.h>
+
+/*
+ * XS80200 specific IRQs
+ */
+#define IRQ_XS80200_BCU 0 /* Bus Control Unit */
+#define IRQ_XS80200_PMU 1 /* Performance Monitoring Unit */
+#define IRQ_XS80200_EXTIRQ 2 /* external IRQ signal */
+#define IRQ_XS80200_EXTFIQ 3 /* external IRQ signal */
+
+#define NR_XS80200_IRQS 4
+
+#define XSCALE_PMU_IRQ IRQ_XS80200_PMU
+
+/*
+ * IOP80310 chipset interrupts
+ */
+#define IOP310_IRQ_OFS NR_XS80200_IRQS
+#define IOP310_IRQ(x) (IOP310_IRQ_OFS + (x))
+
+/*
+ * On FIQ1ISR register
+ */
+#define IRQ_IOP310_DMA0 IOP310_IRQ(0) /* DMA Channel 0 */
+#define IRQ_IOP310_DMA1 IOP310_IRQ(1) /* DMA Channel 1 */
+#define IRQ_IOP310_DMA2 IOP310_IRQ(2) /* DMA Channel 2 */
+#define IRQ_IOP310_PMON IOP310_IRQ(3) /* Bus performance Unit */
+#define IRQ_IOP310_AAU	IOP310_IRQ(4)	/* Application Accelerator Unit */
+
+/*
+ * On FIQ2ISR register
+ */
+#define IRQ_IOP310_I2C IOP310_IRQ(5) /* I2C unit */
+#define IRQ_IOP310_MU IOP310_IRQ(6) /* messaging unit */
+
+#define NR_IOP310_IRQS (IOP310_IRQ(6) + 1)
+
+#define NR_IRQS NR_IOP310_IRQS
+
+
+/*
+ * Interrupts available on the Cyclone IQ80310 board
+ */
+#ifdef CONFIG_ARCH_IQ80310
+
+#define IQ80310_IRQ_OFS NR_IOP310_IRQS
+#define IQ80310_IRQ(y) ((IQ80310_IRQ_OFS) + (y))
+
+#define IRQ_IQ80310_TIMER IQ80310_IRQ(0) /* Timer Interrupt */
+#define IRQ_IQ80310_I82559 IQ80310_IRQ(1) /* I82559 Ethernet Interrupt */
+#define IRQ_IQ80310_UART1 IQ80310_IRQ(2) /* UART1 Interrupt */
+#define IRQ_IQ80310_UART2 IQ80310_IRQ(3) /* UART2 Interrupt */
+#define IRQ_IQ80310_INTD IQ80310_IRQ(4) /* PCI INTD */
+
+
+/*
+ * ONLY AVAILABLE ON REV F OR NEWER BOARDS!
+ */
+#define IRQ_IQ80310_INTA IQ80310_IRQ(5) /* PCI INTA */
+#define IRQ_IQ80310_INTB IQ80310_IRQ(6) /* PCI INTB */
+#define IRQ_IQ80310_INTC IQ80310_IRQ(7) /* PCI INTC */
+
+#undef NR_IRQS
+#define NR_IRQS (IQ80310_IRQ(7) + 1)
+
+#endif // CONFIG_ARCH_IQ80310
+
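
Note: worked through, the cascaded offsets give NR_XS80200_IRQS = 4, IOP310_IRQ(x) = 4 + x and NR_IOP310_IRQS = IOP310_IRQ(6) + 1 = 11; with CONFIG_ARCH_IQ80310 the board interrupts start at IQ80310_IRQ(0) = 11, so the redefined NR_IRQS becomes IQ80310_IRQ(7) + 1 = 19, while a non-IQ80310 IOP310 system keeps NR_IRQS = 11.
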
diff --git a/include/asm-arm/bugs.h b/include/asm-arm/bugs.h
index 2f7b33b10334..14a4e05eee8c 100644
--- a/include/asm-arm/bugs.h
+++ b/include/asm-arm/bugs.h
@@ -1,7 +1,7 @@
/*
* linux/include/asm-arm/bugs.h
*
- * Copyright (C) 1995 Russell King
+ * Copyright (C) 1995-2003 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,6 +12,6 @@
#include <asm/proc-fns.h>
-#define check_bugs() cpu_check_bugs()
+#define check_bugs() do { } while (0)
#endif
diff --git a/include/asm-arm/cpu-multi26.h b/include/asm-arm/cpu-multi26.h
index fc8d9a215ebc..415ce60471dd 100644
--- a/include/asm-arm/cpu-multi26.h
+++ b/include/asm-arm/cpu-multi26.h
@@ -19,14 +19,12 @@ struct task_struct;
* relies on it.
*/
extern struct processor {
- /* check for any bugs */
- void (*_check_bugs)(void);
/* Set up any processor specifics */
void (*_proc_init)(void);
/* Disable any processor specifics */
void (*_proc_fin)(void);
/* set the MEMC hardware mappings */
- void (*_set_pgd)(pgd_t *pgd);
+ void (*_switch_mm)(pgd_t *pgd);
/* XCHG */
unsigned long (*_xchg_1)(unsigned long x, volatile void *ptr);
unsigned long (*_xchg_4)(unsigned long x, volatile void *ptr);
@@ -36,11 +34,10 @@ extern const struct processor arm2_processor_functions;
extern const struct processor arm250_processor_functions;
extern const struct processor arm3_processor_functions;
-#define cpu_check_bugs() processor._check_bugs()
#define cpu_proc_init() processor._proc_init()
#define cpu_proc_fin() processor._proc_fin()
#define cpu_do_idle() do { } while (0)
-#define cpu_switch_mm(pgd,mm) processor._set_pgd(pgd)
+#define cpu_switch_mm(pgd,mm) processor._switch_mm(pgd)
#define cpu_xchg_1(x,ptr) processor._xchg_1(x,ptr)
#define cpu_xchg_4(x,ptr) processor._xchg_4(x,ptr)
diff --git a/include/asm-arm/cpu-multi32.h b/include/asm-arm/cpu-multi32.h
index b34bcec8c3f7..00f936197ad9 100644
--- a/include/asm-arm/cpu-multi32.h
+++ b/include/asm-arm/cpu-multi32.h
@@ -24,10 +24,6 @@ extern struct processor {
*/
void (*_data_abort)(unsigned long pc);
/*
- * check for any bugs
- */
- void (*_check_bugs)(void);
- /*
* Set up any processor specifics
*/
void (*_proc_init)(void);
@@ -46,96 +42,36 @@ extern struct processor {
/*
* Processor architecture specific
*/
- struct { /* CACHE */
- /*
- * flush all caches
- */
- void (*clean_invalidate_all)(void);
- /*
- * flush a specific page or pages
- */
- void (*clean_invalidate_range)(unsigned long address, unsigned long end, int flags);
- } cache;
-
- struct { /* D-cache */
- /*
- * invalidate the specified data range
- */
- void (*invalidate_range)(unsigned long start, unsigned long end);
- /*
- * clean specified data range
- */
- void (*clean_range)(unsigned long start, unsigned long end);
- /*
- * obsolete flush cache entry
- */
- void (*clean_page)(void *virt_page);
- /*
- * clean a virtual address range from the
- * D-cache without flushing the cache.
- */
- void (*clean_entry)(unsigned long start);
- } dcache;
-
- struct { /* I-cache */
- /*
- * invalidate the I-cache for the specified range
- */
- void (*invalidate_range)(unsigned long start, unsigned long end);
- /*
- * invalidate the I-cache for the specified virtual page
- */
- void (*invalidate_page)(void *virt_page);
- } icache;
+ /*
+ * clean a virtual address range from the
+ * D-cache without flushing the cache.
+ */
+ void (*dcache_clean_area)(void *addr, int size);
- struct { /* PageTable */
- /*
- * Set the page table
- */
- void (*set_pgd)(unsigned long pgd_phys, struct mm_struct *mm);
- /*
- * Set a PMD (handling IMP bit 4)
- */
- void (*flush_pmd)(pmd_t *pmdp);
- /*
- * Set a PTE
- */
- void (*set_pte)(pte_t *ptep, pte_t pte);
- } pgtable;
+ /*
+ * Set the page table
+ */
+ void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+ /*
+ * Set a PTE
+ */
+ void (*set_pte)(pte_t *ptep, pte_t pte);
} processor;
-extern const struct processor arm6_processor_functions;
-extern const struct processor arm7_processor_functions;
-extern const struct processor sa110_processor_functions;
-
-#define cpu_check_bugs() processor._check_bugs()
-#define cpu_proc_init() processor._proc_init()
-#define cpu_proc_fin() processor._proc_fin()
-#define cpu_reset(addr) processor.reset(addr)
-#define cpu_do_idle() processor._do_idle()
-
-#define cpu_cache_clean_invalidate_all() processor.cache.clean_invalidate_all()
-#define cpu_cache_clean_invalidate_range(s,e,f) processor.cache.clean_invalidate_range(s,e,f)
-
-#define cpu_dcache_clean_page(vp) processor.dcache.clean_page(vp)
-#define cpu_dcache_clean_entry(addr) processor.dcache.clean_entry(addr)
-#define cpu_dcache_clean_range(s,e) processor.dcache.clean_range(s,e)
-#define cpu_dcache_invalidate_range(s,e) processor.dcache.invalidate_range(s,e)
-
-#define cpu_icache_invalidate_range(s,e) processor.icache.invalidate_range(s,e)
-#define cpu_icache_invalidate_page(vp) processor.icache.invalidate_page(vp)
-
-#define cpu_set_pgd(pgd,mm) processor.pgtable.set_pgd(pgd,mm)
-#define cpu_flush_pmd(pmdp) processor.pgtable.flush_pmd(pmdp)
-#define cpu_set_pte(ptep, pte) processor.pgtable.set_pte(ptep, pte)
+#define cpu_proc_init() processor._proc_init()
+#define cpu_proc_fin() processor._proc_fin()
+#define cpu_reset(addr) processor.reset(addr)
+#define cpu_do_idle() processor._do_idle()
+#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
+#define cpu_set_pte(ptep, pte) processor.set_pte(ptep, pte)
-#define cpu_switch_mm(pgd,mm) cpu_set_pgd(__virt_to_phys((unsigned long)(pgd)),mm)
+#define cpu_switch_mm(pgd,mm) processor.switch_mm(__virt_to_phys((unsigned long)(pgd)),mm)
#define cpu_get_pgd() \
({ \
unsigned long pg; \
- __asm__("mrc p15, 0, %0, c2, c0, 0" \
- : "=r" (pg)); \
+ __asm__("mrc p15, 0, %0, c2, c0, 0" \
+ : "=r" (pg) : : "cc"); \
pg &= ~0x3fff; \
(pgd_t *)phys_to_virt(pg); \
})
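
The hunk above collapses the old cache/dcache/icache/pgtable method tables into a much smaller per-CPU vector. As a hedged illustration only (not part of this patch; the in-tree switch_mm() is defined elsewhere), the context-switch path would consume the new switch_mm hook roughly like this:

#include <linux/sched.h>
#include <asm/proc-fns.h>
#include <asm/memory.h>

/* Illustrative sketch: hand the next mm's page directory to whichever
 * proc-*.S implementation was selected at boot.  cpu_switch_mm() does
 * the virtual-to-physical conversion before calling processor.switch_mm(). */
static inline void example_switch_mm(struct mm_struct *prev,
				     struct mm_struct *next)
{
	if (prev != next)
		cpu_switch_mm(next->pgd, next);
}
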
diff --git a/include/asm-arm/cpu-single.h b/include/asm-arm/cpu-single.h
index aa42f706c55f..cc213ad7167f 100644
--- a/include/asm-arm/cpu-single.h
+++ b/include/asm-arm/cpu-single.h
@@ -22,21 +22,12 @@
* function pointers for this lot. Otherwise, we can optimise the
* table away.
*/
-#define cpu_check_bugs __cpu_fn(CPU_NAME,_check_bugs)
#define cpu_proc_init __cpu_fn(CPU_NAME,_proc_init)
#define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin)
#define cpu_reset __cpu_fn(CPU_NAME,_reset)
#define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle)
-#define cpu_cache_clean_invalidate_all __cpu_fn(CPU_NAME,_cache_clean_invalidate_all)
-#define cpu_cache_clean_invalidate_range __cpu_fn(CPU_NAME,_cache_clean_invalidate_range)
-#define cpu_dcache_invalidate_range __cpu_fn(CPU_NAME,_dcache_invalidate_range)
-#define cpu_dcache_clean_range __cpu_fn(CPU_NAME,_dcache_clean_range)
-#define cpu_dcache_clean_page __cpu_fn(CPU_NAME,_dcache_clean_page)
-#define cpu_dcache_clean_entry __cpu_fn(CPU_NAME,_dcache_clean_entry)
-#define cpu_icache_invalidate_range __cpu_fn(CPU_NAME,_icache_invalidate_range)
-#define cpu_icache_invalidate_page __cpu_fn(CPU_NAME,_icache_invalidate_page)
-#define cpu_set_pgd __cpu_fn(CPU_NAME,_set_pgd)
-#define cpu_flush_pmd __cpu_fn(CPU_NAME,_flush_pmd)
+#define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area)
+#define cpu__switch_mm __cpu_fn(CPU_NAME,_switch_mm)
#define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte)
#ifndef __ASSEMBLY__
@@ -47,36 +38,22 @@
struct mm_struct;
/* declare all the functions as extern */
-extern void cpu_data_abort(unsigned long pc);
-extern void cpu_check_bugs(void);
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
-
-extern void cpu_cache_clean_invalidate_all(void);
-extern void cpu_cache_clean_invalidate_range(unsigned long address, unsigned long end, int flags);
-
-extern void cpu_dcache_invalidate_range(unsigned long start, unsigned long end);
-extern void cpu_dcache_clean_range(unsigned long start, unsigned long end);
-extern void cpu_dcache_clean_page(void *virt_page);
-extern void cpu_dcache_clean_entry(unsigned long address);
-
-extern void cpu_icache_invalidate_range(unsigned long start, unsigned long end);
-extern void cpu_icache_invalidate_page(void *virt_page);
-
-extern void cpu_set_pgd(unsigned long pgd_phys, struct mm_struct *mm);
-extern void cpu_flush_pmd(pmd_t *pmdp);
+extern void cpu_dcache_clean_area(void *, int);
+extern void cpu__switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_set_pte(pte_t *ptep, pte_t pte);
extern volatile void cpu_reset(unsigned long addr);
-#define cpu_switch_mm(pgd,mm) cpu_set_pgd(__virt_to_phys((unsigned long)(pgd)),mm)
+#define cpu_switch_mm(pgd,mm) cpu__switch_mm(__virt_to_phys((unsigned long)(pgd)),mm)
#define cpu_get_pgd() \
({ \
unsigned long pg; \
- __asm__("mrc p15, 0, %0, c2, c0, 0" \
- : "=r" (pg)); \
+ __asm__("mrc p15, 0, %0, c2, c0, 0" \
+ : "=r" (pg) : : "cc"); \
pg &= ~0x3fff; \
(pgd_t *)phys_to_virt(pg); \
})
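
For a single-CPU build the same names resolve with no indirection at all. The expansion below is an assumption for illustration (the glue macro and CPU_NAME are defined outside this hunk), taking a kernel configured only for ARM920T, where CPU_NAME would be cpu_arm920:

/* Assumed expansions, shown only as an illustration:
 *
 *	cpu_set_pte(ptep, pte)
 *		-> cpu_arm920_set_pte(ptep, pte)
 *	cpu_dcache_clean_area(addr, size)
 *		-> cpu_arm920_dcache_clean_area(addr, size)
 *	cpu_switch_mm(pgd, mm)
 *		-> cpu__switch_mm(__virt_to_phys((unsigned long)(pgd)), mm)
 *		-> cpu_arm920_switch_mm(__virt_to_phys((unsigned long)(pgd)), mm)
 *
 * i.e. direct calls into arch/arm/mm/proc-arm920.S, letting the
 * compiler discard the function-pointer table entirely.
 */
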
diff --git a/include/asm-arm/hardirq.h b/include/asm-arm/hardirq.h
index df9f03a8dcf1..a9d82bc0757d 100644
--- a/include/asm-arm/hardirq.h
+++ b/include/asm-arm/hardirq.h
@@ -77,11 +77,14 @@ typedef struct {
#endif
#ifndef CONFIG_SMP
+/*
+ * Some compilers get the use of "%?" wrong in the asm below.
+ */
#define irq_exit() \
do { \
preempt_count() -= IRQ_EXIT_OFFSET; \
if (!in_interrupt() && softirq_pending(smp_processor_id())) \
- __asm__("bl%? __do_softirq": : : "lr");/* out of line */\
+ __asm__("bl __do_softirq": : : "lr", "cc");/* out of line */\
preempt_enable_no_resched(); \
} while (0)
diff --git a/include/asm-arm/proc-armv/cache.h b/include/asm-arm/proc-armv/cache.h
index 4bc1a79e72d3..250a69e335ff 100644
--- a/include/asm-arm/proc-armv/cache.h
+++ b/include/asm-arm/proc-armv/cache.h
@@ -11,64 +11,236 @@
#include <asm/glue.h>
/*
+ * Cache Model
+ * ===========
+ */
+#undef _CACHE
+#undef MULTI_CACHE
+
+#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v3
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM720T)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v4
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
+ defined(CONFIG_CPU_ARM1020)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_ARM926T)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE arm926
+# endif
+#endif
+
+#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v4wb
+# endif
+#endif
+
+#if defined(CONFIG_CPU_XSCALE)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE xscale
+# endif
+#endif
+
+#if !defined(_CACHE) && !defined(MULTI_CACHE)
+#error Unknown cache maintenance model
+#endif
+
+/*
* This flag is used to indicate that the page pointed to by a pte
* is dirty and requires cleaning before returning it to the user.
*/
#define PG_dcache_dirty PG_arch_1
/*
- * Cache handling for 32-bit ARM processors.
+ * MM Cache Management
+ * ===================
+ *
+ * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
+ * implement these methods.
+ *
+ * Start addresses are inclusive and end addresses are exclusive;
+ * start addresses should be rounded down, end addresses up.
+ *
+ * See linux/Documentation/cachetlb.txt for more information.
+ * Please note that the implementation of these, and the required
+ * effects are cache-type (VIVT/VIPT/PIPT) specific.
+ *
+ * flush_cache_kern_all()
+ *
+ * Unconditionally clean and invalidate the entire cache.
*
- * Note that on ARM, we have a more accurate specification than that
- * Linux's "flush". We therefore do not use "flush" here, but instead
- * use:
+ * flush_cache_user_mm(mm)
*
- * clean: the act of pushing dirty cache entries out to memory.
- * invalidate: the act of discarding data held within the cache,
- * whether it is dirty or not.
+ * Clean and invalidate all user space cache entries
+ * before a change of page tables.
+ *
+ * flush_cache_user_range(start, end, flags)
+ *
+ * Clean and invalidate a range of cache entries in the
+ * specified address space before a change of page tables.
+ * - start - user start address (inclusive, page aligned)
+ * - end - user end address (exclusive, page aligned)
+ * - flags - vma->vm_flags field
+ *
+ * coherent_kern_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * DMA Cache Coherency
+ * ===================
+ *
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * dma_flush_range(start, end)
+ *
+ * Clean and invalidate the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+
+struct cpu_cache_fns {
+ void (*flush_kern_all)(void);
+ void (*flush_user_all)(void);
+ void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
+
+ void (*coherent_kern_range)(unsigned long, unsigned long);
+ void (*flush_kern_dcache_page)(void *);
+
+ void (*dma_inv_range)(unsigned long, unsigned long);
+ void (*dma_clean_range)(unsigned long, unsigned long);
+ void (*dma_flush_range)(unsigned long, unsigned long);
+};
+
+/*
+ * Select the calling method
*/
+#ifdef MULTI_CACHE
+
+extern struct cpu_cache_fns cpu_cache;
+
+#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
+#define __cpuc_flush_user_all cpu_cache.flush_user_all
+#define __cpuc_flush_user_range cpu_cache.flush_user_range
+#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
+#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
/*
- * Generic I + D cache
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
*/
-#define flush_cache_all() \
- do { \
- cpu_cache_clean_invalidate_all(); \
- } while (0)
-
-/* This is always called for current->mm */
-#define flush_cache_mm(_mm) \
- do { \
- if ((_mm) == current->active_mm) \
- cpu_cache_clean_invalidate_all(); \
- } while (0)
-
-#define flush_cache_range(_vma,_start,_end) \
- do { \
- if ((_vma)->vm_mm == current->active_mm) \
- cpu_cache_clean_invalidate_range((_start), (_end), 1); \
- } while (0)
-
-#define flush_cache_page(_vma,_vmaddr) \
- do { \
- if ((_vma)->vm_mm == current->active_mm) { \
- cpu_cache_clean_invalidate_range((_vmaddr), \
- (_vmaddr) + PAGE_SIZE, \
- ((_vma)->vm_flags & VM_EXEC)); \
- } \
- } while (0)
+#define dmac_inv_range cpu_cache.dma_inv_range
+#define dmac_clean_range cpu_cache.dma_clean_range
+#define dmac_flush_range cpu_cache.dma_flush_range
+
+#else
+
+#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
+#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
+#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
+#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
+#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
+
+extern void __cpuc_flush_kern_all(void);
+extern void __cpuc_flush_user_all(void);
+extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
+extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
+extern void __cpuc_flush_dcache_page(void *);
/*
- * D cache only
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
*/
+#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
+#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
+#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
+
+extern void dmac_inv_range(unsigned long, unsigned long);
+extern void dmac_clean_range(unsigned long, unsigned long);
+extern void dmac_flush_range(unsigned long, unsigned long);
+
+#endif
+
+/*
+ * Convert calls to our calling convention.
+ */
+#define flush_cache_all() __cpuc_flush_kern_all()
+
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+ if (current->active_mm == mm)
+ __cpuc_flush_user_all();
+}
-#define invalidate_dcache_range(_s,_e) cpu_dcache_invalidate_range((_s),(_e))
-#define clean_dcache_range(_s,_e) cpu_dcache_clean_range((_s),(_e))
-#define flush_dcache_range(_s,_e) cpu_cache_clean_invalidate_range((_s),(_e),0)
+static inline void
+flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ if (current->active_mm == vma->vm_mm)
+ __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
+ vma->vm_flags);
+}
+
+static inline void
+flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
+{
+ if (current->active_mm == vma->vm_mm) {
+ unsigned long addr = user_addr & PAGE_MASK;
+ __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
+ }
+}
-#define clean_dcache_area(start,size) \
- cpu_cache_clean_invalidate_range((unsigned long)start, \
- ((unsigned long)start) + size, 0);
+/*
+ * Perform necessary cache operations to ensure that data previously
+ * stored within this range of addresses can be executed by the CPU.
+ */
+#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)
+
+/*
+ * Perform necessary cache operations to ensure that the TLB will
+ * see data written in the specified area.
+ */
+#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size)
/*
* flush_dcache_page is used when the kernel has written to the page
@@ -104,18 +276,3 @@ static inline void flush_dcache_page(struct page *page)
* duplicate cache flushing elsewhere performed by flush_dcache_page().
*/
#define flush_icache_page(vma,page) do { } while (0)
-
-/*
- * I cache coherency stuff.
- *
- * This *is not* just icache. It is to make data written to memory
- * consistent such that instructions fetched from the region are what
- * we expect.
- *
- * This generally means that we have to clean out the Dcache and write
- * buffers, and maybe flush the Icache in the specified range.
- */
-#define flush_icache_range(_s,_e) \
- do { \
- cpu_icache_invalidate_range((_s), (_e)); \
- } while (0)
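
The comment block introduced above documents the per-CPU cache methods and the new flush_icache_range() wrapper. As a hedged usage sketch (the function name is made up, not taken from this patch), the usual "copy code, then make it executable" pattern looks like this:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Illustration only: after writing instructions through the D-side,
 * flush_icache_range() (-> __cpuc_coherent_kern_range) cleans the
 * D-cache and invalidates the I-cache for that range so the CPU can
 * safely execute from it. */
static void example_publish_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}
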
diff --git a/include/asm-arm/proc-armv/locks.h b/include/asm-arm/proc-armv/locks.h
index d96dede22cc2..13248f903677 100644
--- a/include/asm-arm/proc-armv/locks.h
+++ b/include/asm-arm/proc-armv/locks.h
@@ -67,7 +67,7 @@
" blle " #wake \
: \
: "r" (ptr), "I" (1) \
- : "ip", "lr", "cc"); \
+ : "ip", "lr", "cc", "memory"); \
})
/*
@@ -133,7 +133,7 @@
" bleq " #wake \
: \
: "r" (ptr), "I" (1) \
- : "ip", "lr", "cc"); \
+ : "ip", "lr", "cc", "memory"); \
})
#endif
diff --git a/include/asm-arm/proc-armv/pgalloc.h b/include/asm-arm/proc-armv/pgalloc.h
index 3263c346ccba..0e65ab7362e4 100644
--- a/include/asm-arm/proc-armv/pgalloc.h
+++ b/include/asm-arm/proc-armv/pgalloc.h
@@ -6,6 +6,7 @@
* Page table allocation/freeing primitives for 32-bit ARM processors.
*/
#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
#include "pgtable.h"
/*
@@ -92,7 +93,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
- cpu_flush_pmd(pmdp);
+ flush_pmd_entry(pmdp);
}
static inline void
@@ -105,5 +106,5 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
- cpu_flush_pmd(pmdp);
+ flush_pmd_entry(pmdp);
}
diff --git a/include/asm-arm/proc-armv/pgtable.h b/include/asm-arm/proc-armv/pgtable.h
index 53f2b3da4d16..616d80d69b1f 100644
--- a/include/asm-arm/proc-armv/pgtable.h
+++ b/include/asm-arm/proc-armv/pgtable.h
@@ -51,6 +51,7 @@
#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */
#define PMD_SECT_UNCACHED (0)
+#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
@@ -120,14 +121,19 @@
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
-#define set_pmd(pmdp,pmd) do { *pmdp = pmd; cpu_flush_pmd(pmdp); } while (0)
-static inline void pmd_clear(pmd_t *pmdp)
-{
- pmdp[0] = __pmd(0);
- pmdp[1] = __pmd(0);
- cpu_flush_pmd(pmdp);
-}
+#define set_pmd(pmdp,pmd) \
+ do { \
+ *pmdp = pmd; \
+ flush_pmd_entry(pmdp); \
+ } while (0)
+
+#define pmd_clear(pmdp) \
+ do { \
+ pmdp[0] = __pmd(0); \
+ pmdp[1] = __pmd(0); \
+ clean_pmd_entry(pmdp); \
+ } while (0)
static inline pte_t *pmd_page_kernel(pmd_t pmd)
{
diff --git a/include/asm-arm/proc-armv/system.h b/include/asm-arm/proc-armv/system.h
index 1fb05ebbaa68..2983649418db 100644
--- a/include/asm-arm/proc-armv/system.h
+++ b/include/asm-arm/proc-armv/system.h
@@ -15,12 +15,16 @@
#define set_cr(x) \
__asm__ __volatile__( \
"mcr p15, 0, %0, c1, c0, 0 @ set CR" \
- : : "r" (x))
+ : : "r" (x) : "cc")
-#define get_cr(x) \
+#define get_cr() \
+ ({ \
+ unsigned int __val; \
__asm__ __volatile__( \
"mrc p15, 0, %0, c1, c0, 0 @ get CR" \
- : "=r" (x))
+ : "=r" (__val) : : "cc"); \
+ __val; \
+ })
#define CR_M (1 << 0) /* MMU enable */
#define CR_A (1 << 1) /* Alignment abort enable */
@@ -48,16 +52,6 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
#endif
/*
- * Save the current interrupt enable state.
- */
-#define local_save_flags(x) \
- ({ \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ local_save_flags" \
- : "=r" (x) : : "memory"); \
- })
-
-/*
* Save the current interrupt enable state & disable IRQs
*/
#define local_irq_save(x) \
@@ -70,7 +64,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
" msr cpsr_c, %1" \
: "=r" (x), "=r" (temp) \
: \
- : "memory"); \
+ : "memory", "cc"); \
})
/*
@@ -85,7 +79,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
- : "memory"); \
+ : "memory", "cc"); \
})
/*
@@ -100,7 +94,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
- : "memory"); \
+ : "memory", "cc"); \
})
/*
@@ -115,7 +109,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
- : "memory"); \
+ : "memory", "cc"); \
})
/*
@@ -130,7 +124,17 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
- : "memory"); \
+ : "memory", "cc"); \
+ })
+
+/*
+ * Save the current interrupt enable state.
+ */
+#define local_save_flags(x) \
+ ({ \
+ __asm__ __volatile__( \
+ "mrs %0, cpsr @ local_save_flags" \
+ : "=r" (x) : : "memory", "cc"); \
})
/*
@@ -141,7 +145,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
"msr cpsr_c, %0 @ local_irq_restore\n" \
: \
: "r" (x) \
- : "memory")
+ : "memory", "cc")
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
@@ -186,12 +190,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
: "=&r" (ret)
: "r" (x), "r" (ptr)
- : "memory");
+ : "memory", "cc");
break;
case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
: "=&r" (ret)
: "r" (x), "r" (ptr)
- : "memory");
+ : "memory", "cc");
break;
#endif
default: __bad_xchg(ptr, size), ret = 0;
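
With get_cr() turned into an expression-valued macro, a control-register read-modify-write becomes a one-liner. A hedged sketch (not from this patch) that enables alignment fault checking:

#include <asm/system.h>

/* Illustration: read the control register, set the A bit if it is not
 * already set, and write it back. */
static void example_enable_alignment_faults(void)
{
	unsigned int cr = get_cr();

	if (!(cr & CR_A))
		set_cr(cr | CR_A);
}
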
diff --git a/include/asm-arm/proc-armv/tlbflush.h b/include/asm-arm/proc-armv/tlbflush.h
index d063ede9c50f..278c0624c11f 100644
--- a/include/asm-arm/proc-armv/tlbflush.h
+++ b/include/asm-arm/proc-armv/tlbflush.h
@@ -20,6 +20,7 @@
#define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11)
+#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)
/*
@@ -65,7 +66,7 @@
# define v4_always_flags (-1UL)
#endif
-#define v4wbi_tlb_flags (TLB_WB | \
+#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_I_PAGE | TLB_V4_D_PAGE)
@@ -84,7 +85,7 @@
# define v4wbi_always_flags (-1UL)
#endif
-#define v4wb_tlb_flags (TLB_WB | \
+#define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_D_PAGE)
@@ -287,6 +288,41 @@ static inline void flush_tlb_kernel_page(unsigned long kaddr)
asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
}
+/*
+ * flush_pmd_entry
+ *
+ * Flush a PMD entry (word aligned, or double-word aligned) to
+ * RAM if the TLB for the CPU we are running on requires this.
+ * This is typically used when we are creating PMD entries.
+ *
+ * clean_pmd_entry
+ *
+ * Clean (but don't drain the write buffer) if the CPU requires
+ * these operations. This is typically used when we are removing
+ * PMD entries.
+ */
+static inline void flush_pmd_entry(pmd_t *pmd)
+{
+ const unsigned int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_DCLEAN))
+ asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
+ : : "r" (pmd));
+ if (tlb_flag(TLB_WB))
+ asm("mcr%? p15, 0, %0, c7, c10, 4 @ flush_pmd"
+ : : "r" (zero));
+}
+
+static inline void clean_pmd_entry(pmd_t *pmd)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_DCLEAN))
+ asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
+ : : "r" (pmd));
+}
+
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags
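
flush_pmd_entry() and clean_pmd_entry() pair with the set_pmd()/pmd_clear() changes earlier in this patch. The sketch below is illustrative only: the section bits chosen are a minimal assumption, and real section mappings (arch/arm/mm/mm-armv.c) also set domain and cacheability bits.

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/* Illustration: after writing a first-level (section) entry, clean it
 * out to RAM so the hardware table walk sees it; when clearing, a
 * clean without the write-buffer drain is sufficient. */
static void example_set_section(pmd_t *pmdp, unsigned long phys)
{
	*pmdp = __pmd(phys | PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
	flush_pmd_entry(pmdp);
}

static void example_clear_section(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
	clean_pmd_entry(pmdp);
}
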
diff --git a/include/asm-arm/procinfo.h b/include/asm-arm/procinfo.h
index a762a23a7560..a3116e9f3758 100644
--- a/include/asm-arm/procinfo.h
+++ b/include/asm-arm/procinfo.h
@@ -14,6 +14,7 @@
struct cpu_tlb_fns;
struct cpu_user_fns;
+struct cpu_cache_fns;
struct processor;
/*
@@ -37,13 +38,14 @@ struct proc_info_list {
struct processor *proc;
struct cpu_tlb_fns *tlb;
struct cpu_user_fns *user;
+ struct cpu_cache_fns *cache;
};
extern unsigned int elf_hwcap;
#endif /* __ASSEMBLY__ */
-#define PROC_INFO_SZ 44
+#define PROC_INFO_SZ 48
#define HWCAP_SWP 1
#define HWCAP_HALF 2
diff --git a/include/asm-arm/setup.h b/include/asm-arm/setup.h
index f3319e9b4f29..9d92daa4173e 100644
--- a/include/asm-arm/setup.h
+++ b/include/asm-arm/setup.h
@@ -202,4 +202,17 @@ struct meminfo {
extern struct meminfo meminfo;
+/*
+ * Early command line parameters.
+ */
+struct early_params {
+ const char *arg;
+ void (*fn)(char **p);
+};
+
+#define __early_param(name,fn) \
+static struct early_params __early_##fn \
+__attribute__((section("__early_param"), unused)) = \
+ { name, fn }
+
#endif
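
The __early_param() hook registers a handler that is presumably run while the kernel command line is parsed very early in setup. The option name and handler below are hypothetical; only the declaration pattern comes from the header above.

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/setup.h>

static unsigned long example_size __initdata;	/* hypothetical option value */

/* Illustration: parse "example_size=16M" style arguments; memparse()
 * consumes the number plus an optional K/M/G suffix and advances *p. */
static void __init parse_example_size(char **p)
{
	example_size = memparse(*p, p);
}
__early_param("example_size=", parse_example_size);
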
diff --git a/include/asm-arm/tlb.h b/include/asm-arm/tlb.h
index af703647cefd..f0ab134d99cf 100644
--- a/include/asm-arm/tlb.h
+++ b/include/asm-arm/tlb.h
@@ -18,6 +18,7 @@
#define __ASMARM_TLB_H
#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
/*
* TLB handling. This allows us to remove pages from the page
@@ -26,6 +27,7 @@
struct mmu_gather {
struct mm_struct *mm;
unsigned int freed;
+ unsigned int fullmm;
unsigned int flushes;
unsigned int avoided_flushes;
@@ -41,6 +43,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
tlb->mm = mm;
tlb->freed = 0;
+ tlb->fullmm = full_mm_flush;
return tlb;
}
@@ -68,7 +71,13 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
}
#define tlb_remove_tlb_entry(tlb,ptep,address) do { } while (0)
-#define tlb_start_vma(tlb,vma) do { } while (0)
+
+#define tlb_start_vma(tlb,vma) \
+ do { \
+ if (!tlb->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+ } while (0)
+
#define tlb_end_vma(tlb,vma) do { } while (0)
#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
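
With the new fullmm flag, tlb_start_vma() only flushes the user cache range for partial teardowns. A hedged sketch of how the generic unmap path is expected to drive these hooks (illustration only; the real callers live in mm/memory.c):

#include <linux/mm.h>
#include <asm/tlb.h>

/* Illustration of the mmu_gather lifecycle around a single VMA. */
static void example_unmap_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct mmu_gather *tlb;

	tlb = tlb_gather_mmu(mm, 0);	/* 0: not a full address-space teardown */
	tlb_start_vma(tlb, vma);	/* flushes the cache range since !fullmm */
	/* ... PTEs would be cleared here, pages fed to tlb_remove_page() ... */
	tlb_end_vma(tlb, vma);
	tlb_finish_mmu(tlb, vma->vm_start, vma->vm_end);
}
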
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ce89b15c54a1..572b92e6f443 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1041,6 +1041,7 @@ extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern int blkdev_open(struct inode *, struct file *);
extern int blkdev_close(struct inode *, struct file *);
+extern struct block_device *open_by_devnum(dev_t, unsigned, int);
extern struct file_operations def_blk_fops;
extern struct address_space_operations def_blk_aops;
extern struct file_operations def_chr_fops;
@@ -1104,7 +1105,7 @@ extern int fs_may_remount_ro(struct super_block *);
extern int check_disk_change(struct block_device *);
extern int invalidate_inodes(struct super_block *);
extern int __invalidate_device(struct block_device *, int);
-extern int invalidate_device(kdev_t, int);
+extern int invalidate_partition(struct gendisk *, int);
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end);
unsigned long invalidate_inode_pages(struct address_space *mapping);
@@ -1292,6 +1293,10 @@ extern struct dentry *simple_lookup(struct inode *, struct dentry *);
extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *);
extern struct file_operations simple_dir_operations;
extern struct inode_operations simple_dir_inode_operations;
+struct tree_descr { char *name; struct file_operations *ops; int mode; };
+extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
+extern int simple_pin_fs(char *name, struct vfsmount **mount, int *count);
+extern void simple_release_fs(struct vfsmount **mount, int *count);
#ifdef CONFIG_BLK_DEV_INITRD
extern unsigned int real_root_dev;
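
tree_descr plus simple_fill_super() give pseudo-filesystems a one-call way to populate their root directory. Below is a hedged sketch only: the fops, magic number, empty-name terminator and the choice to start entries at index 2 are all assumptions, not taken from this diff; the filesystems converted elsewhere in this series are the authoritative users.

#include <linux/fs.h>
#include <linux/stat.h>

static struct file_operations example_fops;	/* hypothetical */

/* Illustration: describe the files once, let libfs build the tree. */
static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	static struct tree_descr files[] = {
		[2] = { "status",  &example_fops, S_IRUGO },
		      { "control", &example_fops, S_IWUSR },
		{ "" },		/* assumed empty-name terminator */
	};

	return simple_fill_super(sb, 0x20030427, files);	/* arbitrary magic */
}
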
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 096643e667ae..35e93e35455d 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -367,6 +367,11 @@ extern void blk_register_region(dev_t dev, unsigned long range,
void *data);
extern void blk_unregister_region(dev_t dev, unsigned long range);
+static inline struct block_device *bdget_disk(struct gendisk *disk, int index)
+{
+ return bdget(MKDEV(disk->major, disk->first_minor) + index);
+}
+
#endif
#endif
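
bdget_disk() is a small convenience for mapping a gendisk plus partition index to its block_device. A hedged usage sketch (the wrapper name is made up):

#include <linux/fs.h>
#include <linux/genhd.h>

/* Illustration: index 0 is the whole device, 1..N the partitions.
 * The caller must bdput() the result when finished, and bdget()
 * may return NULL. */
static struct block_device *example_get_part(struct gendisk *disk, int partno)
{
	return bdget_disk(disk, partno);
}
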
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 0902e7813348..8c8cff48895d 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -188,7 +188,6 @@ EXPORT_SYMBOL(files_lock);
EXPORT_SYMBOL(check_disk_change);
EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(invalidate_inodes);
-EXPORT_SYMBOL(invalidate_device);
EXPORT_SYMBOL(__invalidate_device);
EXPORT_SYMBOL(invalidate_inode_pages);
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
@@ -209,6 +208,7 @@ EXPORT_SYMBOL(bd_claim);
EXPORT_SYMBOL(bd_release);
EXPORT_SYMBOL(open_bdev_excl);
EXPORT_SYMBOL(close_bdev_excl);
+EXPORT_SYMBOL(open_by_devnum);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(ll_rw_block);
@@ -313,6 +313,9 @@ EXPORT_SYMBOL(simple_readpage);
EXPORT_SYMBOL(simple_prepare_write);
EXPORT_SYMBOL(simple_commit_write);
EXPORT_SYMBOL(simple_empty);
+EXPORT_SYMBOL(simple_fill_super);
+EXPORT_SYMBOL(simple_pin_fs);
+EXPORT_SYMBOL(simple_release_fs);
EXPORT_SYMBOL(fd_install);
EXPORT_SYMBOL(put_unused_fd);
EXPORT_SYMBOL(get_sb_bdev);
diff --git a/kernel/suspend.c b/kernel/suspend.c
index f8b76113ecd2..5008bbe76555 100644
--- a/kernel/suspend.c
+++ b/kernel/suspend.c
@@ -1161,15 +1161,14 @@ static int read_suspend_image(const char * specialfile, int noresume)
struct block_device *bdev;
printk("Resuming from device %s\n",
__bdevname(resume_device, b));
- bdev = bdget(resume_device);
- if (!bdev) {
- printk("No such block device ?!\n");
- BUG();
+ bdev = open_by_devnum(resume_device, FMODE_READ, BDEV_RAW);
+ if (IS_ERR(bdev)) {
+ error = PTR_ERR(bdev);
+ } else {
+ set_blocksize(bdev, PAGE_SIZE);
+ error = __read_suspend_image(bdev, cur, noresume);
+ blkdev_put(bdev, BDEV_RAW);
}
- blkdev_get(bdev, FMODE_READ, O_RDONLY, BDEV_RAW);
- set_blocksize(bdev, PAGE_SIZE);
- error = __read_suspend_image(bdev, cur, noresume);
- blkdev_put(bdev, BDEV_RAW);
} else error = -ENOMEM;
if (scratch_page)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 0b115e9076e3..879fbc8cdf73 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -29,7 +29,6 @@
#include <linux/sunrpc/rpc_pipe_fs.h>
static struct vfsmount *rpc_mount;
-static spinlock_t rpc_mount_lock = SPIN_LOCK_UNLOCKED;
static int rpc_mount_count;
static struct file_system_type rpc_pipe_fs_type;
@@ -379,46 +378,13 @@ static struct rpc_filelist authfiles[] = {
static int
rpc_get_mount(void)
{
- struct vfsmount * mnt = NULL;
-
- spin_lock(&rpc_mount_lock);
- if (rpc_mount)
- goto out_get;
- spin_unlock(&rpc_mount_lock);
- mnt = kern_mount(&rpc_pipe_fs_type);
- if (IS_ERR(mnt))
- return -ENODEV;
- spin_lock(&rpc_mount_lock);
- if (!rpc_mount) {
- rpc_mount = mnt;
- mnt = NULL;
- goto out_dontget;
- }
-out_get:
- mntget(rpc_mount);
-out_dontget:
- ++rpc_mount_count;
- spin_unlock(&rpc_mount_lock);
- if (mnt)
- mntput(mnt);
- return 0;
+ return simple_pin_fs("rpc_pipefs", &rpc_mount, &rpc_mount_count);
}
static void
rpc_put_mount(void)
{
- struct vfsmount *mnt;
-
- spin_lock(&rpc_mount_lock);
- mnt = rpc_mount;
- --rpc_mount_count;
- if (rpc_mount_count == 0)
- rpc_mount = NULL;
- else
- mnt = NULL;
- spin_unlock(&rpc_mount_lock);
- if (mnt)
- mntput(mnt);
+ simple_release_fs(&rpc_mount, &rpc_mount_count);
}
static int