author     David Woodhouse <dwmw2@shinybook.infradead.org>   2004-07-15 20:32:25 +0100
committer  David Woodhouse <dwmw2@shinybook.infradead.org>   2004-07-15 20:32:25 +0100
commit     67d4878e4e61ae4f73855ecc73e9e4d4fc18f2fb
tree       5ea3e25e5d8c1c5ce16e09921e5ff764b17a64c7
parent     7df80b4c8964e427f9eae9ac0b54484aa7eddd7a
NOR flash drivers update
- Handle cached access to flash chips on supporting platforms
- Handle arrangements of chips larger than a single bus width
- Clean up the AMD/Fujitsu chip driver
- Update board 'mapping' drivers to match
- New mapping drivers for new platforms.
Diffstat (limited to 'include')
-rw-r--r--   include/linux/mtd/cfi.h         511
-rw-r--r--   include/linux/mtd/flashchip.h    15
-rw-r--r--   include/linux/mtd/gen_probe.h     6
-rw-r--r--   include/linux/mtd/map.h         364
-rw-r--r--   include/linux/mtd/physmap.h      61
5 files changed, 574 insertions, 383 deletions
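
The central change across these headers is that the old fixed-width accessors (map_read8/16/32/64, cfi_read, cfi_write) give way to a single map_read()/map_write() pair operating on a map_word union, so banks wider than the host word can be handled. A rough before/after sketch of a driver read, not part of the patch itself (variable and function names are illustrative):

#include <linux/mtd/map.h>

/* Old style (removed below): the caller picked the width explicitly,
 * e.g.  u32 v = map_read32(map, ofs);
 *
 * New style: one call; the map's bankwidth decides how much is read
 * and the result comes back as a map_word union. */
static unsigned long read_first_long(struct map_info *map, unsigned long ofs)
{
	map_word v = map_read(map, ofs);

	return v.x[0];	/* first (or only) long of the bus-wide value */
}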
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index fa32bf254d95..12781628ccfa 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -1,7 +1,7 @@
/* Common Flash Interface structures
* See http://support.intel.com/design/flash/technote/index.htm
- * $Id: cfi.h,v 1.35 2003/05/28 15:37:32 dwmw2 Exp $
+ * $Id: cfi.h,v 1.44 2004/07/13 22:32:52 dwmw2 Exp $
*/
#ifndef __MTD_CFI_H__
@@ -13,200 +13,74 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/mtd/flashchip.h>
+#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>
-/*
- * You can optimize the code size and performance by defining only
- * the geometry(ies) available on your hardware.
- * CFIDEV_INTERLEAVE_n, where represents the interleave (number of chips to fill the bus width)
- * CFIDEV_BUSWIDTH_n, where n is the bus width in bytes (1, 2, 4 or 8 bytes)
- *
- * By default, all (known) geometries are supported.
- */
-
-#ifndef CONFIG_MTD_CFI_GEOMETRY
-
-/* The default case - support all but 64-bit, which has
- a performance penalty */
-
-#define CFIDEV_INTERLEAVE_1 (1)
-#define CFIDEV_INTERLEAVE_2 (2)
-#define CFIDEV_INTERLEAVE_4 (4)
-
-#define CFIDEV_BUSWIDTH_1 (1)
-#define CFIDEV_BUSWIDTH_2 (2)
-#define CFIDEV_BUSWIDTH_4 (4)
-
-typedef __u32 cfi_word;
-
-#else
-
-/* Explicitly configured buswidth/interleave support */
-
#ifdef CONFIG_MTD_CFI_I1
-#define CFIDEV_INTERLEAVE_1 (1)
-#endif
-#ifdef CONFIG_MTD_CFI_I2
-#define CFIDEV_INTERLEAVE_2 (2)
-#endif
-#ifdef CONFIG_MTD_CFI_I4
-#define CFIDEV_INTERLEAVE_4 (4)
-#endif
-#ifdef CONFIG_MTD_CFI_I8
-#define CFIDEV_INTERLEAVE_8 (8)
-#endif
-
-#ifdef CONFIG_MTD_CFI_B1
-#define CFIDEV_BUSWIDTH_1 (1)
-#endif
-#ifdef CONFIG_MTD_CFI_B2
-#define CFIDEV_BUSWIDTH_2 (2)
-#endif
-#ifdef CONFIG_MTD_CFI_B4
-#define CFIDEV_BUSWIDTH_4 (4)
-#endif
-#ifdef CONFIG_MTD_CFI_B8
-#define CFIDEV_BUSWIDTH_8 (8)
-#endif
-
-/* pick the largest necessary */
-#ifdef CONFIG_MTD_CFI_B8
-typedef __u64 cfi_word;
-
-/* This only works if asm/io.h is included first */
-#ifndef __raw_readll
-#define __raw_readll(addr) (*(volatile __u64 *)(addr))
-#endif
-#ifndef __raw_writell
-#define __raw_writell(v, addr) (*(volatile __u64 *)(addr) = (v))
-#endif
-#define CFI_WORD_64
-#else /* CONFIG_MTD_CFI_B8 */
-/* All others can use 32-bits. It's probably more efficient than
- the smaller types anyway */
-typedef __u32 cfi_word;
-#endif /* CONFIG_MTD_CFI_B8 */
-
-#endif
-
-/*
- * The following macros are used to select the code to execute:
- * cfi_buswidth_is_*()
- * cfi_interleave_is_*()
- * [where * is either 1, 2, 4, or 8]
- * Those macros should be used with 'if' statements. If only one of few
- * geometry arrangements are selected, they expand to constants thus allowing
- * the compiler (most of them being 0) to optimize away all the unneeded code,
- * while still validating the syntax (which is not possible with embedded
- * #if ... #endif constructs).
- * The exception to this is the 64-bit versions, which need an extension
- * to the cfi_word type, and cause compiler warnings about shifts being
- * out of range.
- */
-
-#ifdef CFIDEV_INTERLEAVE_1
-# ifdef CFIDEV_INTERLEAVE
-# undef CFIDEV_INTERLEAVE
-# define CFIDEV_INTERLEAVE (cfi->interleave)
-# else
-# define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_1
-# endif
-# define cfi_interleave_is_1() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_1)
+#define cfi_interleave(cfi) 1
+#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
-# define cfi_interleave_is_1() (0)
+#define cfi_interleave_is_1(cfi) (0)
#endif
-#ifdef CFIDEV_INTERLEAVE_2
-# ifdef CFIDEV_INTERLEAVE
-# undef CFIDEV_INTERLEAVE
-# define CFIDEV_INTERLEAVE (cfi->interleave)
+#ifdef CONFIG_MTD_CFI_I2
+# ifdef cfi_interleave
+# undef cfi_interleave
+# define cfi_interleave(cfi) ((cfi)->interleave)
# else
-# define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_2
+# define cfi_interleave(cfi) 2
# endif
-# define cfi_interleave_is_2() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_2)
+#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
-# define cfi_interleave_is_2() (0)
+#define cfi_interleave_is_2(cfi) (0)
#endif
-#ifdef CFIDEV_INTERLEAVE_4
-# ifdef CFIDEV_INTERLEAVE
-# undef CFIDEV_INTERLEAVE
-# define CFIDEV_INTERLEAVE (cfi->interleave)
+#ifdef CONFIG_MTD_CFI_I4
+# ifdef cfi_interleave
+# undef cfi_interleave
+# define cfi_interleave(cfi) ((cfi)->interleave)
# else
-# define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_4
+# define cfi_interleave(cfi) 4
# endif
-# define cfi_interleave_is_4() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_4)
+#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
-# define cfi_interleave_is_4() (0)
+#define cfi_interleave_is_4(cfi) (0)
#endif
-#ifdef CFIDEV_INTERLEAVE_8
-# ifdef CFIDEV_INTERLEAVE
-# undef CFIDEV_INTERLEAVE
-# define CFIDEV_INTERLEAVE (cfi->interleave)
+#ifdef CONFIG_MTD_CFI_I8
+# ifdef cfi_interleave
+# undef cfi_interleave
+# define cfi_interleave(cfi) ((cfi)->interleave)
# else
-# define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_8
+# define cfi_interleave(cfi) 8
# endif
-# define cfi_interleave_is_8() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_8)
+#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
-# define cfi_interleave_is_8() (0)
+#define cfi_interleave_is_8(cfi) (0)
#endif
-#ifndef CFIDEV_INTERLEAVE
-#error You must define at least one interleave to support!
+static inline int cfi_interleave_supported(int i)
+{
+ switch (i) {
+#ifdef CONFIG_MTD_CFI_I1
+ case 1:
#endif
-
-#ifdef CFIDEV_BUSWIDTH_1
-# ifdef CFIDEV_BUSWIDTH
-# undef CFIDEV_BUSWIDTH
-# define CFIDEV_BUSWIDTH (map->buswidth)
-# else
-# define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_1
-# endif
-# define cfi_buswidth_is_1() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_1)
-#else
-# define cfi_buswidth_is_1() (0)
+#ifdef CONFIG_MTD_CFI_I2
+ case 2:
#endif
-
-#ifdef CFIDEV_BUSWIDTH_2
-# ifdef CFIDEV_BUSWIDTH
-# undef CFIDEV_BUSWIDTH
-# define CFIDEV_BUSWIDTH (map->buswidth)
-# else
-# define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_2
-# endif
-# define cfi_buswidth_is_2() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_2)
-#else
-# define cfi_buswidth_is_2() (0)
+#ifdef CONFIG_MTD_CFI_I4
+ case 4:
#endif
-
-#ifdef CFIDEV_BUSWIDTH_4
-# ifdef CFIDEV_BUSWIDTH
-# undef CFIDEV_BUSWIDTH
-# define CFIDEV_BUSWIDTH (map->buswidth)
-# else
-# define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_4
-# endif
-# define cfi_buswidth_is_4() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_4)
-#else
-# define cfi_buswidth_is_4() (0)
+#ifdef CONFIG_MTD_CFI_I8
+ case 8:
#endif
+ return 1;
-#ifdef CFIDEV_BUSWIDTH_8
-# ifdef CFIDEV_BUSWIDTH
-# undef CFIDEV_BUSWIDTH
-# define CFIDEV_BUSWIDTH (map->buswidth)
-# else
-# define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_8
-# endif
-# define cfi_buswidth_is_8() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_8)
-#else
-# define cfi_buswidth_is_8() (0)
-#endif
+ default:
+ return 0;
+ }
+}
-#ifndef CFIDEV_BUSWIDTH
-#error You must define at least one bus width to support!
-#endif
/* NB: these values must represents the number of bytes needed to meet the
* device type (x8, x16, x32). Eg. a 32 bit device is 4 x 8 bytes.
@@ -223,64 +97,84 @@ typedef __u32 cfi_word;
/* Basic Query Structure */
struct cfi_ident {
- __u8 qry[3];
- __u16 P_ID;
- __u16 P_ADR;
- __u16 A_ID;
- __u16 A_ADR;
- __u8 VccMin;
- __u8 VccMax;
- __u8 VppMin;
- __u8 VppMax;
- __u8 WordWriteTimeoutTyp;
- __u8 BufWriteTimeoutTyp;
- __u8 BlockEraseTimeoutTyp;
- __u8 ChipEraseTimeoutTyp;
- __u8 WordWriteTimeoutMax;
- __u8 BufWriteTimeoutMax;
- __u8 BlockEraseTimeoutMax;
- __u8 ChipEraseTimeoutMax;
- __u8 DevSize;
- __u16 InterfaceDesc;
- __u16 MaxBufWriteSize;
- __u8 NumEraseRegions;
- __u32 EraseRegionInfo[0]; /* Not host ordered */
+ uint8_t qry[3];
+ uint16_t P_ID;
+ uint16_t P_ADR;
+ uint16_t A_ID;
+ uint16_t A_ADR;
+ uint8_t VccMin;
+ uint8_t VccMax;
+ uint8_t VppMin;
+ uint8_t VppMax;
+ uint8_t WordWriteTimeoutTyp;
+ uint8_t BufWriteTimeoutTyp;
+ uint8_t BlockEraseTimeoutTyp;
+ uint8_t ChipEraseTimeoutTyp;
+ uint8_t WordWriteTimeoutMax;
+ uint8_t BufWriteTimeoutMax;
+ uint8_t BlockEraseTimeoutMax;
+ uint8_t ChipEraseTimeoutMax;
+ uint8_t DevSize;
+ uint16_t InterfaceDesc;
+ uint16_t MaxBufWriteSize;
+ uint8_t NumEraseRegions;
+ uint32_t EraseRegionInfo[0]; /* Not host ordered */
} __attribute__((packed));
/* Extended Query Structure for both PRI and ALT */
struct cfi_extquery {
- __u8 pri[3];
- __u8 MajorVersion;
- __u8 MinorVersion;
+ uint8_t pri[3];
+ uint8_t MajorVersion;
+ uint8_t MinorVersion;
} __attribute__((packed));
/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
struct cfi_pri_intelext {
- __u8 pri[3];
- __u8 MajorVersion;
- __u8 MinorVersion;
- __u32 FeatureSupport;
- __u8 SuspendCmdSupport;
- __u16 BlkStatusRegMask;
- __u8 VccOptimal;
- __u8 VppOptimal;
- __u8 NumProtectionFields;
- __u16 ProtRegAddr;
- __u8 FactProtRegSize;
- __u8 UserProtRegSize;
+ uint8_t pri[3];
+ uint8_t MajorVersion;
+ uint8_t MinorVersion;
+ uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
+ block follows - FIXME - not currently supported */
+ uint8_t SuspendCmdSupport;
+ uint16_t BlkStatusRegMask;
+ uint8_t VccOptimal;
+ uint8_t VppOptimal;
+ uint8_t NumProtectionFields;
+ uint16_t ProtRegAddr;
+ uint8_t FactProtRegSize;
+ uint8_t UserProtRegSize;
+} __attribute__((packed));
+
+/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */
+
+struct cfi_pri_amdstd {
+ uint8_t pri[3];
+ uint8_t MajorVersion;
+ uint8_t MinorVersion;
+ uint8_t SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
+ uint8_t EraseSuspend;
+ uint8_t BlkProt;
+ uint8_t TmpBlkUnprotect;
+ uint8_t BlkProtUnprot;
+ uint8_t SimultaneousOps;
+ uint8_t BurstMode;
+ uint8_t PageMode;
+ uint8_t VppMin;
+ uint8_t VppMax;
+ uint8_t TopBottom;
} __attribute__((packed));
struct cfi_pri_query {
- __u8 NumFields;
- __u32 ProtField[1]; /* Not host ordered */
+ uint8_t NumFields;
+ uint32_t ProtField[1]; /* Not host ordered */
} __attribute__((packed));
struct cfi_bri_query {
- __u8 PageModeReadCap;
- __u8 NumFields;
- __u32 ConfField[1]; /* Not host ordered */
+ uint8_t PageModeReadCap;
+ uint8_t NumFields;
+ uint32_t ConfField[1]; /* Not host ordered */
} __attribute__((packed));
#define P_ID_NONE 0
@@ -288,8 +182,10 @@ struct cfi_bri_query {
#define P_ID_AMD_STD 2
#define P_ID_INTEL_STD 3
#define P_ID_AMD_EXT 4
+#define P_ID_ST_ADV 32
#define P_ID_MITSUBISHI_STD 256
#define P_ID_MITSUBISHI_EXT 257
+#define P_ID_SST_PAGE 258
#define P_ID_RESERVED 65535
@@ -297,14 +193,13 @@ struct cfi_bri_query {
#define CFI_MODE_JEDEC 0
struct cfi_private {
- __u16 cmdset;
+ uint16_t cmdset;
void *cmdset_priv;
int interleave;
int device_type;
int cfi_mode; /* Are we a JEDEC device pretending to be CFI? */
int addr_unlock1;
int addr_unlock2;
- int fast_prog;
struct mtd_info *(*cmdset_setup)(struct map_info *);
struct cfi_ident *cfiq; /* For now only one. We insist that all devs
must be of the same type. */
@@ -315,107 +210,81 @@ struct cfi_private {
struct flchip chips[0]; /* per-chip data structure for each chip */
};
-#define MAX_CFI_CHIPS 8 /* Entirely arbitrary to avoid realloc() */
-
/*
* Returns the command address according to the given geometry.
*/
-static inline __u32 cfi_build_cmd_addr(__u32 cmd_ofs, int interleave, int type)
+static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
{
return (cmd_ofs * type) * interleave;
}
/*
- * Transforms the CFI command for the given geometry (bus width & interleave.
+ * Transforms the CFI command for the given geometry (bus width & interleave).
+ * It looks too long to be inline, but in the common case it should almost all
+ * get optimised away.
*/
-static inline cfi_word cfi_build_cmd(u_char cmd, struct map_info *map, struct cfi_private *cfi)
+static inline map_word cfi_build_cmd(u_char cmd, struct map_info *map, struct cfi_private *cfi)
{
- cfi_word val = 0;
-
- if (cfi_buswidth_is_1()) {
- /* 1 x8 device */
- val = cmd;
- } else if (cfi_buswidth_is_2()) {
- if (cfi_interleave_is_1()) {
- /* 1 x16 device in x16 mode */
- val = cpu_to_cfi16(cmd);
- } else if (cfi_interleave_is_2()) {
- /* 2 (x8, x16 or x32) devices in x8 mode */
- val = cpu_to_cfi16((cmd << 8) | cmd);
- }
- } else if (cfi_buswidth_is_4()) {
- if (cfi_interleave_is_1()) {
- /* 1 x32 device in x32 mode */
- val = cpu_to_cfi32(cmd);
- } else if (cfi_interleave_is_2()) {
- /* 2 x16 device in x16 mode */
- val = cpu_to_cfi32((cmd << 16) | cmd);
- } else if (cfi_interleave_is_4()) {
- /* 4 (x8, x16 or x32) devices in x8 mode */
- val = (cmd << 16) | cmd;
- val = cpu_to_cfi32((val << 8) | val);
- }
-#ifdef CFI_WORD_64
- } else if (cfi_buswidth_is_8()) {
- if (cfi_interleave_is_1()) {
- /* 1 x64 device in x64 mode */
- val = cpu_to_cfi64(cmd);
- } else if (cfi_interleave_is_2()) {
- /* 2 x32 device in x32 mode */
- val = cmd;
- val = cpu_to_cfi64((val << 32) | val);
- } else if (cfi_interleave_is_4()) {
- /* 4 (x16, x32 or x64) devices in x16 mode */
- val = (cmd << 16) | cmd;
- val = cpu_to_cfi64((val << 32) | val);
- } else if (cfi_interleave_is_8()) {
- /* 8 (x8, x16 or x32) devices in x8 mode */
- val = (cmd << 8) | cmd;
- val = (val << 16) | val;
- val = (val << 32) | val;
- val = cpu_to_cfi64(val);
- }
-#endif /* CFI_WORD_64 */
- }
- return val;
-}
-#define CMD(x) cfi_build_cmd((x), map, cfi)
-
-/*
- * Read a value according to the bus width.
- */
-
-static inline cfi_word cfi_read(struct map_info *map, __u32 addr)
-{
- if (cfi_buswidth_is_1()) {
- return map_read8(map, addr);
- } else if (cfi_buswidth_is_2()) {
- return map_read16(map, addr);
- } else if (cfi_buswidth_is_4()) {
- return map_read32(map, addr);
- } else if (cfi_buswidth_is_8()) {
- return map_read64(map, addr);
+ map_word val = { {0} };
+ int wordwidth, words_per_bus, chip_mode, chips_per_word;
+ unsigned long onecmd;
+ int i;
+
+ /* We do it this way to give the compiler a fighting chance
+ of optimising away all the crap for 'bankwidth' larger than
+ an unsigned long, in the common case where that support is
+ disabled */
+ if (map_bankwidth_is_large(map)) {
+ wordwidth = sizeof(unsigned long);
+ words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
} else {
- return 0;
+ wordwidth = map_bankwidth(map);
+ words_per_bus = 1;
+ }
+
+ chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
+ chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
+
+ /* First, determine what the bit-pattern should be for a single
+ device, according to chip mode and endianness... */
+ switch (chip_mode) {
+ default: BUG();
+ case 1:
+ onecmd = cmd;
+ break;
+ case 2:
+ onecmd = cpu_to_cfi16(cmd);
+ break;
+ case 4:
+ onecmd = cpu_to_cfi32(cmd);
+ break;
}
-}
-/*
- * Write a value according to the bus width.
- */
+ /* Now replicate it across the size of an unsigned long, or
+ just to the bus width as appropriate */
+ switch (chips_per_word) {
+ default: BUG();
+#if BITS_PER_LONG >= 64
+ case 8:
+ onecmd |= (onecmd << (chip_mode * 32));
+#endif
+ case 4:
+ onecmd |= (onecmd << (chip_mode * 16));
+ case 2:
+ onecmd |= (onecmd << (chip_mode * 8));
+ case 1:
+ ;
+ }
-static inline void cfi_write(struct map_info *map, cfi_word val, __u32 addr)
-{
- if (cfi_buswidth_is_1()) {
- map_write8(map, val, addr);
- } else if (cfi_buswidth_is_2()) {
- map_write16(map, val, addr);
- } else if (cfi_buswidth_is_4()) {
- map_write32(map, val, addr);
- } else if (cfi_buswidth_is_8()) {
- map_write64(map, val, addr);
+ /* And finally, for the multi-word case, replicate it
+ in all words in the structure */
+ for (i=0; i < words_per_bus; i++) {
+ val.x[i] = onecmd;
}
+
+ return val;
}
+#define CMD(x) cfi_build_cmd((x), map, cfi)
/*
* Sends a CFI command to a bank of flash for the given geometry.
@@ -424,35 +293,36 @@ static inline void cfi_write(struct map_info *map, cfi_word val, __u32 addr)
* If prev_val is non-null, it will be set to the value at the command address,
* before the command was written.
*/
-static inline __u32 cfi_send_gen_cmd(u_char cmd, __u32 cmd_addr, __u32 base,
+static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
struct map_info *map, struct cfi_private *cfi,
- int type, cfi_word *prev_val)
+ int type, map_word *prev_val)
{
- cfi_word val;
- __u32 addr = base + cfi_build_cmd_addr(cmd_addr, CFIDEV_INTERLEAVE, type);
+ map_word val;
+ uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);
val = cfi_build_cmd(cmd, map, cfi);
if (prev_val)
- *prev_val = cfi_read(map, addr);
+ *prev_val = map_read(map, addr);
- cfi_write(map, val, addr);
+ map_write(map, val, addr);
return addr - base;
}
-static inline __u8 cfi_read_query(struct map_info *map, __u32 addr)
+static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
- if (cfi_buswidth_is_1()) {
- return map_read8(map, addr);
- } else if (cfi_buswidth_is_2()) {
- return cfi16_to_cpu(map_read16(map, addr));
- } else if (cfi_buswidth_is_4()) {
- return cfi32_to_cpu(map_read32(map, addr));
- } else if (cfi_buswidth_is_8()) {
- return cfi64_to_cpu(map_read64(map, addr));
+ map_word val = map_read(map, addr);
+
+ if (map_bankwidth_is_1(map)) {
+ return val.x[0];
+ } else if (map_bankwidth_is_2(map)) {
+ return cfi16_to_cpu(val.x[0]);
} else {
- return 0;
+ /* No point in a 64-bit byteswap since that would just be
+ swapping the responses from different chips, and we are
+ only interested in one chip (a representative sample) */
+ return cfi32_to_cpu(val.x[0]);
}
}
@@ -480,4 +350,19 @@ static inline void cfi_spin_unlock(spinlock_t *mutex)
spin_unlock_bh(mutex);
}
+struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
+ const char* name);
+
+struct cfi_fixup {
+ uint16_t mfr;
+ uint16_t id;
+ void (*fixup)(struct map_info *map, void* param);
+ void* param;
+};
+
+#define CFI_MFR_ANY 0xffff
+#define CFI_ID_ANY 0xffff
+
+void cfi_fixup(struct map_info *map, struct cfi_fixup* fixups);
+
#endif /* __MTD_CFI_H__ */
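
To make the rewritten cfi_build_cmd() concrete, a worked sketch (not part of the patch): with two interleaved x16 chips on a 4-byte bank and the default no-swap CFI byte order, chip_mode is 2 and chips_per_word is 2, so the per-chip pattern is shifted up 16 bits and OR'd with itself. The helper below is invented for illustration.

#include <linux/kernel.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>

/* Sketch: build the 0x98 "enter CFI query" command for a map assumed
 * to have bankwidth 4 and interleave 2.  Under those assumptions the
 * result should be 0x00980098 in the single bus word. */
static void show_query_cmd(struct map_info *map, struct cfi_private *cfi)
{
	map_word q = cfi_build_cmd(0x98, map, cfi);

	printk(KERN_DEBUG "CFI query cmd on this map: 0x%08lx\n", q.x[0]);
}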
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index 7e042bf5fd0b..c3ac4df7273f 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -6,7 +6,7 @@
*
* (C) 2000 Red Hat. GPLd.
*
- * $Id: flashchip.h,v 1.9 2003/04/30 11:15:22 dwmw2 Exp $
+ * $Id: flashchip.h,v 1.14 2004/06/15 16:44:59 nico Exp $
*
*/
@@ -43,7 +43,8 @@ typedef enum {
/* NOTE: confusingly, this can be used to refer to more than one chip at a time,
- if they're interleaved. */
+ if they're interleaved. This can even refer to individual partitions on
+ the same physical chip when present. */
struct flchip {
unsigned long start; /* Offset within the map */
@@ -61,6 +62,7 @@ struct flchip {
int write_suspended:1;
int erase_suspended:1;
+ unsigned long in_progress_block_addr;
spinlock_t *mutex;
spinlock_t _spinlock; /* We do it like this because sometimes they'll be shared. */
@@ -69,8 +71,17 @@ struct flchip {
int word_write_time;
int buffer_write_time;
int erase_time;
+
+ void *priv;
};
+/* This is used to handle contention on write/erase operations
+ between partitions of the same physical chip. */
+struct flchip_shared {
+ spinlock_t lock;
+ struct flchip *writing;
+ struct flchip *erasing;
+};
#endif /* __MTD_FLASHCHIP_H__ */
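
Presumably each partition of one physical die gets its own struct flchip, with priv pointing at a common flchip_shared so that write/erase contention can be arbitrated. A hedged initialisation sketch (the helper and its callers are invented; the field usage is inferred only from the comment above):

#include <linux/spinlock.h>
#include <linux/mtd/flashchip.h>

/* Sketch: two logical 'chips' (partitions) sharing one physical die. */
static void link_partitions(struct flchip *a, struct flchip *b,
			    struct flchip_shared *shared)
{
	spin_lock_init(&shared->lock);
	shared->writing = NULL;		/* no write in progress yet */
	shared->erasing = NULL;		/* no erase in progress yet */

	a->priv = shared;		/* both partitions arbitrate through */
	b->priv = shared;		/* the same shared state */
}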
diff --git a/include/linux/mtd/gen_probe.h b/include/linux/mtd/gen_probe.h
index 43c3a08a4032..2d66b33f805d 100644
--- a/include/linux/mtd/gen_probe.h
+++ b/include/linux/mtd/gen_probe.h
@@ -1,7 +1,7 @@
/*
* (C) 2001, 2001 Red Hat, Inc.
* GPL'd
- * $Id: gen_probe.h,v 1.1 2001/09/02 18:50:13 dwmw2 Exp $
+ * $Id: gen_probe.h,v 1.2 2003/11/08 00:51:21 dsaxena Exp $
*/
#ifndef __LINUX_MTD_GEN_PROBE_H__
@@ -10,12 +10,12 @@
#include <linux/mtd/flashchip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
+#include <asm/bitops.h>
struct chip_probe {
char *name;
int (*probe_chip)(struct map_info *map, __u32 base,
- struct flchip *chips, struct cfi_private *cfi);
-
+ unsigned long *chip_map, struct cfi_private *cfi);
};
struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp);
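
probe_chip() now reports its findings through a bitmap (hence the new <asm/bitops.h> include) instead of filling a struct flchip array. A guess at how a probe routine records chip n — the exact convention lives in mtd_do_chip_probe(), which is not in this diff:

#include <asm/bitops.h>

/* Sketch: mark chip 'n' as present; the caller is assumed to build
 * cfi->chips[] from the populated bitmap afterwards. */
static inline void mark_chip_found(unsigned long *chip_map, int n)
{
	set_bit(n, chip_map);
}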
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 0c933f9bf1fe..ed41daec7cc6 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -1,6 +1,6 @@
/* Overhauled routines for dealing with different mmap regions of flash */
-/* $Id: map.h,v 1.34 2003/05/28 12:42:22 dwmw2 Exp $ */
+/* $Id: map.h,v 1.43 2004/07/14 13:30:27 dwmw2 Exp $ */
#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__
@@ -8,17 +8,163 @@
#include <linux/config.h>
#include <linux/types.h>
#include <linux/list.h>
+#include <asm/unaligned.h>
#include <asm/system.h>
#include <asm/io.h>
+#include <asm/bug.h>
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
+#define map_bankwidth(map) 1
+#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
+#define map_bankwidth_is_large(map) (0)
+#define map_words(map) (1)
+#define MAX_MAP_BANKWIDTH 1
+#else
+#define map_bankwidth_is_1(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# else
+# define map_bankwidth(map) 2
+# define map_bankwidth_is_large(map) (0)
+# define map_words(map) (1)
+# endif
+#define map_bankwidth_is_2(map) (map_bankwidth(map) == 2)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 2
+#else
+#define map_bankwidth_is_2(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# else
+# define map_bankwidth(map) 4
+# define map_bankwidth_is_large(map) (0)
+# define map_words(map) (1)
+# endif
+#define map_bankwidth_is_4(map) (map_bankwidth(map) == 4)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 4
+#else
+#define map_bankwidth_is_4(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# if BITS_PER_LONG < 64
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) (map_bankwidth(map) / sizeof(unsigned long))
+# endif
+# else
+# define map_bankwidth(map) 8
+# define map_bankwidth_is_large(map) (BITS_PER_LONG < 64)
+# define map_words(map) (map_bankwidth(map) / sizeof(unsigned long))
+# endif
+#define map_bankwidth_is_8(map) (map_bankwidth(map) == 8)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 8
+#else
+#define map_bankwidth_is_8(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) (map_bankwidth(map) / sizeof(unsigned long))
+# else
+# define map_bankwidth(map) 16
+# define map_bankwidth_is_large(map) (1)
+# define map_words(map) (map_bankwidth(map) / sizeof(unsigned long))
+# endif
+#define map_bankwidth_is_16(map) (map_bankwidth(map) == 16)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 16
+#else
+#define map_bankwidth_is_16(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) (map_bankwidth(map) / sizeof(unsigned long))
+# else
+# define map_bankwidth(map) 32
+# define map_bankwidth_is_large(map) (1)
+# define map_words(map) (map_bankwidth(map) / sizeof(unsigned long))
+# endif
+#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 32
+#else
+#define map_bankwidth_is_32(map) (0)
+#endif
+
+#ifndef map_bankwidth
+#error "No bus width supported. What's the point?"
+#endif
+
+static inline int map_bankwidth_supported(int w)
+{
+ switch (w) {
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
+ case 1:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
+ case 2:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
+ case 4:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
+ case 8:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
+ case 16:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
+ case 32:
+#endif
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+#define MAX_MAP_LONGS ( ((MAX_MAP_BANKWIDTH*8) + BITS_PER_LONG - 1) / BITS_PER_LONG )
+
+typedef union {
+ unsigned long x[MAX_MAP_LONGS];
+} map_word;
/* The map stuff is very simple. You fill in your struct map_info with
a handful of routines for accessing the device, making sure they handle
paging etc. correctly if your device needs it. Then you pass it off
- to a chip driver which deals with a mapped device - generally either
- do_cfi_probe() or do_ram_probe(), either of which will return a
- struct mtd_info if they liked what they saw. At which point, you
- fill in the mtd->module with your own module address, and register
- it.
+ to a chip probe routine -- either JEDEC or CFI probe or both -- via
+ do_map_probe(). If a chip is recognised, the probe code will invoke the
+ appropriate chip driver (if present) and return a struct mtd_info.
+ At which point, you fill in the mtd->module with your own module
+ address, and register it with the MTD core code. Or you could partition
+ it and register the partitions instead, or keep it for your own private
+ use; whatever.
The mtd->priv field will point to the struct map_info, and any further
private data required by the chip driver is linked from the
@@ -36,28 +182,29 @@ struct map_info {
unsigned long virt;
void *cached;
- int buswidth; /* in octets */
+ int bankwidth; /* in octets. This isn't necessarily the width
+ of actual bus cycles -- it's the repeat interval
+ in bytes, before you are talking to the first chip again.
+ */
#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
- u8 (*read8)(struct map_info *, unsigned long);
- u16 (*read16)(struct map_info *, unsigned long);
- u32 (*read32)(struct map_info *, unsigned long);
- u64 (*read64)(struct map_info *, unsigned long);
- /* If it returned a 'long' I'd call it readl.
- * It doesn't.
- * I won't.
- * dwmw2 */
-
+ map_word (*read)(struct map_info *, unsigned long);
void (*copy_from)(struct map_info *, void *, unsigned long, ssize_t);
- void (*write8)(struct map_info *, u8, unsigned long);
- void (*write16)(struct map_info *, u16, unsigned long);
- void (*write32)(struct map_info *, u32, unsigned long);
- void (*write64)(struct map_info *, u64, unsigned long);
+
+ void (*write)(struct map_info *, const map_word, unsigned long);
void (*copy_to)(struct map_info *, unsigned long, const void *, ssize_t);
/* We can perhaps put in 'point' and 'unpoint' methods, if we really
want to enable XIP for non-linear mappings. Not yet though. */
#endif
+ /* It's possible for the map driver to use cached memory in its
+ copy_from implementation (and _only_ with copy_from). However,
+ when the chip driver knows some flash area has changed contents,
+ it will signal it to the map driver through this routine to let
+ the map driver invalidate the corresponding cache as needed.
+ If there is no cache to care about this can be set to NULL. */
+ void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
+
/* set_vpp() must handle being reentered -- enable, enable, disable
must leave it enabled. */
void (*set_vpp)(struct map_info *, int);
@@ -85,86 +232,173 @@ void map_destroy(struct mtd_info *mtd);
#define ENABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 1); } while(0)
#define DISABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 0); } while(0)
-#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
-#define map_read8(map, ofs) (map)->read8(map, ofs)
-#define map_read16(map, ofs) (map)->read16(map, ofs)
-#define map_read32(map, ofs) (map)->read32(map, ofs)
-#define map_read64(map, ofs) (map)->read64(map, ofs)
-#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
-#define map_write8(map, datum, ofs) (map)->write8(map, datum, ofs)
-#define map_write16(map, datum, ofs) (map)->write16(map, datum, ofs)
-#define map_write32(map, datum, ofs) (map)->write32(map, datum, ofs)
-#define map_write64(map, datum, ofs) (map)->write64(map, datum, ofs)
-#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)
+#define INVALIDATE_CACHED_RANGE(map, from, size) \
+ do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)
-extern void simple_map_init(struct map_info *);
-#define map_is_linear(map) (map->phys != NO_XIP)
-#else
-static inline u8 map_read8(struct map_info *map, unsigned long ofs)
+static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
{
- return __raw_readb(map->virt + ofs);
+ int i;
+ for (i=0; i<map_words(map); i++) {
+ if (val1.x[i] != val2.x[i])
+ return 0;
+ }
+ return 1;
}
-static inline u16 map_read16(struct map_info *map, unsigned long ofs)
+static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
{
- return __raw_readw(map->virt + ofs);
+ map_word r;
+ int i;
+
+ for (i=0; i<map_words(map); i++) {
+ r.x[i] = val1.x[i] & val2.x[i];
+ }
+ return r;
}
-static inline u32 map_read32(struct map_info *map, unsigned long ofs)
+static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
{
- return __raw_readl(map->virt + ofs);
+ map_word r;
+ int i;
+
+ for (i=0; i<map_words(map); i++) {
+ r.x[i] = val1.x[i] | val2.x[i];
+ }
+ return r;
}
+#define map_word_andequal(m, a, b, z) map_word_equal(m, z, map_word_and(m, a, b))
-static inline u64 map_read64(struct map_info *map, unsigned long ofs)
+static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
{
-#ifndef CONFIG_MTD_CFI_B8 /* 64-bit mappings */
- BUG();
+ int i;
+
+ for (i=0; i<map_words(map); i++) {
+ if (val1.x[i] & val2.x[i])
+ return 1;
+ }
return 0;
-#else
- return __raw_readll(map->virt + ofs);
-#endif
}
-static inline void map_write8(struct map_info *map, u8 datum, unsigned long ofs)
+static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
- __raw_writeb(datum, map->virt + ofs);
- mb();
+ map_word r;
+
+ if (map_bankwidth_is_1(map))
+ r.x[0] = *(unsigned char *)ptr;
+ else if (map_bankwidth_is_2(map))
+ r.x[0] = get_unaligned((uint16_t *)ptr);
+ else if (map_bankwidth_is_4(map))
+ r.x[0] = get_unaligned((uint32_t *)ptr);
+#if BITS_PER_LONG >= 64
+ else if (map_bankwidth_is_8(map))
+ r.x[0] = get_unaligned((uint64_t *)ptr);
+#endif
+ else if (map_bankwidth_is_large(map))
+ memcpy(r.x, ptr, map->bankwidth);
+
+ return r;
}
-static inline void map_write16(struct map_info *map, u16 datum, unsigned long ofs)
+static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len)
{
- __raw_writew(datum, map->virt + ofs);
- mb();
+ int i;
+
+ if (map_bankwidth_is_large(map)) {
+ char *dest = (char *)&orig;
+ memcpy(dest+start, buf, len);
+ } else {
+ for (i=start; i < start+len; i++) {
+ int bitpos;
+#ifdef __LITTLE_ENDIAN
+ bitpos = i*8;
+#else /* __BIG_ENDIAN */
+ bitpos = (map_bankwidth(map)-1-i)*8;
+#endif
+ orig.x[0] &= ~(0xff << bitpos);
+ orig.x[0] |= buf[i] << bitpos;
+ }
+ }
+ return orig;
}
-static inline void map_write32(struct map_info *map, u32 datum, unsigned long ofs)
+static inline map_word map_word_ff(struct map_info *map)
{
- __raw_writel(datum, map->virt + ofs);
- mb();
+ map_word r;
+ int i;
+
+ for (i=0; i<map_words(map); i++) {
+ r.x[i] = ~0UL;
+ }
+ return r;
+}
+static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
+{
+ map_word r;
+
+ if (map_bankwidth_is_1(map))
+ r.x[0] = __raw_readb(map->virt + ofs);
+ else if (map_bankwidth_is_2(map))
+ r.x[0] = __raw_readw(map->virt + ofs);
+ else if (map_bankwidth_is_4(map))
+ r.x[0] = __raw_readl(map->virt + ofs);
+#if BITS_PER_LONG >= 64
+ else if (map_bankwidth_is_8(map))
+ r.x[0] = __raw_readq(map->virt + ofs);
+#endif
+ else if (map_bankwidth_is_large(map))
+ memcpy_fromio(r.x, map->virt+ofs, map->bankwidth);
+
+ return r;
}
-static inline void map_write64(struct map_info *map, u64 datum, unsigned long ofs)
+static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
{
-#ifndef CONFIG_MTD_CFI_B8 /* 64-bit mappings */
- BUG();
-#else
- __raw_writell(datum, map->virt + ofs);
+ if (map_bankwidth_is_1(map))
+ __raw_writeb(datum.x[0], map->virt + ofs);
+ else if (map_bankwidth_is_2(map))
+ __raw_writew(datum.x[0], map->virt + ofs);
+ else if (map_bankwidth_is_4(map))
+ __raw_writel(datum.x[0], map->virt + ofs);
+#if BITS_PER_LONG >= 64
+ else if (map_bankwidth_is_8(map))
+ __raw_writeq(datum.x[0], map->virt + ofs);
+#endif
+ else if (map_bankwidth_is_large(map))
+ memcpy_toio(map->virt+ofs, datum.x, map->bankwidth);
mb();
-#endif /* CFI_B8 */
}
-static inline void map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
- memcpy_fromio(to, map->virt + from, len);
+ if (map->cached)
+ memcpy(to, (char *)map->cached + from, len);
+ else
+ memcpy_fromio(to, map->virt + from, len);
}
-static inline void map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
memcpy_toio(map->virt + to, from, len);
}
-#define simple_map_init(map) do { } while (0)
+#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
+#define map_read(map, ofs) (map)->read(map, ofs)
+#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
+#define map_write(map, datum, ofs) (map)->write(map, datum, ofs)
+#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)
+
+extern void simple_map_init(struct map_info *);
+#define map_is_linear(map) (map->phys != NO_XIP)
+
+#else
+#define map_read(map, ofs) inline_map_read(map, ofs)
+#define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len)
+#define map_write(map, datum, ofs) inline_map_write(map, datum, ofs)
+#define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len)
+
+
+#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth))
#define map_is_linear(map) (1)
#endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */
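
As a small usage sketch of the new map_word helpers above (the function name and use case are illustrative only): checking that one bus-wide word reads back fully erased.

#include <linux/mtd/map.h>

/* Sketch: does the word at 'ofs' read back as all 0xFF? */
static int word_is_erased(struct map_info *map, unsigned long ofs)
{
	map_word ff = map_word_ff(map);		/* all-ones, bus wide */
	map_word v  = map_read(map, ofs);

	return map_word_equal(map, v, ff);
}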
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
new file mode 100644
index 000000000000..d522d43d410d
--- /dev/null
+++ b/include/linux/mtd/physmap.h
@@ -0,0 +1,61 @@
+/*
+ * For boards with physically mapped flash and using
+ * drivers/mtd/maps/physmap.c mapping driver.
+ *
+ * $Id: physmap.h,v 1.2 2004/07/14 17:48:46 dwmw2 Exp $
+ *
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MTD_PHYSMAP__
+
+#include <linux/config.h>
+
+#if defined(CONFIG_MTD_PHYSMAP)
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+/*
+ * The map_info for physmap. Board can override size, buswidth, phys,
+ * (*set_vpp)(), etc in their initial setup routine.
+ */
+extern struct map_info physmap_map;
+
+/*
+ * Board needs to specify the exact mapping during their setup time.
+ */
+static inline void physmap_configure(unsigned long addr, unsigned long size, int buswidth, void (*set_vpp)(struct map_info *, int) )
+{
+ physmap_map.phys = addr;
+ physmap_map.size = size;
+ physmap_map.buswidth = buswidth;
+ physmap_map.set_vpp = set_vpp;
+}
+
+#if defined(CONFIG_MTD_PARTITIONS)
+
+/*
+ * Machines that wish to do flash partition may want to call this function in
+ * their setup routine.
+ *
+ * physmap_set_partitions(mypartitions, num_parts);
+ *
+ * Note that one can always override this hard-coded partition with
+ * command line partition (you need to enable CONFIG_MTD_CMDLINE_PARTS).
+ */
+void physmap_set_partitions(struct mtd_partition *parts, int num_parts);
+
+#endif /* defined(CONFIG_MTD_PARTITIONS) */
+#endif /* defined(CONFIG_MTD) */
+
+#endif /* __LINUX_MTD_PHYSMAP__ */
+
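
Finally, a hedged example of how a board port might use the new physmap interface from its setup code; the addresses, sizes, partition layout and function name are invented for illustration.

#include <linux/init.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/partitions.h>

static struct mtd_partition myboard_parts[] = {
	{ .name = "bootloader", .offset = 0x00000000, .size = 0x00040000 },
	{ .name = "filesystem", .offset = 0x00040000, .size = 0x00fc0000 },
};

static void __init myboard_flash_setup(void)
{
	/* Assumed layout: 16MiB of NOR at 0xff000000 on a 16-bit bus,
	 * no VPP control needed. */
	physmap_configure(0xff000000, 0x01000000, 2, NULL);
#ifdef CONFIG_MTD_PARTITIONS
	physmap_set_partitions(myboard_parts,
			       sizeof(myboard_parts) / sizeof(myboard_parts[0]));
#endif
}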