summary refs log tree commit diff
path: root/include/linux
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/amigaffs.h11
-rw-r--r--include/linux/bio.h26
-rw-r--r--include/linux/blkdev.h33
-rw-r--r--include/linux/blkdev.h.orig371
-rw-r--r--include/linux/fd.h6
-rw-r--r--include/linux/fs.h14
-rw-r--r--include/linux/highmem.h3
-rw-r--r--include/linux/iso_fs.h6
-rw-r--r--include/linux/qnx4_fs.h2
9 files changed, 445 insertions, 27 deletions
diff --git a/include/linux/amigaffs.h b/include/linux/amigaffs.h
index 3a264a7e9063..39bbdbbe4f79 100644
--- a/include/linux/amigaffs.h
+++ b/include/linux/amigaffs.h
@@ -31,7 +31,7 @@ affs_bread(struct super_block *sb, int block)
{
pr_debug(KERN_DEBUG "affs_bread: %d\n", block);
if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size)
- return bread(sb->s_dev, block, sb->s_blocksize);
+ return sb_bread(sb, block);
return NULL;
}
static inline struct buffer_head *
@@ -39,7 +39,7 @@ affs_getblk(struct super_block *sb, int block)
{
pr_debug(KERN_DEBUG "affs_getblk: %d\n", block);
if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size)
- return getblk(sb->s_dev, block, sb->s_blocksize);
+ return sb_getblk(sb, block);
return NULL;
}
static inline struct buffer_head *
@@ -48,10 +48,11 @@ affs_getzeroblk(struct super_block *sb, int block)
struct buffer_head *bh;
pr_debug(KERN_DEBUG "affs_getzeroblk: %d\n", block);
if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size) {
- bh = getblk(sb->s_dev, block, sb->s_blocksize);
- wait_on_buffer(bh);
+ bh = sb_getblk(sb, block);
+ lock_buffer(bh);
memset(bh->b_data, 0 , sb->s_blocksize);
mark_buffer_uptodate(bh, 1);
+ unlock_buffer(bh);
return bh;
}
return NULL;
@@ -62,7 +63,7 @@ affs_getemptyblk(struct super_block *sb, int block)
struct buffer_head *bh;
pr_debug(KERN_DEBUG "affs_getemptyblk: %d\n", block);
if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size) {
- bh = getblk(sb->s_dev, block, sb->s_blocksize);
+ bh = sb_getblk(sb, block);
wait_on_buffer(bh);
mark_buffer_uptodate(bh, 1);
return bh;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index a7c0c2576381..8bbacfeebeeb 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -20,6 +20,12 @@
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
+/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
+#include <asm/io.h>
+#ifndef BIO_VMERGE_BOUNDARY
+#define BIO_VMERGE_BOUNDARY 0
+#endif
+
#define BIO_DEBUG
#ifdef BIO_DEBUG
@@ -61,7 +67,17 @@ struct bio {
unsigned short bi_vcnt; /* how many bio_vec's */
unsigned short bi_idx; /* current index into bvl_vec */
- unsigned short bi_hw_seg; /* actual mapped segments */
+
+ /* Number of segments in this BIO after
+ * physical address coalescing is performed.
+ */
+ unsigned short bi_phys_segments;
+
+ /* Number of segments after physical and DMA remapping
+ * hardware coalescing is performed.
+ */
+ unsigned short bi_hw_segments;
+
unsigned int bi_size; /* residual I/O count */
unsigned int bi_max; /* max bvl_vecs we can hold,
used as index into pool */
@@ -128,10 +144,13 @@ struct bio {
/*
* merge helpers etc
*/
+
#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio) bio_iovec_idx((bio), 0)
-#define BIO_CONTIG(bio, nxt) \
- BIOVEC_MERGEABLE(__BVEC_END((bio)), __BVEC_START((nxt)))
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+#define BIOVEC_VIRT_MERGEABLE(vec1, vec2) \
+ ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
@@ -174,6 +193,7 @@ extern void bio_put(struct bio *);
extern int bio_endio(struct bio *, int, int);
struct request_queue;
+extern inline int bio_phys_segments(struct request_queue *, struct bio *);
extern inline int bio_hw_segments(struct request_queue *, struct bio *);
extern inline void __bio_clone(struct bio *, struct bio *);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fad87a308171..620b149ec617 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -41,8 +41,19 @@ struct request {
* touch them
*/
unsigned long hard_nr_sectors;
- unsigned short nr_segments;
+
+ /* Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+
+ /* Number of scatter-gather addr+len pairs after
+ * physical and DMA remapping hardware coalescing is performed.
+ * This is the number of scatter-gather entries the driver
+ * will actually have to deal with after DMA mapping is done.
+ */
unsigned short nr_hw_segments;
+
unsigned int current_nr_sectors;
unsigned int hard_cur_sectors;
void *special;
@@ -146,6 +157,7 @@ struct request_queue
* queue needs bounce pages for pages above this limit
*/
unsigned long bounce_pfn;
+ int bounce_gfp;
/*
* This is used to remove the plug when tq_disk runs.
@@ -166,7 +178,8 @@ struct request_queue
* queue settings
*/
unsigned short max_sectors;
- unsigned short max_segments;
+ unsigned short max_phys_segments;
+ unsigned short max_hw_segments;
unsigned short hardsect_size;
unsigned int max_segment_size;
@@ -202,19 +215,22 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
#define BLK_BOUNCE_HIGH (blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY (blk_max_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
#ifdef CONFIG_HIGHMEM
-extern void create_bounce(unsigned long pfn, struct bio **bio_orig);
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
+extern void init_emergency_isa_pool(void);
extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
{
- create_bounce(q->bounce_pfn, bio);
+ create_bounce(q->bounce_pfn, q->bounce_gfp, bio);
}
#else /* CONFIG_HIGHMEM */
#define blk_queue_bounce(q, bio) do { } while (0)
+#define init_emergency_isa_pool() do { } while (0)
#endif /* CONFIG_HIGHMEM */
@@ -257,7 +273,8 @@ extern struct request *blk_get_request(request_queue_t *, int, int);
extern void blk_put_request(struct request *);
extern void blk_plug_device(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern inline int blk_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
extern int block_ioctl(kdev_t, unsigned int, unsigned long);
@@ -270,7 +287,8 @@ extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
-extern void blk_queue_max_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_phys_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
@@ -284,7 +302,8 @@ extern int * blksize_size[MAX_BLKDEV];
extern int * max_readahead[MAX_BLKDEV];
-#define MAX_SEGMENTS 128
+#define MAX_PHYS_SEGMENTS 128
+#define MAX_HW_SEGMENTS 128
#define MAX_SECTORS 255
#define MAX_SEGMENT_SIZE 65536
diff --git a/include/linux/blkdev.h.orig b/include/linux/blkdev.h.orig
new file mode 100644
index 000000000000..620b149ec617
--- /dev/null
+++ b/include/linux/blkdev.h.orig
@@ -0,0 +1,371 @@
+#ifndef _LINUX_BLKDEV_H
+#define _LINUX_BLKDEV_H
+
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/genhd.h>
+#include <linux/tqueue.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+
+#include <asm/scatterlist.h>
+
+struct request_queue;
+typedef struct request_queue request_queue_t;
+struct elevator_s;
+typedef struct elevator_s elevator_t;
+
+struct request_list {
+ unsigned int count;
+ struct list_head free;
+ wait_queue_head_t wait;
+};
+
+struct request {
+ struct list_head queuelist; /* looking for ->queue? you must _not_
+ * access it directly, use
+ * blkdev_dequeue_request! */
+ int elevator_sequence;
+
+ unsigned char cmd[16];
+
+ unsigned long flags; /* see REQ_ bits below */
+
+ int rq_status; /* should split this into a few status bits */
+ kdev_t rq_dev;
+ int errors;
+ sector_t sector;
+ unsigned long nr_sectors;
+ unsigned long hard_sector; /* the hard_* are block layer
+ * internals, no driver should
+ * touch them
+ */
+ unsigned long hard_nr_sectors;
+
+ /* Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+
+ /* Number of scatter-gather addr+len pairs after
+ * physical and DMA remapping hardware coalescing is performed.
+ * This is the number of scatter-gather entries the driver
+ * will actually have to deal with after DMA mapping is done.
+ */
+ unsigned short nr_hw_segments;
+
+ unsigned int current_nr_sectors;
+ unsigned int hard_cur_sectors;
+ void *special;
+ char *buffer;
+ struct completion *waiting;
+ struct bio *bio, *biotail;
+ request_queue_t *q;
+ struct request_list *rl;
+};
+
+/*
+ * first three bits match BIO_RW* bits, important
+ */
+enum rq_flag_bits {
+ __REQ_RW, /* not set, read. set, write */
+ __REQ_RW_AHEAD, /* READA */
+ __REQ_BARRIER, /* may not be passed */
+ __REQ_CMD, /* is a regular fs rw request */
+ __REQ_NOMERGE, /* don't touch this for merging */
+ __REQ_STARTED, /* drive already may have started this one */
+ __REQ_DONTPREP, /* don't call prep for this one */
+ /*
+ * for IDE
+ */
+ __REQ_DRIVE_CMD,
+ __REQ_DRIVE_TASK,
+
+ __REQ_PC, /* packet command (special) */
+ __REQ_BLOCK_PC, /* queued down pc from block layer */
+ __REQ_SENSE, /* sense retrival */
+
+ __REQ_SPECIAL, /* driver special command */
+
+ __REQ_NR_BITS, /* stops here */
+};
+
+#define REQ_RW (1 << __REQ_RW)
+#define REQ_RW_AHEAD (1 << __REQ_RW_AHEAD)
+#define REQ_BARRIER (1 << __REQ_BARRIER)
+#define REQ_CMD (1 << __REQ_CMD)
+#define REQ_NOMERGE (1 << __REQ_NOMERGE)
+#define REQ_STARTED (1 << __REQ_STARTED)
+#define REQ_DONTPREP (1 << __REQ_DONTPREP)
+#define REQ_DRIVE_CMD (1 << __REQ_DRIVE_CMD)
+#define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK)
+#define REQ_PC (1 << __REQ_PC)
+#define REQ_SENSE (1 << __REQ_SENSE)
+#define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC)
+#define REQ_SPECIAL (1 << __REQ_SPECIAL)
+
+#include <linux/elevator.h>
+
+typedef int (merge_request_fn) (request_queue_t *, struct request *,
+ struct bio *);
+typedef int (merge_requests_fn) (request_queue_t *, struct request *,
+ struct request *);
+typedef void (request_fn_proc) (request_queue_t *q);
+typedef request_queue_t * (queue_proc) (kdev_t dev);
+typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
+typedef int (prep_rq_fn) (request_queue_t *, struct request *);
+typedef void (unplug_device_fn) (void *q);
+
+enum blk_queue_state {
+ Queue_down,
+ Queue_up,
+};
+
+/*
+ * Default nr free requests per queue, ll_rw_blk will scale it down
+ * according to available RAM at init time
+ */
+#define QUEUE_NR_REQUESTS 8192
+
+struct request_queue
+{
+ /*
+ * the queue request freelist, one for reads and one for writes
+ */
+ struct request_list rq[2];
+
+ /*
+ * Together with queue_head for cacheline sharing
+ */
+ struct list_head queue_head;
+ elevator_t elevator;
+
+ request_fn_proc *request_fn;
+ merge_request_fn *back_merge_fn;
+ merge_request_fn *front_merge_fn;
+ merge_requests_fn *merge_requests_fn;
+ make_request_fn *make_request_fn;
+ prep_rq_fn *prep_rq_fn;
+
+ /*
+ * The queue owner gets to use this for whatever they like.
+ * ll_rw_blk doesn't touch it.
+ */
+ void *queuedata;
+
+ /*
+ * queue needs bounce pages for pages above this limit
+ */
+ unsigned long bounce_pfn;
+ int bounce_gfp;
+
+ /*
+ * This is used to remove the plug when tq_disk runs.
+ */
+ struct tq_struct plug_tq;
+
+ /*
+ * various queue flags, see QUEUE_* below
+ */
+ unsigned long queue_flags;
+
+ /*
+ * protects queue structures from reentrancy
+ */
+ spinlock_t *queue_lock;
+
+ /*
+ * queue settings
+ */
+ unsigned short max_sectors;
+ unsigned short max_phys_segments;
+ unsigned short max_hw_segments;
+ unsigned short hardsect_size;
+ unsigned int max_segment_size;
+
+ unsigned long seg_boundary_mask;
+
+ wait_queue_head_t queue_wait;
+};
+
+#define RQ_INACTIVE (-1)
+#define RQ_ACTIVE 1
+#define RQ_SCSI_BUSY 0xffff
+#define RQ_SCSI_DONE 0xfffe
+#define RQ_SCSI_DISCONNECTING 0xffe0
+
+#define QUEUE_FLAG_PLUGGED 0 /* queue is plugged */
+#define QUEUE_FLAG_NOSPLIT 1 /* can process bio over several goes */
+#define QUEUE_FLAG_CLUSTER 2 /* cluster several segments into 1 */
+
+#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
+#define blk_mark_plugged(q) set_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
+#define blk_queue_empty(q) elv_queue_empty(q)
+#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
+
+#define rq_data_dir(rq) ((rq)->flags & 1)
+
+/*
+ * noop, requests are automagically marked as active/inactive by I/O
+ * scheduler -- see elv_next_request
+ */
+#define blk_queue_headactive(q, head_active)
+
+extern unsigned long blk_max_low_pfn, blk_max_pfn;
+
+#define BLK_BOUNCE_HIGH (blk_max_low_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ANY (blk_max_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
+
+#ifdef CONFIG_HIGHMEM
+
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
+extern void init_emergency_isa_pool(void);
+
+extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+{
+ create_bounce(q->bounce_pfn, q->bounce_gfp, bio);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+#define blk_queue_bounce(q, bio) do { } while (0)
+#define init_emergency_isa_pool() do { } while (0)
+
+#endif /* CONFIG_HIGHMEM */
+
+#define rq_for_each_bio(bio, rq) \
+ if ((rq->bio)) \
+ for (bio = (rq)->bio; bio; bio = bio->bi_next)
+
+struct blk_dev_struct {
+ /*
+ * queue_proc has to be atomic
+ */
+ request_queue_t request_queue;
+ queue_proc *queue;
+ void *data;
+};
+
+struct sec_size {
+ unsigned block_size;
+ unsigned block_size_bits;
+};
+
+/*
+ * Used to indicate the default queue for drivers that don't bother
+ * to implement multiple queues. We have this access macro here
+ * so as to eliminate the need for each and every block device
+ * driver to know about the internal structure of blk_dev[].
+ */
+#define BLK_DEFAULT_QUEUE(_MAJOR) &blk_dev[_MAJOR].request_queue
+
+extern struct sec_size * blk_sec[MAX_BLKDEV];
+extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
+extern void grok_partitions(kdev_t dev, long size);
+extern int wipe_partitions(kdev_t dev);
+extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
+extern void generic_make_request(struct bio *bio);
+extern inline request_queue_t *blk_get_queue(kdev_t dev);
+extern void blkdev_release_request(struct request *);
+extern void blk_attempt_remerge(request_queue_t *, struct request *);
+extern struct request *blk_get_request(request_queue_t *, int, int);
+extern void blk_put_request(struct request *);
+extern void blk_plug_device(request_queue_t *);
+extern void blk_recount_segments(request_queue_t *, struct bio *);
+extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
+
+extern int block_ioctl(kdev_t, unsigned int, unsigned long);
+
+/*
+ * Access functions for manipulating queue properties
+ */
+extern int blk_init_queue(request_queue_t *, request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(request_queue_t *);
+extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
+extern void blk_queue_bounce_limit(request_queue_t *, u64);
+extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
+extern void blk_queue_max_phys_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
+extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
+extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
+extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern void blk_dump_rq_flags(struct request *, char *);
+extern void generic_unplug_device(void *);
+
+extern int * blk_size[MAX_BLKDEV];
+
+extern int * blksize_size[MAX_BLKDEV];
+
+extern int * max_readahead[MAX_BLKDEV];
+
+#define MAX_PHYS_SEGMENTS 128
+#define MAX_HW_SEGMENTS 128
+#define MAX_SECTORS 255
+
+#define MAX_SEGMENT_SIZE 65536
+
+/* read-ahead in pages.. */
+#define MAX_READAHEAD 31
+#define MIN_READAHEAD 3
+
+#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
+#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
+#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
+#define blkdev_next_request(req) blkdev_entry_to_request((req)->queuelist.next)
+#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queuelist.prev)
+
+extern void drive_stat_acct(struct request *, int, int);
+
+extern inline void blk_clear(int major)
+{
+ blk_size[major] = NULL;
+#if 0
+ blk_size_in_bytes[major] = NULL;
+#endif
+ blksize_size[major] = NULL;
+ max_readahead[major] = NULL;
+ read_ahead[major] = 0;
+}
+
+extern inline int get_hardsect_size(kdev_t dev)
+{
+ request_queue_t *q = blk_get_queue(dev);
+ int retval = 512;
+
+ if (q && q->hardsect_size)
+ retval = q->hardsect_size;
+
+ return retval;
+}
+
+#define blk_finished_io(nsects) do { } while (0)
+#define blk_started_io(nsects) do { } while (0)
+
+extern inline unsigned int blksize_bits(unsigned int size)
+{
+ unsigned int bits = 8;
+ do {
+ bits++;
+ size >>= 1;
+ } while (size > 256);
+ return bits;
+}
+
+extern inline unsigned int block_size(kdev_t dev)
+{
+ int retval = BLOCK_SIZE;
+ int major = MAJOR(dev);
+
+ if (blksize_size[major]) {
+ int minor = MINOR(dev);
+ if (blksize_size[major][minor])
+ retval = blksize_size[major][minor];
+ }
+ return retval;
+}
+
+#endif
diff --git a/include/linux/fd.h b/include/linux/fd.h
index c0ed2792ba8b..187785b83958 100644
--- a/include/linux/fd.h
+++ b/include/linux/fd.h
@@ -369,10 +369,4 @@ struct floppy_raw_cmd {
#define FDEJECT _IO(2, 0x5a)
/* eject the disk */
-
-#ifdef __KERNEL__
-/* eject the boot floppy (if we need the drive for a different root floppy) */
-void floppy_eject(void);
-#endif
-
#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7f52b46d619f..b1e59c161107 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -21,7 +21,6 @@
#include <linux/cache.h>
#include <linux/stddef.h>
#include <linux/string.h>
-#include <linux/bio.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
@@ -1363,6 +1362,7 @@ extern struct buffer_head * get_hash_table(kdev_t, sector_t, int);
extern struct buffer_head * getblk(kdev_t, sector_t, int);
extern void ll_rw_block(int, int, struct buffer_head * bh[]);
extern int submit_bh(int, struct buffer_head *);
+struct bio;
extern int submit_bio(int, struct bio *);
extern int is_read_only(kdev_t);
extern void __brelse(struct buffer_head *);
@@ -1379,6 +1379,18 @@ static inline void bforget(struct buffer_head *buf)
}
extern int set_blocksize(kdev_t, int);
extern struct buffer_head * bread(kdev_t, int, int);
+static inline struct buffer_head * sb_bread(struct super_block *sb, int block)
+{
+ return bread(sb->s_dev, block, sb->s_blocksize);
+}
+static inline struct buffer_head * sb_getblk(struct super_block *sb, int block)
+{
+ return getblk(sb->s_dev, block, sb->s_blocksize);
+}
+static inline struct buffer_head * sb_get_hash_table(struct super_block *sb, int block)
+{
+ return get_hash_table(sb->s_dev, block, sb->s_blocksize);
+}
extern void wakeup_bdflush(void);
extern void put_unused_buffer_head(struct buffer_head * bh);
extern struct buffer_head * get_unused_buffer_head(int async);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 157c3b62fc34..7aa92d2c257a 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -2,6 +2,7 @@
#define _LINUX_HIGHMEM_H
#include <linux/config.h>
+#include <linux/bio.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_HIGHMEM
@@ -13,7 +14,7 @@ extern struct page *highmem_start_page;
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
-extern void create_bounce(unsigned long pfn, struct bio **bio_orig);
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
static inline char *bh_kmap(struct buffer_head *bh)
{
diff --git a/include/linux/iso_fs.h b/include/linux/iso_fs.h
index 82dde80812ae..9cdfbaea416d 100644
--- a/include/linux/iso_fs.h
+++ b/include/linux/iso_fs.h
@@ -219,7 +219,7 @@ int get_joliet_filename(struct iso_directory_record *, unsigned char *, struct i
int get_acorn_filename(struct iso_directory_record *, char *, struct inode *);
extern struct dentry *isofs_lookup(struct inode *, struct dentry *);
-extern struct buffer_head *isofs_bread(struct inode *, unsigned int, unsigned int);
+extern struct buffer_head *isofs_bread(struct inode *, unsigned int);
extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
extern struct inode_operations isofs_dir_inode_operations;
@@ -230,11 +230,11 @@ extern struct address_space_operations isofs_symlink_aops;
#ifdef LEAK_CHECK
#define free_s leak_check_free_s
#define malloc leak_check_malloc
-#define bread leak_check_bread
+#define sb_bread leak_check_bread
#define brelse leak_check_brelse
extern void * leak_check_malloc(unsigned int size);
extern void leak_check_free_s(void * obj, int size);
-extern struct buffer_head * leak_check_bread(int dev, int block, int size);
+extern struct buffer_head * leak_check_bread(struct super_block *sb, int block);
extern void leak_check_brelse(struct buffer_head * bh);
#endif /* LEAK_CHECK */
diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h
index dd9b7cb6efb5..55ba2f99d9a2 100644
--- a/include/linux/qnx4_fs.h
+++ b/include/linux/qnx4_fs.h
@@ -118,7 +118,7 @@ extern int qnx4_unlink(struct inode *dir, struct dentry *dentry);
extern int qnx4_rmdir(struct inode *dir, struct dentry *dentry);
extern int qnx4_sync_file(struct file *file, struct dentry *dentry, int);
extern int qnx4_sync_inode(struct inode *inode);
-extern int qnx4_get_block(struct inode *inode, long iblock, struct buffer_head *bh, int create);
+extern int qnx4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create);
#endif /* __KERNEL__ */