#ifndef _BLK_H
#define _BLK_H

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/compiler.h>

extern void set_device_ro(kdev_t dev, int flag);
extern void add_blkdev_randomness(int major);

#ifdef CONFIG_BLK_DEV_RAM

extern int rd_doload;		/* 1 = load ramdisk, 0 = don't load */
extern int rd_prompt;		/* 1 = prompt for ramdisk, 0 = don't prompt */
extern int rd_image_start;	/* starting block # of image */

#ifdef CONFIG_BLK_DEV_INITRD

#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */

extern unsigned long initrd_start, initrd_end;
extern int initrd_below_start_ok; /* 1 if it is not an error if initrd_start < memory_start */
void initrd_init(void);

#endif /* CONFIG_BLK_DEV_INITRD */

#endif /* CONFIG_BLK_DEV_RAM */

/*
 * end_request() and friends. Must be called with the request queue spinlock
 * acquired. All functions called within end_request() _must_ be atomic.
 *
 * Several drivers define their own end_request and call
 * end_that_request_first() and end_that_request_last() for the parts of
 * the original function they need; this avoids duplicating the common
 * completion code in every driver.  (A sketch of such a driver-private
 * routine follows blkdev_dequeue_request() below.)
 */
extern int end_that_request_first(struct request *, int, int);
extern void end_that_request_last(struct request *);
struct request *elv_next_request(request_queue_t *q);

/* Remove @req from its queue; the caller must hold the queue lock. */
static inline void blkdev_dequeue_request(struct request *req)
{
	list_del(&req->queuelist);

	if (req->q)
		elv_remove_request(req->q, req);
}
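
/*
 * Example (sketch only, not part of the interface): a driver that defines
 * LOCAL_END_REQUEST and rolls its own completion routine typically combines
 * the helpers above like this, completing a transfer count of its own
 * choosing instead of req->hard_cur_sectors.  The name "foo_end_request"
 * and the nr_sectors argument are illustrative assumptions; as with
 * end_request(), the queue lock must be held.
 */
#if 0	/* illustration only, never compiled */
static void foo_end_request(struct request *req, int uptodate, int nr_sectors)
{
	/* nonzero return: parts of the request are still outstanding */
	if (end_that_request_first(req, uptodate, nr_sectors))
		return;

	add_blkdev_randomness(major(req->rq_dev));
	blkdev_dequeue_request(req);
	end_that_request_last(req);
}
#endif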

/*
 * Elevator insertion helpers.  "back" != 0 queues the request at the tail
 * of the queue, otherwise at the head; elv_add_request() additionally calls
 * blk_plug_device().  The queue lock must be held.
 */
#define _elv_add_request_core(q, rq, where, plug)			\
	do {								\
		if ((plug))						\
			blk_plug_device((q));				\
		(q)->elevator.elevator_add_req_fn((q), (rq), (where));	\
	} while (0)

#define _elv_add_request(q, rq, back, p) do {				      \
	if ((back))							      \
		_elv_add_request_core((q), (rq), (q)->queue_head.prev, (p)); \
	else								      \
		_elv_add_request_core((q), (rq), &(q)->queue_head, (p));     \
} while (0)

#define elv_add_request(q, rq, back) _elv_add_request((q), (rq), (back), 1)

#if defined(MAJOR_NR) || defined(IDE_DRIVER)

#if (MAJOR_NR != SCSI_TAPE_MAJOR) && (MAJOR_NR != OSST_MAJOR)

#if !defined(IDE_DRIVER)

/*
 * Convenience macros for old-style drivers that define MAJOR_NR before
 * including this header.
 */
#ifndef QUEUE
# define QUEUE (&blk_dev[MAJOR_NR].request_queue)
#endif

#ifndef CURRENT
# define CURRENT elv_next_request(QUEUE)
#endif

#endif /* !defined(IDE_DRIVER) */

/*
* If we have our own end_request, we do not want to include this mess
*/
#ifndef LOCAL_END_REQUEST
static inline void end_request(struct request *req, int uptodate)
{
	/* nonzero means the request has not been fully completed yet */
	if (end_that_request_first(req, uptodate, req->hard_cur_sectors))
		return;

	add_blkdev_randomness(major(req->rq_dev));
	blkdev_dequeue_request(req);
	end_that_request_last(req);
}
#endif /* !LOCAL_END_REQUEST */
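
/*
 * Example (sketch only): the classic request-function loop built on the
 * interfaces above.  "foo_transfer()" is an illustrative assumption for
 * whatever moves req->current_nr_sectors sectors to or from req->buffer
 * and returns 1 on success, 0 on error; everything else is declared in
 * this header or in <linux/blkdev.h>.  Old-style drivers spell the same
 * loop with CURRENT, which expands to elv_next_request(QUEUE).  The
 * request function is normally entered with the queue lock held.
 */
#if 0	/* illustration only, never compiled */
static void foo_request_fn(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		/*
		 * end_request() completes the current chunk; while more of
		 * the request remains it stays on the queue and is returned
		 * again by elv_next_request().
		 */
		end_request(req, foo_transfer(req));
	}
}
#endif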

#endif /* (MAJOR_NR != SCSI_TAPE_MAJOR) && (MAJOR_NR != OSST_MAJOR) */
#endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */
#endif /* _BLK_H */