path: root/include/linux
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/blkdev.h       |  28
-rw-r--r--  include/linux/buffer_head.h  |   1
-rw-r--r--  include/linux/cdrom.h        |   3
-rw-r--r--  include/linux/compat_ioctl.h |   2
-rw-r--r--  include/linux/dcache.h       |   3
-rw-r--r--  include/linux/elevator.h     |  64
-rw-r--r--  include/linux/fb.h           |  84
-rw-r--r--  include/linux/fs.h           |   9
-rw-r--r--  include/linux/i2o.h          |  14
-rw-r--r--  include/linux/jbd.h          |   1
-rw-r--r--  include/linux/jiffies.h      | 384
-rw-r--r--  include/linux/kernel.h       |   3
-rw-r--r--  include/linux/key-ui.h       |  97
-rw-r--r--  include/linux/key.h          | 284
-rw-r--r--  include/linux/keyctl.h       |  39
-rw-r--r--  include/linux/kfifo.h        | 157
-rw-r--r--  include/linux/mbcache.h      |   2
-rw-r--r--  include/linux/mmzone.h       |  29
-rw-r--r--  include/linux/module.h       |  34
-rw-r--r--  include/linux/nodemask.h     | 326
-rw-r--r--  include/linux/pagevec.h      |   6
-rw-r--r--  include/linux/pktcdvd.h      | 275
-rw-r--r--  include/linux/pm.h           |   5
-rw-r--r--  include/linux/posix_acl.h    |   1
-rw-r--r--  include/linux/prctl.h        |   1
-rw-r--r--  include/linux/reiserfs_fs.h  |  48
-rw-r--r--  include/linux/sched.h        |  19
-rw-r--r--  include/linux/security.h     |  42
-rw-r--r--  include/linux/smb_mount.h    |   5
-rw-r--r--  include/linux/syscalls.h     |  15
-rw-r--r--  include/linux/threads.h      |   2
-rw-r--r--  include/linux/time.h         | 274
-rw-r--r--  include/linux/times.h        |  74
-rw-r--r--  include/linux/timex.h        |  62
-rw-r--r--  include/linux/types.h        |  14
-rw-r--r--  include/linux/wait.h         |  80
-rw-r--r--  include/linux/writeback.h    |   6
-rw-r--r--  include/linux/xattr.h        |  16
38 files changed, 1938 insertions, 571 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4efe45d1af7e..b2059869cb92 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -19,8 +19,8 @@
struct request_queue;
typedef struct request_queue request_queue_t;
-struct elevator_s;
-typedef struct elevator_s elevator_t;
+struct elevator_queue;
+typedef struct elevator_queue elevator_t;
struct request_pm_state;
#define BLKDEV_MIN_RQ 4
@@ -52,6 +52,20 @@ struct as_io_context {
sector_t seek_mean;
};
+struct cfq_queue;
+struct cfq_io_context {
+ void (*dtor)(struct cfq_io_context *);
+ void (*exit)(struct cfq_io_context *);
+
+ struct io_context *ioc;
+
+ /*
+ * circular list of cfq_io_contexts belonging to a process io context
+ */
+ struct list_head list;
+ struct cfq_queue *cfqq;
+};
+
/*
* This is the per-process I/O subsystem state. It is refcounted and
* kmalloc'ed. Currently all fields are modified in process io context
@@ -67,7 +81,10 @@ struct io_context {
unsigned long last_waited; /* Time last woken after wait for request */
int nr_batch_requests; /* Number of requests left in the batch */
+ spinlock_t lock;
+
struct as_io_context *aic;
+ struct cfq_io_context *cic;
};
void put_io_context(struct io_context *ioc);
@@ -80,6 +97,7 @@ struct request_list {
int count[2];
mempool_t *rq_pool;
wait_queue_head_t wait[2];
+ wait_queue_head_t drain;
};
#define BLK_MAX_CDB 16
@@ -279,7 +297,7 @@ struct request_queue
*/
struct list_head queue_head;
struct request *last_merge;
- elevator_t elevator;
+ elevator_t *elevator;
/*
* the queue request freelist, one for reads and one for writes
@@ -342,6 +360,7 @@ struct request_queue
unsigned long nr_requests; /* Max # of requests */
unsigned int nr_congestion_on;
unsigned int nr_congestion_off;
+ unsigned int nr_batching;
unsigned short max_sectors;
unsigned short max_hw_sectors;
@@ -381,6 +400,7 @@ struct request_queue
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
#define QUEUE_FLAG_ORDERED 8 /* supports ordered writes */
+#define QUEUE_FLAG_DRAIN 9 /* draining queue for sched switch */
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
@@ -617,6 +637,8 @@ extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(request_queue_t *);
extern void __generic_unplug_device(request_queue_t *);
extern long nr_blockdev_pages(void);
+extern void blk_wait_queue_drained(request_queue_t *);
+extern void blk_finish_queue_drain(request_queue_t *);
int blk_get_queue(request_queue_t *);
request_queue_t *blk_alloc_queue(int);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 367a8a313506..47fb6a02d630 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -155,7 +155,6 @@ void invalidate_bdev(struct block_device *, int);
int sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
-void wake_up_buffer(struct buffer_head *bh);
int fsync_bdev(struct block_device *);
struct super_block *freeze_bdev(struct block_device *);
void thaw_bdev(struct block_device *, struct super_block *);
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index bcc9410761d9..1c1f5efffd64 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -499,6 +499,7 @@ struct cdrom_generic_command
#define GPMODE_VENDOR_PAGE 0x00
#define GPMODE_R_W_ERROR_PAGE 0x01
#define GPMODE_WRITE_PARMS_PAGE 0x05
+#define GPMODE_WCACHING_PAGE 0x08
#define GPMODE_AUDIO_CTL_PAGE 0x0e
#define GPMODE_POWER_PAGE 0x1a
#define GPMODE_FAULT_FAIL_PAGE 0x1c
@@ -947,6 +948,8 @@ struct cdrom_device_info {
__u8 reserved : 6; /* not used yet */
int cdda_method; /* see flags */
__u8 last_sense;
+ __u8 media_written; /* dirty flag, DVD+RW bookkeeping */
+ unsigned short mmc3_profile; /* current MMC3 profile */
int for_data;
int (*exit)(struct cdrom_device_info *);
int mrw_mode_page;
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 77f59742c407..71da7d1260cd 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -382,6 +382,8 @@ COMPATIBLE_IOCTL(CDROMREADALL)
COMPATIBLE_IOCTL(DVD_READ_STRUCT)
COMPATIBLE_IOCTL(DVD_WRITE_STRUCT)
COMPATIBLE_IOCTL(DVD_AUTH)
+/* pktcdvd */
+COMPATIBLE_IOCTL(PACKET_CTRL_CMD)
/* Big L */
ULONG_IOCTL(LOOP_SET_FD)
ULONG_IOCTL(LOOP_CHANGE_FD)
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b378e57b2743..f4bc1ac23daa 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -28,7 +28,7 @@ struct vfsmount;
* "quick string" -- eases parameter passing, but more importantly
* saves "metadata" about the string (ie length and the hash).
*
- * hash comes first so it snuggles against d_parent and d_bucket in the
+ * hash comes first so it snuggles against d_parent in the
* dentry.
*/
struct qstr {
@@ -91,7 +91,6 @@ struct dentry {
* so they all fit in a 16-byte range, with 16-byte alignment.
*/
struct dentry *d_parent; /* parent directory */
- struct hlist_head *d_bucket; /* lookup hash bucket */
struct qstr d_name;
struct list_head d_lru; /* LRU list */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 27e8183f4776..8cf0e3f290bf 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -22,9 +22,9 @@ typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, int);
typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
typedef int (elevator_init_fn) (request_queue_t *, elevator_t *);
-typedef void (elevator_exit_fn) (request_queue_t *, elevator_t *);
+typedef void (elevator_exit_fn) (elevator_t *);
-struct elevator_s
+struct elevator_ops
{
elevator_merge_fn *elevator_merge_fn;
elevator_merged_fn *elevator_merged_fn;
@@ -48,12 +48,32 @@ struct elevator_s
elevator_init_fn *elevator_init_fn;
elevator_exit_fn *elevator_exit_fn;
+};
- void *elevator_data;
+#define ELV_NAME_MAX (16)
- struct kobject kobj;
+/*
+ * identifies an elevator type, such as AS or deadline
+ */
+struct elevator_type
+{
+ struct list_head list;
+ struct elevator_ops ops;
+ struct elevator_type *elevator_type;
struct kobj_type *elevator_ktype;
- const char *elevator_name;
+ char elevator_name[ELV_NAME_MAX];
+ struct module *elevator_owner;
+};
+
+/*
+ * each queue has an elevator_queue associated with it
+ */
+struct elevator_queue
+{
+ struct elevator_ops *ops;
+ void *elevator_data;
+ struct kobject kobj;
+ struct elevator_type *elevator_type;
};
/*
@@ -79,28 +99,19 @@ extern int elv_set_request(request_queue_t *, struct request *, int);
extern void elv_put_request(request_queue_t *, struct request *);
/*
- * noop I/O scheduler. always merges, always inserts new request at tail
- */
-extern elevator_t elevator_noop;
-
-/*
- * deadline i/o scheduler. uses request time outs to prevent indefinite
- * starvation
+ * io scheduler registration
*/
-extern elevator_t iosched_deadline;
+extern int elv_register(struct elevator_type *);
+extern void elv_unregister(struct elevator_type *);
/*
- * anticipatory I/O scheduler
+ * io scheduler sysfs switching
*/
-extern elevator_t iosched_as;
+extern ssize_t elv_iosched_show(request_queue_t *, char *);
+extern ssize_t elv_iosched_store(request_queue_t *, const char *, size_t);
-/*
- * completely fair queueing I/O scheduler
- */
-extern elevator_t iosched_cfq;
-
-extern int elevator_init(request_queue_t *, elevator_t *);
-extern void elevator_exit(request_queue_t *);
+extern int elevator_init(request_queue_t *, char *);
+extern void elevator_exit(elevator_t *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
extern int elv_try_merge(struct request *, struct bio *);
extern int elv_try_last_merge(request_queue_t *, struct bio *);
@@ -119,4 +130,13 @@ extern int elv_try_last_merge(request_queue_t *, struct bio *);
#define ELEVATOR_INSERT_BACK 2
#define ELEVATOR_INSERT_SORT 3
+/*
+ * return values from elevator_may_queue_fn
+ */
+enum {
+ ELV_MQUEUE_MAY,
+ ELV_MQUEUE_NO,
+ ELV_MQUEUE_MUST,
+};
+
#endif
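For orientation, the registration interface above replaces the old exported elevator_t globals (elevator_noop, iosched_deadline, iosched_as, iosched_cfq). A minimal sketch of how an I/O scheduler module might hook into it follows; the "example" name and the empty ops table are placeholders invented here, not part of this patch, and <linux/module.h> plus <linux/elevator.h> are assumed.

static struct elevator_type iosched_example = {
	/* .ops = { ... } would carry the scheduler's elevator_*_fn callbacks */
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};

static int __init example_iosched_init(void)
{
	return elv_register(&iosched_example);	/* join the global list of selectable schedulers */
}

static void __exit example_iosched_exit(void)
{
	elv_unregister(&iosched_example);
}

module_init(example_iosched_init);
module_exit(example_iosched_exit);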
diff --git a/include/linux/fb.h b/include/linux/fb.h
index c38132231c64..a09f31d7c6ee 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -318,6 +318,7 @@ struct fb_cursor {
struct fbcurpos hot; /* cursor hot spot */
struct fb_image image; /* Cursor image */
/* all fields below are for fbcon use only */
+ int flash; /* cursor blink */
char *data; /* copy of bitmap */
};
@@ -555,6 +556,82 @@ struct fb_ops {
int (*fb_mmap)(struct fb_info *info, struct file *file, struct vm_area_struct *vma);
};
+#ifdef CONFIG_FB_TILEBLITTING
+
+#define FB_TILE_CURSOR_NONE 0
+#define FB_TILE_CURSOR_UNDERLINE 1
+#define FB_TILE_CURSOR_LOWER_THIRD 2
+#define FB_TILE_CURSOR_LOWER_HALF 3
+#define FB_TILE_CURSOR_TWO_THIRDS 4
+#define FB_TILE_CURSOR_BLOCK 5
+
+struct fb_tilemap {
+ __u32 width; /* width of each tile in pixels */
+ __u32 height; /* height of each tile in scanlines */
+ __u32 depth; /* color depth of each tile */
+ __u32 length; /* number of tiles in the map */
+ __u8 *data; /* actual tile map: a bitmap array, packed
+ to the nearest byte */
+};
+
+struct fb_tilerect {
+ __u32 sx; /* origin in the x-axis */
+ __u32 sy; /* origin in the y-axis */
+ __u32 width; /* number of tiles in the x-axis */
+ __u32 height; /* number of tiles in the y-axis */
+ __u32 index; /* what tile to use: index to tile map */
+ __u32 fg; /* foreground color */
+ __u32 bg; /* background color */
+ __u32 rop; /* raster operation */
+};
+
+struct fb_tilearea {
+ __u32 sx; /* source origin in the x-axis */
+ __u32 sy; /* source origin in the y-axis */
+ __u32 dx; /* destination origin in the x-axis */
+ __u32 dy; /* destination origin in the y-axis */
+ __u32 width; /* number of tiles in the x-axis */
+ __u32 height; /* number of tiles in the y-axis */
+};
+
+struct fb_tileblit {
+ __u32 sx; /* origin in the x-axis */
+ __u32 sy; /* origin in the y-axis */
+ __u32 width; /* number of tiles in the x-axis */
+ __u32 height; /* number of tiles in the y-axis */
+ __u32 fg; /* foreground color */
+ __u32 bg; /* background color */
+ __u32 length; /* number of tiles to draw */
+ __u32 *indices; /* array of indices to tile map */
+};
+
+struct fb_tilecursor {
+ __u32 sx; /* cursor position in the x-axis */
+ __u32 sy; /* cursor position in the y-axis */
+ __u32 mode; /* 0 = erase, 1 = draw */
+ __u32 shape; /* see FB_TILE_CURSOR_* */
+ __u32 fg; /* foreground color */
+ __u32 bg; /* background color */
+};
+
+struct fb_tile_ops {
+ /* set tile characteristics */
+ void (*fb_settile)(struct fb_info *info, struct fb_tilemap *map);
+
+ /* all dimensions from hereon are in terms of tiles */
+
+ /* move a rectangular region of tiles from one area to another*/
+ void (*fb_tilecopy)(struct fb_info *info, struct fb_tilearea *area);
+ /* fill a rectangular region with a tile */
+ void (*fb_tilefill)(struct fb_info *info, struct fb_tilerect *rect);
+ /* copy an array of tiles */
+ void (*fb_tileblit)(struct fb_info *info, struct fb_tileblit *blit);
+ /* cursor */
+ void (*fb_tilecursor)(struct fb_info *info,
+ struct fb_tilecursor *cursor);
+};
+#endif /* CONFIG_FB_TILEBLITTING */
+
/* FBINFO_* = fb_info.flags bit flags */
#define FBINFO_MODULE 0x0001 /* Low-level driver is a module */
#define FBINFO_HWACCEL_DISABLED 0x0002
@@ -586,6 +663,7 @@ struct fb_ops {
from userspace */
#define FBINFO_MISC_MODESWITCH 0x20000 /* mode switch */
#define FBINFO_MISC_MODESWITCHLATE 0x40000 /* init hardware later */
+#define FBINFO_MISC_TILEBLITTING 0x80000 /* use tile blitting */
struct fb_info {
int node;
@@ -601,6 +679,10 @@ struct fb_info {
struct fb_cmap cmap; /* Current cmap */
struct list_head modelist; /* mode list */
struct fb_ops *fbops;
+ struct device *device;
+#ifdef CONFIG_FB_TILEBLITTING
+ struct fb_tile_ops *tileops; /* Tile Blitting */
+#endif
char __iomem *screen_base; /* Virtual address */
unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
int currcon; /* Current VC. */
@@ -608,7 +690,7 @@ struct fb_info {
#define FBINFO_STATE_RUNNING 0
#define FBINFO_STATE_SUSPENDED 1
u32 state; /* Hardware state i.e suspend */
-
+ void *fbcon_par; /* fbcon use-only private area */
/* From here on everything is device dependent */
void *par;
};
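The fb_tile_ops interface added above is driver-provided; a hedged sketch of how a hypothetical driver (every mydrv_* name is invented here) might advertise tile blitting to the console layer, assuming CONFIG_FB_TILEBLITTING is enabled:

/* callbacks implemented elsewhere in the hypothetical driver */
extern void mydrv_settile(struct fb_info *info, struct fb_tilemap *map);
extern void mydrv_tilecopy(struct fb_info *info, struct fb_tilearea *area);
extern void mydrv_tilefill(struct fb_info *info, struct fb_tilerect *rect);
extern void mydrv_tileblit(struct fb_info *info, struct fb_tileblit *blit);
extern void mydrv_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor);

static struct fb_tile_ops mydrv_tile_ops = {
	.fb_settile	= mydrv_settile,
	.fb_tilecopy	= mydrv_tilecopy,
	.fb_tilefill	= mydrv_tilefill,
	.fb_tileblit	= mydrv_tileblit,
	.fb_tilecursor	= mydrv_tilecursor,
};

static void mydrv_enable_tiles(struct fb_info *info)
{
	info->tileops = &mydrv_tile_ops;		/* new fb_info member added above */
	info->flags |= FBINFO_MISC_TILEBLITTING;	/* ask fbcon to draw in tiles */
}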
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4f6fe6b575a8..6768655fd11d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -758,6 +758,7 @@ struct super_block {
int s_need_sync_fs;
atomic_t s_active;
void *s_security;
+ struct xattr_handler **s_xattr;
struct list_head s_dirty; /* dirty inodes */
struct list_head s_io; /* parked for writeback */
@@ -981,7 +982,8 @@ struct super_operations {
#define I_DIRTY_SYNC 1 /* Not dirty enough for O_DATASYNC */
#define I_DIRTY_DATASYNC 2 /* Data-related inode changes pending */
#define I_DIRTY_PAGES 4 /* Data-related inode changes pending */
-#define I_LOCK 8
+#define __I_LOCK 3
+#define I_LOCK (1 << __I_LOCK)
#define I_FREEING 16
#define I_CLEAR 32
#define I_NEW 64
@@ -1254,7 +1256,6 @@ extern struct block_device *bdget(dev_t);
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
-extern int blkdev_open(struct inode *, struct file *);
extern struct block_device *open_by_devnum(dev_t, unsigned);
extern struct file_operations def_blk_fops;
extern struct address_space_operations def_blk_aops;
@@ -1339,7 +1340,9 @@ extern sector_t bmap(struct inode *, sector_t);
extern int setattr_mask(unsigned int);
extern int notify_change(struct dentry *, struct iattr *);
extern int permission(struct inode *, int, struct nameidata *);
-extern int vfs_permission(struct inode *, int);
+extern int generic_permission(struct inode *, int,
+ int (*check_acl)(struct inode *, int));
+
extern int get_write_access(struct inode *);
extern int deny_write_access(struct file *);
static inline void put_write_access(struct inode * inode)
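vfs_permission() is replaced above by generic_permission(), which takes an optional ACL callback. A hedged sketch of how a filesystem's ->permission method might use it; the myfs_* names are hypothetical, and the "-EAGAIN means no ACL, fall back to mode bits" convention is an assumption of this sketch rather than something stated in the hunk.

static int myfs_check_acl(struct inode *inode, int mask)
{
	/* look up the filesystem's on-disk ACL here; in this sketch -EAGAIN
	 * stands for "no ACL found, let the mode bits decide" */
	return -EAGAIN;
}

static int myfs_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	return generic_permission(inode, mask, myfs_check_acl);
}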
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index a68437640135..3481de9c5837 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -147,10 +147,10 @@ struct i2o_controller {
struct pci_dev *pdev; /* PCI device */
- int short_req:1; /* use small block sizes */
- int no_quiesce:1; /* dont quiesce before reset */
- int raptor:1; /* split bar */
- int promise:1; /* Promise controller */
+ unsigned int short_req:1; /* use small block sizes */
+ unsigned int no_quiesce:1; /* dont quiesce before reset */
+ unsigned int raptor:1; /* split bar */
+ unsigned int promise:1; /* Promise controller */
#ifdef CONFIG_MTRR
int mtrr_reg0;
@@ -180,9 +180,9 @@ struct i2o_controller {
struct i2o_dma in_queue; /* inbound message queue Host->IOP */
struct i2o_dma out_queue; /* outbound message queue IOP->Host */
- int battery:1; /* Has a battery backup */
- int io_alloc:1; /* An I/O resource was allocated */
- int mem_alloc:1; /* A memory resource was allocated */
+ unsigned int battery:1; /* Has a battery backup */
+ unsigned int io_alloc:1; /* An I/O resource was allocated */
+ unsigned int mem_alloc:1; /* A memory resource was allocated */
struct resource io_resource; /* I/O resource allocated to the IOP */
struct resource mem_resource; /* Mem resource allocated to the IOP */
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index e65b90f1962c..dfdd307872bb 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -299,6 +299,7 @@ enum jbd_state_bits {
BH_JBDDirty, /* Is dirty but journaled */
BH_State, /* Pins most journal_head state */
BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
+ BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
};
BUFFER_FNS(JBD, jbd)
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 459630f41680..d45eff83b906 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -3,10 +3,72 @@
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/seqlock.h>
-#include <asm/system.h>
+#include <linux/time.h>
+#include <linux/timex.h>
#include <asm/param.h> /* for HZ */
+#include <asm/div64.h>
+
+#ifndef div_long_long_rem
+#define div_long_long_rem(dividend,divisor,remainder) \
+({ \
+ u64 result = dividend; \
+ *remainder = do_div(result,divisor); \
+ result; \
+})
+#endif
+
+/*
+ * The following defines establish the engineering parameters of the PLL
+ * model. The HZ variable establishes the timer interrupt frequency, 100 Hz
+ * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the
+ * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the
+ * nearest power of two in order to avoid hardware multiply operations.
+ */
+#if HZ >= 12 && HZ < 24
+# define SHIFT_HZ 4
+#elif HZ >= 24 && HZ < 48
+# define SHIFT_HZ 5
+#elif HZ >= 48 && HZ < 96
+# define SHIFT_HZ 6
+#elif HZ >= 96 && HZ < 192
+# define SHIFT_HZ 7
+#elif HZ >= 192 && HZ < 384
+# define SHIFT_HZ 8
+#elif HZ >= 384 && HZ < 768
+# define SHIFT_HZ 9
+#elif HZ >= 768 && HZ < 1536
+# define SHIFT_HZ 10
+#else
+# error You lose.
+#endif
+
+/* LATCH is used in the interval timer and ftape setup. */
+#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
+
+/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can
+ * improve accuracy by shifting LSH bits, hence calculating:
+ * (NOM << LSH) / DEN
+ * This however means trouble for large NOM, because (NOM << LSH) may no
+ * longer fit in 32 bits. The following way of calculating this gives us
+ * some slack, under the following conditions:
+ * - (NOM / DEN) fits in (32 - LSH) bits.
+ * - (NOM % DEN) fits in (32 - LSH) bits.
+ */
+#define SH_DIV(NOM,DEN,LSH) ( ((NOM / DEN) << LSH) \
+ + (((NOM % DEN) << LSH) + DEN / 2) / DEN)
+
+/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
+#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
+
+/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
+#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
+
+/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
+#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
+
+/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */
+/* a value TUSEC for TICK_USEC (can be set by adjtimex) */
+#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
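As a worked example of how LATCH and SH_DIV() compose (assuming the classic PC timer crystal, CLOCK_TICK_RATE = 1193182, an architecture-defined constant that does not appear in this hunk), with HZ = 100 the integer arithmetic gives:

	LATCH     = (1193182 + 50) / 100           = 11932
	ACTHZ     = SH_DIV(1193182, 11932, 8)      = 25600      (100.00 in 24.8 fixed point)
	TICK_NSEC = SH_DIV(1000000000, 25600, 8)   = 10000000   (a 10 ms tick)

The numbers are illustrative only; other CLOCK_TICK_RATE/HZ combinations produce a TICK_NSEC that is not a round value.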
/*
* The 64-bit value is not volatile - you MUST NOT read it
@@ -50,4 +112,320 @@ static inline u64 get_jiffies_64(void)
((long)(a) - (long)(b) >= 0))
#define time_before_eq(a,b) time_after_eq(b,a)
+/*
+ * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * so jiffies wrap bugs show up earlier.
+ */
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+
+/*
+ * Change timeval to jiffies, trying to avoid the
+ * most obvious overflows..
+ *
+ * And some not so obvious.
+ *
+ * Note that we don't want to return MAX_LONG, because
+ * for various timeout reasons we often end up having
+ * to wait "jiffies+1" in order to guarantee that we wait
+ * at _least_ "jiffies" - so "jiffies+1" had better still
+ * be positive.
+ */
+#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)
+
+/*
+ * We want to do realistic conversions of time so we need to use the same
+ * values the update wall clock code uses as the jiffies size. This value
+ * is: TICK_NSEC (which is defined in timex.h). This
+ * is a constant and is in nanoseconds. We will use scaled math
+ * with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and
+ * NSEC_JIFFIE_SC. Note that these defines contain nothing but
+ * constants and so are computed at compile time. SHIFT_HZ (computed in
+ * timex.h) adjusts the scaling for different HZ values.
+
+ * Scaled math??? What is that?
+ *
+ * Scaled math is a way to do integer math on values that would,
+ * otherwise, either overflow, underflow, or cause undesired div
+ * instructions to appear in the execution path. In short, we "scale"
+ * up the operands so they take more bits (more precision, less
+ * underflow), do the desired operation and then "scale" the result back
+ * by the same amount. If we do the scaling by shifting we avoid the
+ * costly mpy and the dastardly div instructions.
+
+ * Suppose, for example, we want to convert from seconds to jiffies
+ * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE. The
+ * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We
+ * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we
+ * might calculate at compile time, however, the result will only have
+ * about 3-4 bits of precision (less for smaller values of HZ).
+ *
+ * So, we scale as follows:
+ * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE);
+ * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE;
+ * Then we make SCALE a power of two so:
+ * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE;
+ * Now we define:
+ * #define SEC_CONV = ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE))
+ * jiff = (sec * SEC_CONV) >> SCALE;
+ *
+ * Often the math we use will expand beyond 32-bits so we tell C how to
+ * do this and pass the 64-bit result of the mpy through the ">> SCALE"
+ * which should take the result back to 32-bits. We want this expansion
+ * to capture as much precision as possible. At the same time we don't
+ * want to overflow so we pick the SCALE to avoid this. In this file,
+ * that means using a different scale for each range of HZ values (as
+ * defined in timex.h).
+ *
+ * For those who want to know, gcc will give a 64-bit result from a "*"
+ * operator if the result is a long long AND at least one of the
+ * operands is cast to long long (usually just prior to the "*" so as
+ * not to confuse it into thinking it really has a 64-bit operand,
+ * which, by the way, it can do, but it takes more code and at least 2
+ * mpys).
+
+ * We also need to be aware that one second in nanoseconds is only a
+ * couple of bits away from overflowing a 32-bit word, so we MUST use
+ * 64-bits to get the full range time in nanoseconds.
+
+ */
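To make the scaled math concrete, a worked example under the simplifying assumption that TICK_NSEC is exactly 1000000 (an idealized HZ = 1000 clock; the real constant differs slightly):

	SEC_CONVERSION = ((NSEC_PER_SEC << SEC_JIFFIE_SC) + TICK_NSEC - 1) / TICK_NSEC
	               = (1000000000 << SEC_JIFFIE_SC) / 1000000
	               = 1000 << SEC_JIFFIE_SC

	jiffies = (sec * SEC_CONVERSION) >> SEC_JIFFIE_SC = sec * 1000 = sec * HZ

In this exact case the scale shift cancels out; when TICK_NSEC does not divide NSEC_PER_SEC evenly, the extra SEC_JIFFIE_SC bits carried through the 64-bit multiply are what preserve the fractional precision before the final shift.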
+
+/*
+ * Here are the scales we will use. One for seconds, nanoseconds and
+ * microseconds.
+ *
+ * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and
+ * check if the sign bit is set. If not, we bump the shift count by 1.
+ * (Gets an extra bit of precision where we can use it.)
+ * We know it is set for HZ = 1024 and HZ = 100, not for 1000.
+ * Haven't tested others.
+
+ * Limits of cpp (for #if expressions) only long (no long long), but
+ * then we only need the most significant bit.
+ */
+
+#define SEC_JIFFIE_SC (31 - SHIFT_HZ)
+#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000)
+#undef SEC_JIFFIE_SC
+#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
+#endif
+#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
+#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
+#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
+ TICK_NSEC -1) / (u64)TICK_NSEC))
+
+#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
+ TICK_NSEC -1) / (u64)TICK_NSEC))
+#define USEC_CONVERSION \
+ ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
+ TICK_NSEC -1) / (u64)TICK_NSEC))
+/*
+ * USEC_ROUND is used in the timeval to jiffie conversion. See there
+ * for more details. It is the scaled resolution rounding value. Note
+ * that it is a 64-bit value. Since, when it is applied, we are already
+ * in jiffies (albeit scaled), it is nothing but the bits we will shift
+ * off.
+ */
+#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
+/*
+ * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
+ * into seconds. The 64-bit case will overflow if we are not careful,
+ * so use the messy SH_DIV macro to do it. Still all constants.
+ */
+#if BITS_PER_LONG < 64
+# define MAX_SEC_IN_JIFFIES \
+ (long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
+#else /* take care of overflow on 64 bits machines */
+# define MAX_SEC_IN_JIFFIES \
+ (SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
+
+#endif
+
+/*
+ * Convert jiffies to milliseconds and back.
+ *
+ * Avoid unnecessary multiplications/divisions in the
+ * two most common HZ cases:
+ */
+static inline unsigned int jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= 1000 && !(1000 % HZ)
+ return (1000 / HZ) * j;
+#elif HZ > 1000 && !(HZ % 1000)
+ return (j + (HZ / 1000) - 1)/(HZ / 1000);
+#else
+ return (j * 1000) / HZ;
+#endif
+}
+
+static inline unsigned int jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= 1000 && !(1000 % HZ)
+ return (1000000 / HZ) * j;
+#elif HZ > 1000 && !(HZ % 1000)
+ return (j*1000 + (HZ - 1000))/(HZ / 1000);
+#else
+ return (j * 1000000) / HZ;
+#endif
+}
+
+static inline unsigned long msecs_to_jiffies(const unsigned int m)
+{
+ if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= 1000 && !(1000 % HZ)
+ return (m + (1000 / HZ) - 1) / (1000 / HZ);
+#elif HZ > 1000 && !(HZ % 1000)
+ return m * (HZ / 1000);
+#else
+ return (m * HZ + 999) / 1000;
+#endif
+}
+
+/*
+ * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
+ * that a remainder subtract here would not do the right thing as the
+ * resolution values don't fall on second boundaries. I.e. the line:
+ * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ *
+ * Rather, we just shift the bits off the right.
+ *
+ * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
+ * value to a scaled second value.
+ */
+static __inline__ unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+ unsigned long sec = value->tv_sec;
+ long nsec = value->tv_nsec + TICK_NSEC - 1;
+
+ if (sec >= MAX_SEC_IN_JIFFIES){
+ sec = MAX_SEC_IN_JIFFIES;
+ nsec = 0;
+ }
+ return (((u64)sec * SEC_CONVERSION) +
+ (((u64)nsec * NSEC_CONVERSION) >>
+ (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+
+}
+
+static __inline__ void
+jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
+{
+ /*
+ * Convert jiffies to nanoseconds and separate with
+ * one divide.
+ */
+ u64 nsec = (u64)jiffies * TICK_NSEC;
+ value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
+}
+
+/* Same for "timeval"
+ *
+ * Well, almost. The problem here is that the real system resolution is
+ * in nanoseconds and the value being converted is in microseconds.
+ * Also for some machines (those that use HZ = 1024, in particular),
+ * there is a LARGE error in the tick size in microseconds.
+
+ * The solution we use is to do the rounding AFTER we convert the
+ * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
+ * Instruction wise, this should cost only an additional add with carry
+ * instruction above the way it was done above.
+ */
+static __inline__ unsigned long
+timeval_to_jiffies(const struct timeval *value)
+{
+ unsigned long sec = value->tv_sec;
+ long usec = value->tv_usec;
+
+ if (sec >= MAX_SEC_IN_JIFFIES){
+ sec = MAX_SEC_IN_JIFFIES;
+ usec = 0;
+ }
+ return (((u64)sec * SEC_CONVERSION) +
+ (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
+ (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
+}
+
+static __inline__ void
+jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
+{
+ /*
+ * Convert jiffies to nanoseconds and separate with
+ * one divide.
+ */
+ u64 nsec = (u64)jiffies * TICK_NSEC;
+ value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
+ value->tv_usec /= NSEC_PER_USEC;
+}
+
+/*
+ * Convert jiffies/jiffies_64 to clock_t and back.
+ */
+static inline clock_t jiffies_to_clock_t(long x)
+{
+#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+ return x / (HZ / USER_HZ);
+#else
+ u64 tmp = (u64)x * TICK_NSEC;
+ do_div(tmp, (NSEC_PER_SEC / USER_HZ));
+ return (long)tmp;
+#endif
+}
+
+static inline unsigned long clock_t_to_jiffies(unsigned long x)
+{
+#if (HZ % USER_HZ)==0
+ if (x >= ~0UL / (HZ / USER_HZ))
+ return ~0UL;
+ return x * (HZ / USER_HZ);
+#else
+ u64 jif;
+
+ /* Don't worry about loss of precision here .. */
+ if (x >= ~0UL / HZ * USER_HZ)
+ return ~0UL;
+
+ /* .. but do try to contain it here */
+ jif = x * (u64) HZ;
+ do_div(jif, USER_HZ);
+ return jif;
+#endif
+}
+
+static inline u64 jiffies_64_to_clock_t(u64 x)
+{
+#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+ do_div(x, HZ / USER_HZ);
+#else
+ /*
+ * There are better ways that don't overflow early,
+ * but even this doesn't overflow in hundreds of years
+ * in 64 bits, so..
+ */
+ x *= TICK_NSEC;
+ do_div(x, (NSEC_PER_SEC / USER_HZ));
+#endif
+ return x;
+}
+
+static inline u64 nsec_to_clock_t(u64 x)
+{
+#if (NSEC_PER_SEC % USER_HZ) == 0
+ do_div(x, (NSEC_PER_SEC / USER_HZ));
+#elif (USER_HZ % 512) == 0
+ x *= USER_HZ/512;
+ do_div(x, (NSEC_PER_SEC / 512));
+#else
+ /*
+ * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
+ * overflow after 64.99 years.
+ * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
+ */
+ x *= 9;
+ do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2))
+ / USER_HZ));
+#endif
+ return x;
+}
+
#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 40307a28bb4b..cf2f984e1b95 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -136,6 +136,7 @@ extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in
extern int panic_on_oops;
extern int tainted;
extern const char *print_tainted(void);
+extern void add_taint(unsigned);
/* Values used for system_state */
extern enum system_states {
@@ -150,6 +151,8 @@ extern enum system_states {
#define TAINT_FORCED_MODULE (1<<1)
#define TAINT_UNSAFE_SMP (1<<2)
#define TAINT_FORCED_RMMOD (1<<3)
+#define TAINT_MACHINE_CHECK (1<<4)
+#define TAINT_BAD_PAGE (1<<5)
extern void dump_stack(void);
diff --git a/include/linux/key-ui.h b/include/linux/key-ui.h
new file mode 100644
index 000000000000..60cc7b762e78
--- /dev/null
+++ b/include/linux/key-ui.h
@@ -0,0 +1,97 @@
+/* key-ui.h: key userspace interface stuff for use by keyfs
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_KEY_UI_H
+#define _LINUX_KEY_UI_H
+
+#include <linux/key.h>
+
+/* the key tree */
+extern struct rb_root key_serial_tree;
+extern spinlock_t key_serial_lock;
+
+/* required permissions */
+#define KEY_VIEW 0x01 /* require permission to view attributes */
+#define KEY_READ 0x02 /* require permission to read content */
+#define KEY_WRITE 0x04 /* require permission to update / modify */
+#define KEY_SEARCH 0x08 /* require permission to search (keyring) or find (key) */
+#define KEY_LINK 0x10 /* require permission to link */
+#define KEY_ALL 0x1f /* all the above permissions */
+
+/*
+ * the keyring payload contains a list of the keys to which the keyring is
+ * subscribed
+ */
+struct keyring_list {
+ unsigned maxkeys; /* max keys this list can hold */
+ unsigned nkeys; /* number of keys currently held */
+ struct key *keys[0];
+};
+
+
+/*
+ * check to see whether permission is granted to use a key in the desired way
+ */
+static inline int key_permission(const struct key *key, key_perm_t perm)
+{
+ key_perm_t kperm;
+
+ if (key->uid == current->fsuid)
+ kperm = key->perm >> 16;
+ else if (key->gid != -1 &&
+ key->perm & KEY_GRP_ALL &&
+ in_group_p(key->gid)
+ )
+ kperm = key->perm >> 8;
+ else
+ kperm = key->perm;
+
+ kperm = kperm & perm & KEY_ALL;
+
+ return kperm == perm;
+}
+
+/*
+ * check to see whether permission is granted to use a key in at least one of
+ * the desired ways
+ */
+static inline int key_any_permission(const struct key *key, key_perm_t perm)
+{
+ key_perm_t kperm;
+
+ if (key->uid == current->fsuid)
+ kperm = key->perm >> 16;
+ else if (key->gid != -1 &&
+ key->perm & KEY_GRP_ALL &&
+ in_group_p(key->gid)
+ )
+ kperm = key->perm >> 8;
+ else
+ kperm = key->perm;
+
+ kperm = kperm & perm & KEY_ALL;
+
+ return kperm != 0;
+}
+
+
+extern struct key *lookup_user_key(key_serial_t id, int create, int part,
+ key_perm_t perm);
+
+extern long join_session_keyring(const char *name);
+
+extern struct key_type *key_type_lookup(const char *type);
+extern void key_type_put(struct key_type *ktype);
+
+#define key_negative_timeout 60 /* default timeout on a negative key's existence */
+
+
+#endif /* _LINUX_KEY_UI_H */
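As a worked example of the key_permission() mask arithmetic above (using the KEY_USR_*/KEY_OTH_* bit values from the new key.h, which follows): for a key owned by the caller with perm = KEY_USR_VIEW | KEY_USR_SEARCH | KEY_OTH_VIEW = 0x00090001, a KEY_VIEW check takes the owner byte, 0x00090001 >> 16 = 0x09, masks it to 0x09 & KEY_VIEW & KEY_ALL = 0x01, and grants access because that equals the requested KEY_VIEW. A process that is neither the owner nor in the key's group is left with the "other" byte 0x01, so a KEY_SEARCH check computes 0x01 & 0x08 = 0 and is refused.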
diff --git a/include/linux/key.h b/include/linux/key.h
new file mode 100644
index 000000000000..e914be777c4a
--- /dev/null
+++ b/include/linux/key.h
@@ -0,0 +1,284 @@
+/* key.h: authentication token and access key management
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * See Documentation/keys.txt for information on keys/keyrings.
+ */
+
+#ifndef _LINUX_KEY_H
+#define _LINUX_KEY_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+#ifdef __KERNEL__
+
+/* key handle serial number */
+typedef int32_t key_serial_t;
+
+/* key handle permissions mask */
+typedef uint32_t key_perm_t;
+
+struct key;
+
+#ifdef CONFIG_KEYS
+
+#undef KEY_DEBUGGING
+
+#define KEY_USR_VIEW 0x00010000 /* user can view a key's attributes */
+#define KEY_USR_READ 0x00020000 /* user can read key payload / view keyring */
+#define KEY_USR_WRITE 0x00040000 /* user can update key payload / add link to keyring */
+#define KEY_USR_SEARCH 0x00080000 /* user can find a key in search / search a keyring */
+#define KEY_USR_LINK 0x00100000 /* user can create a link to a key/keyring */
+#define KEY_USR_ALL 0x001f0000
+
+#define KEY_GRP_VIEW 0x00000100 /* group permissions... */
+#define KEY_GRP_READ 0x00000200
+#define KEY_GRP_WRITE 0x00000400
+#define KEY_GRP_SEARCH 0x00000800
+#define KEY_GRP_LINK 0x00001000
+#define KEY_GRP_ALL 0x00001f00
+
+#define KEY_OTH_VIEW 0x00000001 /* third party permissions... */
+#define KEY_OTH_READ 0x00000002
+#define KEY_OTH_WRITE 0x00000004
+#define KEY_OTH_SEARCH 0x00000008
+#define KEY_OTH_LINK 0x00000010
+#define KEY_OTH_ALL 0x0000001f
+
+struct seq_file;
+struct user_struct;
+
+struct key_type;
+struct key_owner;
+struct keyring_list;
+struct keyring_name;
+
+/*****************************************************************************/
+/*
+ * authentication token / access credential / keyring
+ * - types of key include:
+ * - keyrings
+ * - disk encryption IDs
+ * - Kerberos TGTs and tickets
+ */
+struct key {
+ atomic_t usage; /* number of references */
+ key_serial_t serial; /* key serial number */
+ struct rb_node serial_node;
+ struct key_type *type; /* type of key */
+ rwlock_t lock; /* examination vs change lock */
+ struct rw_semaphore sem; /* change vs change sem */
+ struct key_user *user; /* owner of this key */
+ time_t expiry; /* time at which key expires (or 0) */
+ uid_t uid;
+ gid_t gid;
+ key_perm_t perm; /* access permissions */
+ unsigned short quotalen; /* length added to quota */
+ unsigned short datalen; /* payload data length */
+ unsigned short flags; /* status flags (change with lock writelocked) */
+#define KEY_FLAG_INSTANTIATED 0x00000001 /* set if key has been instantiated */
+#define KEY_FLAG_DEAD 0x00000002 /* set if key type has been deleted */
+#define KEY_FLAG_REVOKED 0x00000004 /* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA 0x00000008 /* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT 0x00000010 /* set if key is being constructed in userspace */
+#define KEY_FLAG_NEGATIVE 0x00000020 /* set if key is negative */
+
+#ifdef KEY_DEBUGGING
+ unsigned magic;
+#define KEY_DEBUG_MAGIC 0x18273645u
+#define KEY_DEBUG_MAGIC_X 0xf8e9dacbu
+#endif
+
+ /* the description string
+ * - this is used to match a key against search criteria
+ * - this should be a printable string
+ * - eg: for krb5 AFS, this might be "afs@REDHAT.COM"
+ */
+ char *description;
+
+ /* type specific data
+ * - this is used by the keyring type to index the name
+ */
+ union {
+ struct list_head link;
+ } type_data;
+
+ /* key data
+ * - this is used to hold the data actually used in cryptography or
+ * whatever
+ */
+ union {
+ unsigned long value;
+ void *data;
+ struct keyring_list *subscriptions;
+ } payload;
+};
+
+/*****************************************************************************/
+/*
+ * kernel managed key type definition
+ */
+struct key_type {
+ /* name of the type */
+ const char *name;
+
+ /* default payload length for quota precalculation (optional)
+ * - this can be used instead of calling key_payload_reserve(), that
+ * function only needs to be called if the real datalen is different
+ */
+ size_t def_datalen;
+
+ /* instantiate a key of this type
+ * - this method should call key_payload_reserve() to determine if the
+ * user's quota will hold the payload
+ */
+ int (*instantiate)(struct key *key, const void *data, size_t datalen);
+
+ /* duplicate a key of this type (optional)
+ * - the source key will be locked against change
+ * - the new description will be attached
+ * - the quota will have been adjusted automatically from
+ * source->quotalen
+ */
+ int (*duplicate)(struct key *key, const struct key *source);
+
+ /* update a key of this type (optional)
+ * - this method should call key_payload_reserve() to recalculate the
+ * quota consumption
+ * - the key must be locked against read when modifying
+ */
+ int (*update)(struct key *key, const void *data, size_t datalen);
+
+ /* match a key against a description */
+ int (*match)(const struct key *key, const void *desc);
+
+ /* clear the data from a key (optional) */
+ void (*destroy)(struct key *key);
+
+ /* describe a key */
+ void (*describe)(const struct key *key, struct seq_file *p);
+
+ /* read a key's data (optional)
+ * - permission checks will be done by the caller
+ * - the key's semaphore will be readlocked by the caller
+ * - should return the amount of data that could be read, no matter how
+ * much is copied into the buffer
+ * - shouldn't do the copy if the buffer is NULL
+ */
+ long (*read)(const struct key *key, char __user *buffer, size_t buflen);
+
+ /* internal fields */
+ struct list_head link; /* link in types list */
+};
+
+extern struct key_type key_type_keyring;
+
+extern int register_key_type(struct key_type *ktype);
+extern void unregister_key_type(struct key_type *ktype);
+
+extern struct key *key_alloc(struct key_type *type,
+ const char *desc,
+ uid_t uid, gid_t gid, key_perm_t perm,
+ int not_in_quota);
+extern int key_payload_reserve(struct key *key, size_t datalen);
+extern int key_instantiate_and_link(struct key *key,
+ const void *data,
+ size_t datalen,
+ struct key *keyring);
+extern int key_negate_and_link(struct key *key,
+ unsigned timeout,
+ struct key *keyring);
+extern void key_revoke(struct key *key);
+extern void key_put(struct key *key);
+
+static inline struct key *key_get(struct key *key)
+{
+ if (key)
+ atomic_inc(&key->usage);
+ return key;
+}
+
+extern struct key *request_key(struct key_type *type,
+ const char *description,
+ const char *callout_info);
+
+extern int key_validate(struct key *key);
+
+extern struct key *key_create_or_update(struct key *keyring,
+ const char *type,
+ const char *description,
+ const void *payload,
+ size_t plen,
+ int not_in_quota);
+
+extern int key_update(struct key *key,
+ const void *payload,
+ size_t plen);
+
+extern int key_link(struct key *keyring,
+ struct key *key);
+
+extern int key_unlink(struct key *keyring,
+ struct key *key);
+
+extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
+ int not_in_quota, struct key *dest);
+
+extern int keyring_clear(struct key *keyring);
+
+extern struct key *keyring_search(struct key *keyring,
+ struct key_type *type,
+ const char *description);
+
+extern struct key *search_process_keyrings(struct key_type *type,
+ const char *description);
+
+extern int keyring_add_key(struct key *keyring,
+ struct key *key);
+
+extern struct key *key_lookup(key_serial_t id);
+
+#define key_serial(key) ((key) ? (key)->serial : 0)
+
+/*
+ * the userspace interface
+ */
+extern struct key root_user_keyring, root_session_keyring;
+extern int alloc_uid_keyring(struct user_struct *user);
+extern void switch_uid_keyring(struct user_struct *new_user);
+extern int copy_keys(unsigned long clone_flags, struct task_struct *tsk);
+extern void exit_keys(struct task_struct *tsk);
+extern int suid_keys(struct task_struct *tsk);
+extern int exec_keys(struct task_struct *tsk);
+extern void key_fsuid_changed(struct task_struct *tsk);
+extern void key_fsgid_changed(struct task_struct *tsk);
+
+#else /* CONFIG_KEYS */
+
+#define key_validate(k) 0
+#define key_serial(k) 0
+#define key_get(k) NULL
+#define key_put(k) do { } while(0)
+#define alloc_uid_keyring(u) 0
+#define switch_uid_keyring(u) do { } while(0)
+#define copy_keys(f,t) 0
+#define exit_keys(t) do { } while(0)
+#define suid_keys(t) do { } while(0)
+#define exec_keys(t) do { } while(0)
+#define key_fsuid_changed(t) do { } while(0)
+#define key_fsgid_changed(t) do { } while(0)
+
+#endif /* CONFIG_KEYS */
+#endif /* __KERNEL__ */
+#endif /* _LINUX_KEY_H */
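A minimal sketch of a key type built on the interface above; the "example" type name and its callbacks are hypothetical, and a real type would also provide update/duplicate methods and proper payload storage.

/* needs <linux/seq_file.h> and <linux/string.h> in addition to <linux/key.h> */
static int example_instantiate(struct key *key, const void *data, size_t datalen)
{
	int ret;

	ret = key_payload_reserve(key, datalen);	/* charge the payload to quota */
	if (ret == 0)
		key->payload.value = datalen;		/* trivially record just the length */
	return ret;
}

static int example_match(const struct key *key, const void *description)
{
	return strcmp(key->description, description) == 0;	/* exact-name match */
}

static void example_describe(const struct key *key, struct seq_file *m)
{
	seq_puts(m, key->description);
}

static struct key_type key_type_example = {
	.name		= "example",
	.instantiate	= example_instantiate,
	.match		= example_match,
	.describe	= example_describe,
};

/* register_key_type(&key_type_example) at module init,
   unregister_key_type(&key_type_example) on exit */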
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
new file mode 100644
index 000000000000..381dedc370a3
--- /dev/null
+++ b/include/linux/keyctl.h
@@ -0,0 +1,39 @@
+/* keyctl.h: keyctl command IDs
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_KEYCTL_H
+#define _LINUX_KEYCTL_H
+
+/* special process keyring shortcut IDs */
+#define KEY_SPEC_THREAD_KEYRING -1 /* - key ID for thread-specific keyring */
+#define KEY_SPEC_PROCESS_KEYRING -2 /* - key ID for process-specific keyring */
+#define KEY_SPEC_SESSION_KEYRING -3 /* - key ID for session-specific keyring */
+#define KEY_SPEC_USER_KEYRING -4 /* - key ID for UID-specific keyring */
+#define KEY_SPEC_USER_SESSION_KEYRING -5 /* - key ID for UID-session keyring */
+#define KEY_SPEC_GROUP_KEYRING -6 /* - key ID for GID-specific keyring */
+
+/* keyctl commands */
+#define KEYCTL_GET_KEYRING_ID 0 /* ask for a keyring's ID */
+#define KEYCTL_JOIN_SESSION_KEYRING 1 /* join or start named session keyring */
+#define KEYCTL_UPDATE 2 /* update a key */
+#define KEYCTL_REVOKE 3 /* revoke a key */
+#define KEYCTL_CHOWN 4 /* set ownership of a key */
+#define KEYCTL_SETPERM 5 /* set perms on a key */
+#define KEYCTL_DESCRIBE 6 /* describe a key */
+#define KEYCTL_CLEAR 7 /* clear contents of a keyring */
+#define KEYCTL_LINK 8 /* link a key into a keyring */
+#define KEYCTL_UNLINK 9 /* unlink a key from a keyring */
+#define KEYCTL_SEARCH 10 /* search for a key in a keyring */
+#define KEYCTL_READ 11 /* read a key or keyring's contents */
+#define KEYCTL_INSTANTIATE 12 /* instantiate a partially constructed key */
+#define KEYCTL_NEGATE 13 /* negate a partially constructed key */
+
+#endif /* _LINUX_KEYCTL_H */
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
new file mode 100644
index 000000000000..0e6e972a9f70
--- /dev/null
+++ b/include/linux/kfifo.h
@@ -0,0 +1,157 @@
+/*
+ * A simple kernel FIFO implementation.
+ *
+ * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#ifndef _LINUX_KFIFO_H
+#define _LINUX_KFIFO_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+
+struct kfifo {
+ unsigned char *buffer; /* the buffer holding the data */
+ unsigned int size; /* the size of the allocated buffer */
+ unsigned int in; /* data is added at offset (in % size) */
+ unsigned int out; /* data is extracted from off. (out % size) */
+ spinlock_t *lock; /* protects concurrent modifications */
+};
+
+extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
+ int gfp_mask, spinlock_t *lock);
+extern struct kfifo *kfifo_alloc(unsigned int size, int gfp_mask,
+ spinlock_t *lock);
+extern void kfifo_free(struct kfifo *fifo);
+extern unsigned int __kfifo_put(struct kfifo *fifo,
+ unsigned char *buffer, unsigned int len);
+extern unsigned int __kfifo_get(struct kfifo *fifo,
+ unsigned char *buffer, unsigned int len);
+
+/*
+ * __kfifo_reset - removes the entire FIFO contents, no locking version
+ * @fifo: the fifo to be emptied.
+ */
+static inline void __kfifo_reset(struct kfifo *fifo)
+{
+ fifo->in = fifo->out = 0;
+}
+
+/*
+ * kfifo_reset - removes the entire FIFO contents
+ * @fifo: the fifo to be emptied.
+ */
+static inline void kfifo_reset(struct kfifo *fifo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(fifo->lock, flags);
+
+ __kfifo_reset(fifo);
+
+ spin_unlock_irqrestore(fifo->lock, flags);
+}
+
+/*
+ * kfifo_put - puts some data into the FIFO
+ * @fifo: the fifo to be used.
+ * @buffer: the data to be added.
+ * @len: the length of the data to be added.
+ *
+ * This function copies at most 'len' bytes from the 'buffer' into
+ * the FIFO depending on the free space, and returns the number of
+ * bytes copied.
+ */
+static inline unsigned int kfifo_put(struct kfifo *fifo,
+ unsigned char *buffer, unsigned int len)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(fifo->lock, flags);
+
+ ret = __kfifo_put(fifo, buffer, len);
+
+ spin_unlock_irqrestore(fifo->lock, flags);
+
+ return ret;
+}
+
+/*
+ * kfifo_get - gets some data from the FIFO
+ * @fifo: the fifo to be used.
+ * @buffer: where the data must be copied.
+ * @len: the size of the destination buffer.
+ *
+ * This function copies at most 'len' bytes from the FIFO into the
+ * 'buffer' and returns the number of copied bytes.
+ */
+static inline unsigned int kfifo_get(struct kfifo *fifo,
+ unsigned char *buffer, unsigned int len)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(fifo->lock, flags);
+
+ ret = __kfifo_get(fifo, buffer, len);
+
+ /*
+ * optimization: if the FIFO is empty, set the indices to 0
+ * so we don't wrap the next time
+ */
+ if (fifo->in == fifo->out)
+ fifo->in = fifo->out = 0;
+
+ spin_unlock_irqrestore(fifo->lock, flags);
+
+ return ret;
+}
+
+/*
+ * __kfifo_len - returns the number of bytes available in the FIFO, no locking version
+ * @fifo: the fifo to be used.
+ */
+static inline unsigned int __kfifo_len(struct kfifo *fifo)
+{
+ return fifo->in - fifo->out;
+}
+
+/*
+ * kfifo_len - returns the number of bytes available in the FIFO
+ * @fifo: the fifo to be used.
+ */
+static inline unsigned int kfifo_len(struct kfifo *fifo)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(fifo->lock, flags);
+
+ ret = __kfifo_len(fifo);
+
+ spin_unlock_irqrestore(fifo->lock, flags);
+
+ return ret;
+}
+
+#else
+#warning "don't include kernel headers in userspace"
+#endif /* __KERNEL__ */
+#endif
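A minimal usage sketch of the FIFO API declared above; the 64-byte size, the GFP_KERNEL allocation and the skipped error handling are illustrative only.

static spinlock_t example_fifo_lock = SPIN_LOCK_UNLOCKED;

static void example_fifo_roundtrip(void)
{
	unsigned char in[] = "test", out[sizeof(in)];
	struct kfifo *fifo;
	unsigned int n;

	fifo = kfifo_alloc(64, GFP_KERNEL, &example_fifo_lock);	/* 64-byte ring buffer */

	kfifo_put(fifo, in, sizeof(in));	/* returns how many bytes actually fit */
	n = kfifo_get(fifo, out, sizeof(out));	/* returns how many bytes were copied out */
	printk("kfifo roundtrip moved %u bytes\n", n);

	kfifo_free(fifo);
}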
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 7738749e1285..15a806ad61ee 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -56,9 +56,7 @@ int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *,
sector_t, unsigned int[]);
void mb_cache_entry_rehash(struct mb_cache_entry *, unsigned int[]);
void mb_cache_entry_release(struct mb_cache_entry *);
-void mb_cache_entry_takeout(struct mb_cache_entry *);
void mb_cache_entry_free(struct mb_cache_entry *);
-struct mb_cache_entry *mb_cache_entry_dup(struct mb_cache_entry *);
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *,
struct block_device *,
sector_t);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7c36a10f6720..b812151cdd07 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -410,35 +410,6 @@ extern struct pglist_data contig_page_data;
#error ZONES_SHIFT > MAX_ZONES_SHIFT
#endif
-extern DECLARE_BITMAP(node_online_map, MAX_NUMNODES);
-
-#if defined(CONFIG_DISCONTIGMEM) || defined(CONFIG_NUMA)
-
-#define node_online(node) test_bit(node, node_online_map)
-#define node_set_online(node) set_bit(node, node_online_map)
-#define node_set_offline(node) clear_bit(node, node_online_map)
-static inline unsigned int num_online_nodes(void)
-{
- int i, num = 0;
-
- for(i = 0; i < MAX_NUMNODES; i++){
- if (node_online(i))
- num++;
- }
- return num;
-}
-
-#else /* !CONFIG_DISCONTIGMEM && !CONFIG_NUMA */
-
-#define node_online(node) \
- ({ BUG_ON((node) != 0); test_bit(node, node_online_map); })
-#define node_set_online(node) \
- ({ BUG_ON((node) != 0); set_bit(node, node_online_map); })
-#define node_set_offline(node) \
- ({ BUG_ON((node) != 0); clear_bit(node, node_online_map); })
-#define num_online_nodes() 1
-
-#endif /* CONFIG_DISCONTIGMEM || CONFIG_NUMA */
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index fbf2dfc90015..bebd8bc1fd20 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -550,40 +550,20 @@ struct obsolete_modparm {
#define MODULE_PARM(var,type) \
struct obsolete_modparm __parm_##var __attribute__((section("__obsparm"))) = \
{ __stringify(var), type };
-
-static inline void __deprecated MOD_INC_USE_COUNT(struct module *module)
-{
- __unsafe(module);
-
-#if defined(CONFIG_MODULE_UNLOAD) && defined(MODULE)
- local_inc(&module->ref[get_cpu()].count);
- put_cpu();
-#else
- (void)try_module_get(module);
-#endif
-}
-
-static inline void __deprecated MOD_DEC_USE_COUNT(struct module *module)
-{
- module_put(module);
-}
-
-#define MOD_INC_USE_COUNT MOD_INC_USE_COUNT(THIS_MODULE)
-#define MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT(THIS_MODULE)
#else
#define MODULE_PARM(var,type)
-#define MOD_INC_USE_COUNT do { } while (0)
-#define MOD_DEC_USE_COUNT do { } while (0)
#endif
#define __MODULE_STRING(x) __stringify(x)
/* Use symbol_get and symbol_put instead. You'll thank me. */
#define HAVE_INTER_MODULE
-extern void inter_module_register(const char *, struct module *, const void *);
-extern void inter_module_unregister(const char *);
-extern const void *inter_module_get(const char *);
-extern const void *inter_module_get_request(const char *, const char *);
-extern void inter_module_put(const char *);
+extern void __deprecated inter_module_register(const char *,
+ struct module *, const void *);
+extern void __deprecated inter_module_unregister(const char *);
+extern const void * __deprecated inter_module_get(const char *);
+extern const void * __deprecated inter_module_get_request(const char *,
+ const char *);
+extern void __deprecated inter_module_put(const char *);
#endif /* _LINUX_MODULE_H */
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
new file mode 100644
index 000000000000..4de843d94147
--- /dev/null
+++ b/include/linux/nodemask.h
@@ -0,0 +1,326 @@
+#ifndef __LINUX_NODEMASK_H
+#define __LINUX_NODEMASK_H
+
+/*
+ * Nodemasks provide a bitmap suitable for representing the
+ * set of Nodes in a system, one bit position per Node number.
+ *
+ * See detailed comments in the file linux/bitmap.h describing the
+ * data type on which these nodemasks are based.
+ *
+ * For details of nodemask_scnprintf() and nodemask_parse(),
+ * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
+ *
+ * The available nodemask operations are:
+ *
+ * void node_set(node, mask) turn on bit 'node' in mask
+ * void node_clear(node, mask) turn off bit 'node' in mask
+ * void nodes_setall(mask) set all bits
+ * void nodes_clear(mask) clear all bits
+ * int node_isset(node, mask) true iff bit 'node' set in mask
+ * int node_test_and_set(node, mask) test and set bit 'node' in mask
+ *
+ * void nodes_and(dst, src1, src2) dst = src1 & src2 [intersection]
+ * void nodes_or(dst, src1, src2) dst = src1 | src2 [union]
+ * void nodes_xor(dst, src1, src2) dst = src1 ^ src2
+ * void nodes_andnot(dst, src1, src2) dst = src1 & ~src2
+ * void nodes_complement(dst, src) dst = ~src
+ *
+ * int nodes_equal(mask1, mask2) Does mask1 == mask2?
+ * int nodes_intersects(mask1, mask2) Do mask1 and mask2 intersect?
+ * int nodes_subset(mask1, mask2) Is mask1 a subset of mask2?
+ * int nodes_empty(mask) Is mask empty (no bits set)?
+ * int nodes_full(mask) Is mask full (all bits set)?
+ * int nodes_weight(mask) Hamming weight - number of set bits
+ *
+ * void nodes_shift_right(dst, src, n) Shift right
+ * void nodes_shift_left(dst, src, n) Shift left
+ *
+ * int first_node(mask) Number lowest set bit, or MAX_NUMNODES
+ * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
+ *
+ * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
+ * NODE_MASK_ALL Initializer - all bits set
+ * NODE_MASK_NONE Initializer - no bits set
+ * unsigned long *nodes_addr(mask) Array of unsigned long's in mask
+ *
+ * int nodemask_scnprintf(buf, len, mask) Format nodemask for printing
+ * int nodemask_parse(ubuf, ulen, mask) Parse ascii string as nodemask
+ *
+ * for_each_node_mask(node, mask) for-loop node over mask
+ *
+ * int num_online_nodes() Number of online Nodes
+ * int num_possible_nodes() Number of all possible Nodes
+ *
+ * int node_online(node) Is some node online?
+ * int node_possible(node) Is some node possible?
+ *
+ * int any_online_node(mask) First online node in mask
+ *
+ * node_set_online(node) set bit 'node' in node_online_map
+ * node_set_offline(node) clear bit 'node' in node_online_map
+ *
+ * for_each_node(node) for-loop node over node_possible_map
+ * for_each_online_node(node) for-loop node over node_online_map
+ *
+ * Subtlety:
+ * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway)
+ * to generate slightly worse code. So use a simple one-line #define
+ * for node_isset(), instead of wrapping an inline inside a macro, the
+ * way we do the other calls.
+ */
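The operations listed above compose directly; a small sketch using only the documented calls (the two-node mask and the printk are purely illustrative):

static void example_nodemask_walk(void)
{
	nodemask_t mask = NODE_MASK_NONE;	/* start with no bits set */
	int node;

	node_set(0, mask);
	node_set(1, mask);			/* pretend nodes 0 and 1 exist */

	if (nodes_weight(mask) == 2 && node_isset(1, mask)) {
		for_each_node_mask(node, mask)	/* visits node 0, then node 1 */
			printk("node %d is in the mask\n", node);
	}
}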
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/bitmap.h>
+#include <linux/numa.h>
+#include <asm/bug.h>
+
+typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
+extern nodemask_t _unused_nodemask_arg_;
+
+#define node_set(node, dst) __node_set((node), &(dst))
+static inline void __node_set(int node, volatile nodemask_t *dstp)
+{
+ set_bit(node, dstp->bits);
+}
+
+#define node_clear(node, dst) __node_clear((node), &(dst))
+static inline void __node_clear(int node, volatile nodemask_t *dstp)
+{
+ clear_bit(node, dstp->bits);
+}
+
+#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
+static inline void __nodes_setall(nodemask_t *dstp, int nbits)
+{
+ bitmap_fill(dstp->bits, nbits);
+}
+
+#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
+static inline void __nodes_clear(nodemask_t *dstp, int nbits)
+{
+ bitmap_zero(dstp->bits, nbits);
+}
+
+/* No static inline type checking - see Subtlety (1) above. */
+#define node_isset(node, nodemask) test_bit((node), (nodemask).bits)
+
+#define node_test_and_set(node, nodemask) \
+ __node_test_and_set((node), &(nodemask))
+static inline int __node_test_and_set(int node, nodemask_t *addr)
+{
+ return test_and_set_bit(node, addr->bits);
+}
+
+#define nodes_and(dst, src1, src2) \
+ __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
+static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_or(dst, src1, src2) \
+ __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
+static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_xor(dst, src1, src2) \
+ __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
+static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_andnot(dst, src1, src2) \
+ __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
+static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_complement(dst, src) \
+ __nodes_complement(&(dst), &(src), MAX_NUMNODES)
+static inline void __nodes_complement(nodemask_t *dstp,
+ const nodemask_t *srcp, int nbits)
+{
+ bitmap_complement(dstp->bits, srcp->bits, nbits);
+}
+
+#define nodes_equal(src1, src2) \
+ __nodes_equal(&(src1), &(src2), MAX_NUMNODES)
+static inline int __nodes_equal(const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ return bitmap_equal(src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_intersects(src1, src2) \
+ __nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
+static inline int __nodes_intersects(const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_subset(src1, src2) \
+ __nodes_subset(&(src1), &(src2), MAX_NUMNODES)
+static inline int __nodes_subset(const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ return bitmap_subset(src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
+static inline int __nodes_empty(const nodemask_t *srcp, int nbits)
+{
+ return bitmap_empty(srcp->bits, nbits);
+}
+
+#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
+static inline int __nodes_full(const nodemask_t *srcp, int nbits)
+{
+ return bitmap_full(srcp->bits, nbits);
+}
+
+#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
+static inline int __nodes_weight(const nodemask_t *srcp, int nbits)
+{
+ return bitmap_weight(srcp->bits, nbits);
+}
+
+#define nodes_shift_right(dst, src, n) \
+ __nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
+static inline void __nodes_shift_right(nodemask_t *dstp,
+ const nodemask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
+}
+
+#define nodes_shift_left(dst, src, n) \
+ __nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
+static inline void __nodes_shift_left(nodemask_t *dstp,
+ const nodemask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+}
+
+#define first_node(src) __first_node(&(src), MAX_NUMNODES)
+static inline int __first_node(const nodemask_t *srcp, int nbits)
+{
+ return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
+}
+
+#define next_node(n, src) __next_node((n), &(src), MAX_NUMNODES)
+static inline int __next_node(int n, const nodemask_t *srcp, int nbits)
+{
+ return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
+}
+
+#define nodemask_of_node(node) \
+({ \
+ typeof(_unused_nodemask_arg_) m; \
+ if (sizeof(m) == sizeof(unsigned long)) { \
+ m.bits[0] = 1UL<<(node); \
+ } else { \
+ nodes_clear(m); \
+ node_set((node), m); \
+ } \
+ m; \
+})
+
+#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)
+
+#if MAX_NUMNODES <= BITS_PER_LONG
+
+#define NODE_MASK_ALL \
+((nodemask_t) { { \
+ [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \
+} })
+
+#else
+
+#define NODE_MASK_ALL \
+((nodemask_t) { { \
+ [0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL, \
+ [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \
+} })
+
+#endif
+
+#define NODE_MASK_NONE \
+((nodemask_t) { { \
+ [0 ... BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL \
+} })
+
+#define nodes_addr(src) ((src).bits)
+
+#define nodemask_scnprintf(buf, len, src) \
+ __nodemask_scnprintf((buf), (len), &(src), MAX_NUMNODES)
+static inline int __nodemask_scnprintf(char *buf, int len,
+ const nodemask_t *srcp, int nbits)
+{
+ return bitmap_scnprintf(buf, len, srcp->bits, nbits);
+}
+
+#define nodemask_parse(ubuf, ulen, src) \
+ __nodemask_parse((ubuf), (ulen), &(src), MAX_NUMNODES)
+static inline int __nodemask_parse(const char __user *buf, int len,
+ nodemask_t *dstp, int nbits)
+{
+ return bitmap_parse(buf, len, dstp->bits, nbits);
+}
+
+#if MAX_NUMNODES > 1
+#define for_each_node_mask(node, mask) \
+ for ((node) = first_node(mask); \
+ (node) < MAX_NUMNODES; \
+ (node) = next_node((node), (mask)))
+#else /* MAX_NUMNODES == 1 */
+#define for_each_node_mask(node, mask) \
+ if (!nodes_empty(mask)) \
+ for ((node) = 0; (node) < 1; (node)++)
+#endif /* MAX_NUMNODES */
+
+/*
+ * The following particular system nodemasks and operations
+ * on them manage all possible and online nodes.
+ */
+
+extern nodemask_t node_online_map;
+extern nodemask_t node_possible_map;
+
+#if MAX_NUMNODES > 1
+#define num_online_nodes() nodes_weight(node_online_map)
+#define num_possible_nodes() nodes_weight(node_possible_map)
+#define node_online(node) node_isset((node), node_online_map)
+#define node_possible(node) node_isset((node), node_possible_map)
+#else
+#define num_online_nodes() 1
+#define num_possible_nodes() 1
+#define node_online(node) ((node) == 0)
+#define node_possible(node) ((node) == 0)
+#endif
+
+#define any_online_node(mask) \
+({ \
+ int node; \
+ for_each_node_mask(node, (mask)) \
+ if (node_online(node)) \
+ break; \
+ node; \
+})
+
+#define node_set_online(node) set_bit((node), node_online_map.bits)
+#define node_set_offline(node) clear_bit((node), node_online_map.bits)
+
+#define for_each_node(node) for_each_node_mask((node), node_possible_map)
+#define for_each_online_node(node) for_each_node_mask((node), node_online_map)
+
+#endif /* __LINUX_NODEMASK_H */
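
The macros above are thin wrappers around the lib/bitmap.h primitives, so a caller manipulates a nodemask_t much like any other bitmap. A minimal kernel-side sketch, assuming MAX_NUMNODES is large enough for the node numbers used here (the function and node numbers are illustrative only):

static void nodemask_example(void)
{
	nodemask_t wanted = NODE_MASK_NONE;	/* start with no bits set */
	nodemask_t usable;
	int node;

	node_set(0, wanted);			/* request node 0 */
	node_set(2, wanted);			/* request node 2 */

	/* keep only the requested nodes that are actually online */
	nodes_and(usable, wanted, node_online_map);

	for_each_node_mask(node, usable)
		printk("node %d is requested and online\n", node);

	if (nodes_empty(usable))
		printk("no usable nodes\n");
}

When only a single node is needed, any_online_node(wanted) returns the first online node in the mask directly.
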
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index e6e43ce82b55..39cca92a8d63 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -5,14 +5,14 @@
* pages. A pagevec is a multipage container which is used for that.
*/
-#define PAGEVEC_SIZE 16
+#define PAGEVEC_SIZE 15
struct page;
struct address_space;
struct pagevec {
- unsigned nr;
- int cold;
+ unsigned short nr;
+ unsigned short cold;
struct page *pages[PAGEVEC_SIZE];
};
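
Shrinking PAGEVEC_SIZE from 16 to 15 while narrowing nr and cold to unsigned short looks like a sizing change: on a 64-bit machine the counters now take 4 bytes (padded to 8) ahead of 15 pointers, for 128 bytes total, versus 136 bytes for the old layout. The arithmetic can be checked with a small userspace sketch; void * stands in for struct page * purely for illustration:

#include <stdio.h>

#define PAGEVEC_SIZE 15

struct pagevec_new {			/* mirrors the new layout */
	unsigned short nr;
	unsigned short cold;
	void *pages[PAGEVEC_SIZE];
};

struct pagevec_old {			/* mirrors the old layout */
	unsigned nr;
	int cold;
	void *pages[16];
};

int main(void)
{
	/* On an LP64 machine this prints 128 and 136. */
	printf("new: %zu bytes, old: %zu bytes\n",
	       sizeof(struct pagevec_new), sizeof(struct pagevec_old));
	return 0;
}
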
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
new file mode 100644
index 000000000000..4e2d2a942ecb
--- /dev/null
+++ b/include/linux/pktcdvd.h
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License. See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
+ * DVD-RW devices.
+ *
+ */
+#ifndef __PKTCDVD_H
+#define __PKTCDVD_H
+
+#include <linux/types.h>
+
+/*
+ * 1 for normal debug messages, 2 is very verbose. 0 to turn it off.
+ */
+#define PACKET_DEBUG 1
+
+#define MAX_WRITERS 8
+
+#define PKT_RB_POOL_SIZE 512
+
+/*
+ * How long we should hold a non-full packet before starting data gathering.
+ */
+#define PACKET_WAIT_TIME (HZ * 5 / 1000)
+
+/*
+ * use drive write caching -- we need deferred error handling to be
+ * able to successfully recover with this option (drive will return good
+ * status as soon as the cdb is validated).
+ */
+#if defined(CONFIG_CDROM_PKTCDVD_WCACHE)
+#define USE_WCACHING 1
+#else
+#define USE_WCACHING 0
+#endif
+
+/*
+ * No user-serviceable parts beyond this point ->
+ */
+
+/*
+ * device types
+ */
+#define PACKET_CDR 1
+#define PACKET_CDRW 2
+#define PACKET_DVDR 3
+#define PACKET_DVDRW 4
+
+/*
+ * flags
+ */
+#define PACKET_WRITABLE 1 /* pd is writable */
+#define PACKET_NWA_VALID 2 /* next writable address valid */
+#define PACKET_LRA_VALID 3 /* last recorded address valid */
+#define PACKET_MERGE_SEGS 4 /* perform segment merging to keep */
+ /* underlying cdrom device happy */
+
+/*
+ * Disc status -- from READ_DISC_INFO
+ */
+#define PACKET_DISC_EMPTY 0
+#define PACKET_DISC_INCOMPLETE 1
+#define PACKET_DISC_COMPLETE 2
+#define PACKET_DISC_OTHER 3
+
+/*
+ * write type, and corresponding data block type
+ */
+#define PACKET_MODE1 1
+#define PACKET_MODE2 2
+#define PACKET_BLOCK_MODE1 8
+#define PACKET_BLOCK_MODE2 10
+
+/*
+ * Last session/border status
+ */
+#define PACKET_SESSION_EMPTY 0
+#define PACKET_SESSION_INCOMPLETE 1
+#define PACKET_SESSION_RESERVED 2
+#define PACKET_SESSION_COMPLETE 3
+
+#define PACKET_MCN "4a656e734178626f65323030300000"
+
+#undef PACKET_USE_LS
+
+#define PKT_CTRL_CMD_SETUP 0
+#define PKT_CTRL_CMD_TEARDOWN 1
+#define PKT_CTRL_CMD_STATUS 2
+
+struct pkt_ctrl_command {
+ __u32 command; /* in: Setup, teardown, status */
+ __u32 dev_index; /* in/out: Device index */
+ __u32 dev; /* in/out: Device nr for cdrw device */
+ __u32 pkt_dev; /* in/out: Device nr for packet device */
+ __u32 num_devices; /* out: Largest device index + 1 */
+ __u32 padding; /* Not used */
+};
+
+/*
+ * packet ioctls
+ */
+#define PACKET_IOCTL_MAGIC ('X')
+#define PACKET_CTRL_CMD _IOWR(PACKET_IOCTL_MAGIC, 1, struct pkt_ctrl_command)
+
+#ifdef __KERNEL__
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/cdrom.h>
+
+struct packet_settings
+{
+ __u8 size; /* packet size in (512 byte) sectors */
+ __u8 fp; /* fixed packets */
+ __u8 link_loss; /* the rest is specified
+ * as per Mt Fuji */
+ __u8 write_type;
+ __u8 track_mode;
+ __u8 block_mode;
+};
+
+/*
+ * Very crude stats for now
+ */
+struct packet_stats
+{
+ unsigned long pkt_started;
+ unsigned long pkt_ended;
+ unsigned long secs_w;
+ unsigned long secs_rg;
+ unsigned long secs_r;
+};
+
+struct packet_cdrw
+{
+ struct list_head pkt_free_list;
+ struct list_head pkt_active_list;
+ spinlock_t active_list_lock; /* Serialize access to pkt_active_list */
+ struct task_struct *thread;
+ atomic_t pending_bios;
+};
+
+/*
+ * Switch to high speed reading after reading this many kilobytes
+ * with no interspersed writes.
+ */
+#define HI_SPEED_SWITCH 512
+
+struct packet_iosched
+{
+ atomic_t attention; /* Set to non-zero when queue processing is needed */
+ int writing; /* Non-zero when writing, zero when reading */
+ spinlock_t lock; /* Protecting read/write queue manipulations */
+ struct bio *read_queue;
+ struct bio *read_queue_tail;
+ struct bio *write_queue;
+ struct bio *write_queue_tail;
+ int high_prio_read; /* An important read request has been queued */
+ int successive_reads;
+};
+
+/*
+ * 32 buffers of 2048 bytes
+ */
+#define PACKET_MAX_SIZE 32
+#define PAGES_PER_PACKET (PACKET_MAX_SIZE * CD_FRAMESIZE / PAGE_SIZE)
+#define PACKET_MAX_SECTORS (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9)
+
+enum packet_data_state {
+ PACKET_IDLE_STATE, /* Not used at the moment */
+ PACKET_WAITING_STATE, /* Waiting for more bios to arrive, so */
+ /* we don't have to do as much */
+ /* data gathering */
+ PACKET_READ_WAIT_STATE, /* Waiting for reads to fill in holes */
+ PACKET_WRITE_WAIT_STATE, /* Waiting for the write to complete */
+ PACKET_RECOVERY_STATE, /* Recover after read/write errors */
+ PACKET_FINISHED_STATE, /* After write has finished */
+
+ PACKET_NUM_STATES /* Number of possible states */
+};
+
+/*
+ * Information needed for writing a single packet
+ */
+struct pktcdvd_device;
+
+struct packet_data
+{
+ struct list_head list;
+
+ spinlock_t lock; /* Lock protecting state transitions and */
+ /* orig_bios list */
+
+ struct bio *orig_bios; /* Original bios passed to pkt_make_request */
+ struct bio *orig_bios_tail;/* that will be handled by this packet */
+ int write_size; /* Total size of all bios in the orig_bios */
+ /* list, measured in number of frames */
+
+ struct bio *w_bio; /* The bio we will send to the real CD */
+ /* device once we have all data for the */
+ /* packet we are going to write */
+ sector_t sector; /* First sector in this packet */
+ int frames; /* Number of frames in this packet */
+
+ enum packet_data_state state; /* Current state */
+ atomic_t run_sm; /* Incremented whenever the state */
+ /* machine needs to be run */
+ long sleep_time; /* Set this to non-zero to make the state */
+ /* machine run after this many jiffies. */
+
+ atomic_t io_wait; /* Number of pending IO operations */
+ atomic_t io_errors; /* Number of read/write errors during IO */
+
+ struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
+ struct page *pages[PAGES_PER_PACKET];
+
+ int cache_valid; /* If non-zero, the data for the zone defined */
+ /* by the sector variable is completely cached */
+ /* in the pages[] vector. */
+
+ int id; /* ID number for debugging */
+ struct pktcdvd_device *pd;
+};
+
+struct pkt_rb_node {
+ struct rb_node rb_node;
+ struct bio *bio;
+};
+
+struct packet_stacked_data
+{
+ struct bio *bio; /* Original read request bio */
+ struct pktcdvd_device *pd;
+};
+#define PSD_POOL_SIZE 64
+
+struct pktcdvd_device
+{
+ struct block_device *bdev; /* dev attached */
+ dev_t pkt_dev; /* our dev */
+ char name[20];
+ struct packet_settings settings;
+ struct packet_stats stats;
+ int refcnt; /* Open count */
+ int write_speed; /* current write speed, kB/s */
+ int read_speed; /* current read speed, kB/s */
+ unsigned long offset; /* start offset */
+ __u8 mode_offset; /* 0 / 8 */
+ __u8 type;
+ unsigned long flags;
+ __u16 mmc3_profile;
+ __u32 nwa; /* next writable address */
+ __u32 lra; /* last recorded address */
+ struct packet_cdrw cdrw;
+ wait_queue_head_t wqueue;
+
+ spinlock_t lock; /* Serialize access to bio_queue */
+ struct rb_root bio_queue; /* Work queue of bios we need to handle */
+ int bio_queue_size; /* Number of nodes in bio_queue */
+ sector_t current_sector; /* Keep track of where the elevator is */
+ atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */
+ /* needs to be run. */
+ mempool_t *rb_pool; /* mempool for pkt_rb_node allocations */
+
+ struct packet_iosched iosched;
+ struct gendisk *disk;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* __PKTCDVD_H */
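
The pkt_ctrl_command structure and the PACKET_CTRL_CMD ioctl are the user-space interface for binding a packet device to a CD/DVD writer. A hedged user-space sketch follows; the control node path and the (11, 0) device numbers are assumptions for illustration and depend on how the system names its devices:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/ioctl.h>
#include <linux/pktcdvd.h>

int main(void)
{
	struct pkt_ctrl_command cmd = { 0 };
	int fd;

	fd = open("/dev/pktcdvd/control", O_RDONLY);	/* assumed path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	cmd.command = PKT_CTRL_CMD_SETUP;
	cmd.dev = makedev(11, 0);	/* e.g. /dev/sr0; assumed numbers */

	if (ioctl(fd, PACKET_CTRL_CMD, &cmd) < 0) {
		perror("PACKET_CTRL_CMD");
		close(fd);
		return 1;
	}

	/* The driver fills in the index and dev_t of the new packet device. */
	printf("packet device index %u, pkt_dev 0x%x\n",
	       cmd.dev_index, cmd.pkt_dev);
	close(fd);
	return 0;
}
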
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 7bfd2d43963e..6446e4f65e93 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -143,11 +143,6 @@ int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data);
*/
int pm_send_all(pm_request_t rqst, void *data);
-/*
- * Find a device
- */
-struct pm_dev *pm_find(pm_dev_t type, struct pm_dev *from);
-
static inline void pm_access(struct pm_dev *dev) {}
static inline void pm_dev_idle(struct pm_dev *dev) {}
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index aff9a6adb39e..fc74ef3fef36 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -79,7 +79,6 @@ extern struct posix_acl *posix_acl_from_mode(mode_t, int);
extern int posix_acl_equiv_mode(const struct posix_acl *, mode_t *);
extern int posix_acl_create_masq(struct posix_acl *, mode_t *);
extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
-extern int posix_acl_masq_nfs_mode(struct posix_acl *, mode_t *);
extern struct posix_acl *get_posix_acl(struct inode *, int);
extern int set_posix_acl(struct inode *, int, struct posix_acl *);
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 54333c98e532..edb036b43597 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -49,7 +49,6 @@
# define PR_TIMING_TIMESTAMP 1 /* Accurate timestamp based
process timing */
-
#define PR_SET_NAME 15 /* Set process name */
#endif /* _LINUX_PRCTL_H */
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 6eacc2c653f1..df62d2ac38a7 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -438,7 +438,7 @@ static inline void set_offset_v2_k_offset( struct offset_v2 *v2, loff_t offset )
/* Key of an item determines its location in the S+tree, and
is composed of 4 components */
-struct key {
+struct reiserfs_key {
__u32 k_dir_id; /* packing locality: by default parent
directory object id */
__u32 k_objectid; /* object identifier */
@@ -450,7 +450,7 @@ struct key {
struct cpu_key {
- struct key on_disk_key;
+ struct reiserfs_key on_disk_key;
int version;
int key_length; /* 3 in all cases but direct2indirect and
indirect2direct conversion */
@@ -470,7 +470,7 @@ struct cpu_key {
#define KEY_FOUND 1
#define KEY_NOT_FOUND 0
-#define KEY_SIZE (sizeof(struct key))
+#define KEY_SIZE (sizeof(struct reiserfs_key))
#define SHORT_KEY_SIZE (sizeof (__u32) + sizeof (__u32))
/* return values for search_by_key and clones */
@@ -503,7 +503,7 @@ struct item_head
{
/* Everything in the tree is found by searching for it based on
* its key.*/
- struct key ih_key;
+ struct reiserfs_key ih_key;
union {
/* The free space in the last unformatted node of an
indirect item if this is an indirect item. This
@@ -602,7 +602,7 @@ static inline __u32 type2uniqueness (int type)
// there is no way to get version of object from key, so, provide
// version to these defines
//
-static inline loff_t le_key_k_offset (int version, const struct key * key)
+static inline loff_t le_key_k_offset (int version, const struct reiserfs_key * key)
{
return (version == KEY_FORMAT_3_5) ?
le32_to_cpu( key->u.k_offset_v1.k_offset ) :
@@ -614,7 +614,7 @@ static inline loff_t le_ih_k_offset (const struct item_head * ih)
return le_key_k_offset (ih_version (ih), &(ih->ih_key));
}
-static inline loff_t le_key_k_type (int version, const struct key * key)
+static inline loff_t le_key_k_type (int version, const struct reiserfs_key * key)
{
return (version == KEY_FORMAT_3_5) ?
uniqueness2type( le32_to_cpu( key->u.k_offset_v1.k_uniqueness)) :
@@ -627,7 +627,7 @@ static inline loff_t le_ih_k_type (const struct item_head * ih)
}
-static inline void set_le_key_k_offset (int version, struct key * key, loff_t offset)
+static inline void set_le_key_k_offset (int version, struct reiserfs_key * key, loff_t offset)
{
(version == KEY_FORMAT_3_5) ?
(void)(key->u.k_offset_v1.k_offset = cpu_to_le32 (offset)) : /* jdm check */
@@ -641,7 +641,7 @@ static inline void set_le_ih_k_offset (struct item_head * ih, loff_t offset)
}
-static inline void set_le_key_k_type (int version, struct key * key, int type)
+static inline void set_le_key_k_type (int version, struct reiserfs_key * key, int type)
{
(version == KEY_FORMAT_3_5) ?
(void)(key->u.k_offset_v1.k_uniqueness = cpu_to_le32(type2uniqueness(type))):
@@ -738,7 +738,7 @@ static inline void cpu_key_k_offset_dec (struct cpu_key * key)
/* object identifier for root dir */
#define REISERFS_ROOT_OBJECTID 2
#define REISERFS_ROOT_PARENT_OBJECTID 1
-extern struct key root_key;
+extern struct reiserfs_key root_key;
@@ -760,7 +760,7 @@ struct block_head {
__u16 blk_free_space; /* Block free space in bytes. */
__u16 blk_reserved;
/* dump this in v4/planA */
- struct key blk_right_delim_key; /* kept only for compatibility */
+ struct reiserfs_key blk_right_delim_key; /* kept only for compatibility */
};
#define BLKH_SIZE (sizeof(struct block_head))
@@ -1301,7 +1301,7 @@ struct path var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,}
#define UNFM_P_SHIFT 2
// in in-core inode key is stored on le form
-#define INODE_PKEY(inode) ((struct key *)(REISERFS_I(inode)->i_key))
+#define INODE_PKEY(inode) ((struct reiserfs_key *)(REISERFS_I(inode)->i_key))
#define MAX_UL_INT 0xffffffff
#define MAX_INT 0x7ffffff
@@ -1479,7 +1479,7 @@ struct tree_balance
int fs_gen; /* saved value of `reiserfs_generation' counter
see FILESYSTEM_CHANGED() macro in reiserfs_fs.h */
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
- struct key key; /* key pointer, to pass to block allocator or
+ struct reiserfs_key key; /* key pointer, to pass to block allocator or
another low-level subsystem */
#endif
} ;
@@ -1543,7 +1543,7 @@ struct buffer_info {
struct item_operations {
int (*bytes_number) (struct item_head * ih, int block_size);
void (*decrement_key) (struct cpu_key *);
- int (*is_left_mergeable) (struct key * ih, unsigned long bsize);
+ int (*is_left_mergeable) (struct reiserfs_key * ih, unsigned long bsize);
void (*print_item) (struct item_head *, char * item);
void (*check_item) (struct item_head *, char * item);
@@ -1594,7 +1594,7 @@ extern struct item_operations * item_ops [TYPE_ANY + 1];
#define B_N_PITEM_HEAD(bh,item_num) ( (struct item_head * )((bh)->b_data + BLKH_SIZE) + (item_num) )
/* get key */
-#define B_N_PDELIM_KEY(bh,item_num) ( (struct key * )((bh)->b_data + BLKH_SIZE) + (item_num) )
+#define B_N_PDELIM_KEY(bh,item_num) ( (struct reiserfs_key * )((bh)->b_data + BLKH_SIZE) + (item_num) )
/* get the key */
#define B_N_PKEY(bh,item_num) ( &(B_N_PITEM_HEAD(bh,item_num)->ih_key) )
@@ -1832,11 +1832,11 @@ extern void copy_item_head(struct item_head * p_v_to,
const struct item_head * p_v_from);
// first key is in cpu form, second - le
-extern int comp_keys (const struct key * le_key,
+extern int comp_keys (const struct reiserfs_key * le_key,
const struct cpu_key * cpu_key);
-extern int comp_short_keys (const struct key * le_key,
+extern int comp_short_keys (const struct reiserfs_key * le_key,
const struct cpu_key * cpu_key);
-extern void le_key2cpu_key (struct cpu_key * to, const struct key * from);
+extern void le_key2cpu_key (struct cpu_key * to, const struct reiserfs_key * from);
// both are cpu keys
extern int comp_cpu_keys (const struct cpu_key *, const struct cpu_key *);
@@ -1845,13 +1845,13 @@ extern int comp_short_cpu_keys (const struct cpu_key *,
extern void cpu_key2cpu_key (struct cpu_key *, const struct cpu_key *);
// both are in le form
-extern int comp_le_keys (const struct key *, const struct key *);
-extern int comp_short_le_keys (const struct key *, const struct key *);
+extern int comp_le_keys (const struct reiserfs_key *, const struct reiserfs_key *);
+extern int comp_short_le_keys (const struct reiserfs_key *, const struct reiserfs_key *);
//
// get key version from on disk key - kludge
//
-static inline int le_key_version (const struct key * key)
+static inline int le_key_version (const struct reiserfs_key * key)
{
int type;
@@ -1864,14 +1864,14 @@ static inline int le_key_version (const struct key * key)
}
-static inline void copy_key (struct key *to, const struct key *from)
+static inline void copy_key (struct reiserfs_key *to, const struct reiserfs_key *from)
{
memcpy (to, from, KEY_SIZE);
}
int comp_items (const struct item_head * stored_ih, const struct path * p_s_path);
-const struct key * get_rkey (const struct path * p_s_chk_path,
+const struct reiserfs_key * get_rkey (const struct path * p_s_chk_path,
const struct super_block * p_s_sb);
inline int bin_search (const void * p_v_key, const void * p_v_base,
int p_n_num, int p_n_width, int * p_n_pos);
@@ -1913,7 +1913,7 @@ int reiserfs_delete_item (struct reiserfs_transaction_handle *th,
struct buffer_head * p_s_un_bh);
void reiserfs_delete_solid_item (struct reiserfs_transaction_handle *th,
- struct inode *inode, struct key * key);
+ struct inode *inode, struct reiserfs_key * key);
int reiserfs_delete_object (struct reiserfs_transaction_handle *th, struct inode * p_s_inode);
int reiserfs_do_truncate (struct reiserfs_transaction_handle *th,
struct inode * p_s_inode, struct page *,
@@ -2131,7 +2131,7 @@ struct buffer_head * get_FEB (struct tree_balance *);
struct __reiserfs_blocknr_hint {
struct inode * inode; /* inode passed to allocator, if we allocate unf. nodes */
long block; /* file offset, in blocks */
- struct key key;
+ struct reiserfs_key key;
struct path * path; /* search path, used by allocator to deternine search_start by
* various ways */
struct reiserfs_transaction_handle * th; /* transaction handle is needed to log super blocks and
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dc3f297a726d..c8f981f108d4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -227,7 +227,7 @@ struct mm_struct {
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
unsigned long rss, total_vm, locked_vm, shared_vm;
- unsigned long exec_vm, stack_vm, reserved_vm, def_flags;
+ unsigned long exec_vm, stack_vm, reserved_vm, def_flags, nr_ptes;
unsigned long saved_auxv[42]; /* for /proc/PID/auxv */
@@ -358,6 +358,11 @@ struct user_struct {
unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
unsigned long locked_shm; /* How many pages of mlocked shm ? */
+#ifdef CONFIG_KEYS
+ struct key *uid_keyring; /* UID specific keyring */
+ struct key *session_keyring; /* UID's default session keyring */
+#endif
+
/* Hash table maintenance information */
struct list_head uidhash_list;
uid_t uid;
@@ -611,6 +616,11 @@ struct task_struct {
kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
unsigned keep_capabilities:1;
struct user_struct *user;
+#ifdef CONFIG_KEYS
+ struct key *session_keyring; /* keyring inherited over fork */
+ struct key *process_keyring; /* keyring private to this process (CLONE_THREAD) */
+ struct key *thread_keyring; /* keyring private to this thread */
+#endif
unsigned short used_math;
char comm[16];
/* file system info */
@@ -644,7 +654,7 @@ struct task_struct {
/* Thread group tracking */
u32 parent_exec_id;
u32 self_exec_id;
-/* Protection of (de-)allocation: mm, files, fs, tty */
+/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
spinlock_t alloc_lock;
/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
spinlock_t proc_lock;
@@ -828,7 +838,6 @@ extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
extern int kill_pg_info(int, struct siginfo *, pid_t);
-extern int kill_sl_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
@@ -977,8 +986,8 @@ static inline int thread_group_empty(task_t *p)
extern void unhash_process(struct task_struct *p);
/*
- * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm and
- * synchronises with wait4().
+ * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
+ * subscriptions and synchronises with wait4(). Also used in procfs.
*
* Nests both inside and outside of read_lock(&tasklist_lock).
* It must not be nested with write_lock_irq(&tasklist_lock),
diff --git a/include/linux/security.h b/include/linux/security.h
index a1dee9a60587..ab0941c9fca7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -27,13 +27,14 @@
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/sem.h>
-#include <linux/sysctl.h>
#include <linux/shm.h>
#include <linux/msg.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
+struct ctl_table;
+
/*
* These functions are in security/capability.c and are used
* as the default capabilities functions
@@ -395,13 +396,13 @@ struct swap_info_struct;
* Return 0 if permission is granted.
* @inode_getsecurity:
* Copy the extended attribute representation of the security label
- * associated with @name for @dentry into @buffer. @buffer may be
+ * associated with @name for @inode into @buffer. @buffer may be
* NULL to request the size of the buffer required. @size indicates
* the size of @buffer in bytes. Note that @name is the remainder
* of the attribute name after the security. prefix has been removed.
* Return number of bytes used/required on success.
* @inode_setsecurity:
- * Set the security label associated with @name for @dentry from the
+ * Set the security label associated with @name for @inode from the
* extended attribute value @value. @size indicates the size of the
* @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
* Note that @name is the remainder of the attribute name after the
@@ -409,8 +410,9 @@ struct swap_info_struct;
* Return 0 on success.
* @inode_listsecurity:
* Copy the extended attribute names for the security labels
- * associated with @dentry into @buffer. @buffer may be NULL to
- * request the size of the buffer required.
+ * associated with @inode into @buffer. The maximum size of @buffer
+ * is specified by @buffer_size. @buffer may be NULL to request
+ * the size of the buffer required.
* Returns number of bytes used/required on success.
*
* Security hooks for file operations
@@ -1029,7 +1031,7 @@ struct security_operations {
kernel_cap_t * inheritable,
kernel_cap_t * permitted);
int (*acct) (struct file * file);
- int (*sysctl) (ctl_table * table, int op);
+ int (*sysctl) (struct ctl_table * table, int op);
int (*capable) (struct task_struct * tsk, int cap);
int (*quotactl) (int cmds, int type, int id, struct super_block * sb);
int (*quota_on) (struct file * f);
@@ -1108,9 +1110,9 @@ struct security_operations {
int (*inode_getxattr) (struct dentry *dentry, char *name);
int (*inode_listxattr) (struct dentry *dentry);
int (*inode_removexattr) (struct dentry *dentry, char *name);
- int (*inode_getsecurity)(struct dentry *dentry, const char *name, void *buffer, size_t size);
- int (*inode_setsecurity)(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
- int (*inode_listsecurity)(struct dentry *dentry, char *buffer);
+ int (*inode_getsecurity)(struct inode *inode, const char *name, void *buffer, size_t size);
+ int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags);
+ int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size);
int (*file_permission) (struct file * file, int mask);
int (*file_alloc_security) (struct file * file);
@@ -1268,7 +1270,7 @@ static inline int security_acct (struct file *file)
return security_ops->acct (file);
}
-static inline int security_sysctl(ctl_table * table, int op)
+static inline int security_sysctl(struct ctl_table *table, int op)
{
return security_ops->sysctl(table, op);
}
@@ -1575,19 +1577,19 @@ static inline int security_inode_removexattr (struct dentry *dentry, char *name)
return security_ops->inode_removexattr (dentry, name);
}
-static inline int security_inode_getsecurity(struct dentry *dentry, const char *name, void *buffer, size_t size)
+static inline int security_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size)
{
- return security_ops->inode_getsecurity(dentry, name, buffer, size);
+ return security_ops->inode_getsecurity(inode, name, buffer, size);
}
-static inline int security_inode_setsecurity(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
+static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
- return security_ops->inode_setsecurity(dentry, name, value, size, flags);
+ return security_ops->inode_setsecurity(inode, name, value, size, flags);
}
-static inline int security_inode_listsecurity(struct dentry *dentry, char *buffer)
+static inline int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
{
- return security_ops->inode_listsecurity(dentry, buffer);
+ return security_ops->inode_listsecurity(inode, buffer, buffer_size);
}
static inline int security_file_permission (struct file *file, int mask)
@@ -1940,7 +1942,7 @@ static inline int security_acct (struct file *file)
return 0;
}
-static inline int security_sysctl(ctl_table * table, int op)
+static inline int security_sysctl(struct ctl_table *table, int op)
{
return 0;
}
@@ -2214,17 +2216,17 @@ static inline int security_inode_removexattr (struct dentry *dentry, char *name)
return cap_inode_removexattr(dentry, name);
}
-static inline int security_inode_getsecurity(struct dentry *dentry, const char *name, void *buffer, size_t size)
+static inline int security_inode_getsecurity(struct inode *inode, const char *name, void *buffer, size_t size)
{
return -EOPNOTSUPP;
}
-static inline int security_inode_setsecurity(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
+static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
return -EOPNOTSUPP;
}
-static inline int security_inode_listsecurity(struct dentry *dentry, char *buffer)
+static inline int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
{
return 0;
}
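
The inode_getsecurity, inode_setsecurity and inode_listsecurity hooks now receive the inode directly rather than a dentry, and inode_listsecurity gains an explicit buffer size. Below is a minimal sketch of module-side functions matching the new signatures; the function names and the "security.example" attribute are assumptions, not taken from any real security module:

#include <linux/security.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Both attribute names below are assumptions, for illustration only. */
#define EXAMPLE_XATTR_NAME	"security.example"
#define EXAMPLE_XATTR_SUFFIX	"example"

static int example_inode_listsecurity(struct inode *inode, char *buffer,
				      size_t buffer_size)
{
	const size_t len = sizeof(EXAMPLE_XATTR_NAME);

	/* @buffer may be NULL when the caller only wants the size. */
	if (buffer && len <= buffer_size)
		memcpy(buffer, EXAMPLE_XATTR_NAME, len);
	return len;
}

static int example_inode_getsecurity(struct inode *inode, const char *name,
				     void *buffer, size_t size)
{
	/* @name arrives with the "security." prefix already stripped. */
	if (strcmp(name, EXAMPLE_XATTR_SUFFIX))
		return -EOPNOTSUPP;
	/* A real module would copy the inode's label into @buffer here. */
	return 0;
}

These slot into struct security_operations via .inode_getsecurity and .inode_listsecurity exactly as before; only the first parameter and the extra buffer_size argument change.
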
diff --git a/include/linux/smb_mount.h b/include/linux/smb_mount.h
index 256900c55881..d10f00cb5703 100644
--- a/include/linux/smb_mount.h
+++ b/include/linux/smb_mount.h
@@ -38,7 +38,10 @@ struct smb_mount_data {
#define SMB_MOUNT_DIRATTR 0x0004 /* Use find_first for getattr */
#define SMB_MOUNT_CASE 0x0008 /* Be case sensitive */
#define SMB_MOUNT_UNICODE 0x0010 /* Server talks unicode */
-
+#define SMB_MOUNT_UID 0x0020 /* Use user specified uid */
+#define SMB_MOUNT_GID 0x0040 /* Use user specified gid */
+#define SMB_MOUNT_FMODE 0x0080 /* Use user specified file mode */
+#define SMB_MOUNT_DMODE 0x0100 /* Use user specified dir mode */
struct smb_mount_data_kernel {
int version;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 2a8c7faf2dcc..bc93606badbc 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -61,6 +61,7 @@ struct mq_attr;
#include <asm/siginfo.h>
#include <asm/signal.h>
#include <linux/quota.h>
+#include <linux/key.h>
asmlinkage long sys_time(int __user *tloc);
asmlinkage long sys_stime(time_t __user *tptr);
@@ -492,4 +493,18 @@ asmlinkage long sys_uselib(const char __user *library);
asmlinkage long sys_setaltroot(const char __user *altroot);
asmlinkage long sys_ni_syscall(void);
+asmlinkage long sys_add_key(const char __user *_type,
+ const char __user *_description,
+ const void __user *_payload,
+ size_t plen,
+ key_serial_t destringid);
+
+asmlinkage long sys_request_key(const char __user *_type,
+ const char __user *_description,
+ const char __user *_callout_info,
+ key_serial_t destringid);
+
+asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5);
+
#endif
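
The three key-management system calls have no C library wrappers yet, so user space reaches them through syscall(2). A hedged sketch; the __NR_add_key fallback number is architecture specific and only an assumption here, and KEY_SPEC_SESSION_KEYRING comes from the new linux/keyctl.h:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>		/* KEY_SPEC_SESSION_KEYRING */

#ifndef __NR_add_key
#define __NR_add_key 286		/* assumed i386 value; check unistd.h */
#endif

int main(void)
{
	const char payload[] = "secret";
	long serial;

	/* Add a "user"-type key to the session keyring. */
	serial = syscall(__NR_add_key, "user", "example:token",
			 payload, sizeof(payload) - 1,
			 KEY_SPEC_SESSION_KEYRING);
	if (serial < 0) {
		perror("add_key");
		return 1;
	}
	printf("key serial %ld\n", serial);
	return 0;
}
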
diff --git a/include/linux/threads.h b/include/linux/threads.h
index 047e28bd7d8e..4243c55cce87 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -30,6 +30,6 @@
/*
* A maximum of 4 million PIDs should be enough for a while:
*/
-#define PID_MAX_LIMIT (4*1024*1024)
+#define PID_MAX_LIMIT (sizeof(long) > 4 ? 4*1024*1024 : PID_MAX_DEFAULT)
#endif
diff --git a/include/linux/time.h b/include/linux/time.h
index 2111941c1af7..ae174b8ab036 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -1,9 +1,12 @@
#ifndef _LINUX_TIME_H
#define _LINUX_TIME_H
-#include <asm/param.h>
#include <linux/types.h>
+#ifdef __KERNEL__
+#include <linux/seqlock.h>
+#endif
+
#ifndef _STRUCT_TIMESPEC
#define _STRUCT_TIMESPEC
struct timespec {
@@ -24,39 +27,6 @@ struct timezone {
#ifdef __KERNEL__
-#include <linux/spinlock.h>
-#include <linux/seqlock.h>
-#include <linux/timex.h>
-#include <asm/div64.h>
-#ifndef div_long_long_rem
-
-#define div_long_long_rem(dividend,divisor,remainder) ({ \
- u64 result = dividend; \
- *remainder = do_div(result,divisor); \
- result; })
-
-#endif
-
-/*
- * Have the 32 bit jiffies value wrap 5 minutes after boot
- * so jiffies wrap bugs show up earlier.
- */
-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
-
-/*
- * Change timeval to jiffies, trying to avoid the
- * most obvious overflows..
- *
- * And some not so obvious.
- *
- * Note that we don't want to return MAX_LONG, because
- * for various timeout reasons we often end up having
- * to wait "jiffies+1" in order to guarantee that we wait
- * at _least_ "jiffies" - so "jiffies+1" had better still
- * be positive.
- */
-#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)
-
/* Parameters used to convert the timespec values */
#ifndef USEC_PER_SEC
#define USEC_PER_SEC (1000000L)
@@ -70,232 +40,6 @@ struct timezone {
#define NSEC_PER_USEC (1000L)
#endif
-/*
- * We want to do realistic conversions of time so we need to use the same
- * values the update wall clock code uses as the jiffies size. This value
- * is: TICK_NSEC (which is defined in timex.h). This
- * is a constant and is in nanoseconds. We will used scaled math
- * with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and
- * NSEC_JIFFIE_SC. Note that these defines contain nothing but
- * constants and so are computed at compile time. SHIFT_HZ (computed in
- * timex.h) adjusts the scaling for different HZ values.
-
- * Scaled math??? What is that?
- *
- * Scaled math is a way to do integer math on values that would,
- * otherwise, either overflow, underflow, or cause undesired div
- * instructions to appear in the execution path. In short, we "scale"
- * up the operands so they take more bits (more precision, less
- * underflow), do the desired operation and then "scale" the result back
- * by the same amount. If we do the scaling by shifting we avoid the
- * costly mpy and the dastardly div instructions.
-
- * Suppose, for example, we want to convert from seconds to jiffies
- * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE. The
- * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We
- * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we
- * might calculate at compile time, however, the result will only have
- * about 3-4 bits of precision (less for smaller values of HZ).
- *
- * So, we scale as follows:
- * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE);
- * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE;
- * Then we make SCALE a power of two so:
- * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE;
- * Now we define:
- * #define SEC_CONV = ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE))
- * jiff = (sec * SEC_CONV) >> SCALE;
- *
- * Often the math we use will expand beyond 32-bits so we tell C how to
- * do this and pass the 64-bit result of the mpy through the ">> SCALE"
- * which should take the result back to 32-bits. We want this expansion
- * to capture as much precision as possible. At the same time we don't
- * want to overflow so we pick the SCALE to avoid this. In this file,
- * that means using a different scale for each range of HZ values (as
- * defined in timex.h).
- *
- * For those who want to know, gcc will give a 64-bit result from a "*"
- * operator if the result is a long long AND at least one of the
- * operands is cast to long long (usually just prior to the "*" so as
- * not to confuse it into thinking it really has a 64-bit operand,
- * which, buy the way, it can do, but it take more code and at least 2
- * mpys).
-
- * We also need to be aware that one second in nanoseconds is only a
- * couple of bits away from overflowing a 32-bit word, so we MUST use
- * 64-bits to get the full range time in nanoseconds.
-
- */
-
-/*
- * Here are the scales we will use. One for seconds, nanoseconds and
- * microseconds.
- *
- * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and
- * check if the sign bit is set. If not, we bump the shift count by 1.
- * (Gets an extra bit of precision where we can use it.)
- * We know it is set for HZ = 1024 and HZ = 100 not for 1000.
- * Haven't tested others.
-
- * Limits of cpp (for #if expressions) only long (no long long), but
- * then we only need the most signicant bit.
- */
-
-#define SEC_JIFFIE_SC (31 - SHIFT_HZ)
-#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000)
-#undef SEC_JIFFIE_SC
-#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
-#endif
-#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
-#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-
-#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION \
- ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
- * USEC_ROUND is used in the timeval to jiffie conversion. See there
- * for more details. It is the scaled resolution rounding value. Note
- * that it is a 64-bit value. Since, when it is applied, we are already
- * in jiffies (albit scaled), it is nothing but the bits we will shift
- * off.
- */
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
-/*
- * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
- * into seconds. The 64-bit case will overflow if we are not careful,
- * so use the messy SH_DIV macro to do it. Still all constants.
- */
-#if BITS_PER_LONG < 64
-# define MAX_SEC_IN_JIFFIES \
- (long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
-#else /* take care of overflow on 64 bits machines */
-# define MAX_SEC_IN_JIFFIES \
- (SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
-
-#endif
-
-/*
- * Convert jiffies to milliseconds and back.
- *
- * Avoid unnecessary multiplications/divisions in the
- * two most common HZ cases:
- */
-static inline unsigned int jiffies_to_msecs(const unsigned long j)
-{
-#if HZ <= 1000 && !(1000 % HZ)
- return (1000 / HZ) * j;
-#elif HZ > 1000 && !(HZ % 1000)
- return (j + (HZ / 1000) - 1)/(HZ / 1000);
-#else
- return (j * 1000) / HZ;
-#endif
-}
-
-static inline unsigned int jiffies_to_usecs(const unsigned long j)
-{
-#if HZ <= 1000 && !(1000 % HZ)
- return (1000000 / HZ) * j;
-#elif HZ > 1000 && !(HZ % 1000)
- return (j*1000 + (HZ - 1000))/(HZ / 1000);
-#else
- return (j * 1000000) / HZ;
-#endif
-}
-
-static inline unsigned long msecs_to_jiffies(const unsigned int m)
-{
- if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
- return MAX_JIFFY_OFFSET;
-#if HZ <= 1000 && !(1000 % HZ)
- return (m + (1000 / HZ) - 1) / (1000 / HZ);
-#elif HZ > 1000 && !(HZ % 1000)
- return m * (HZ / 1000);
-#else
- return (m * HZ + 999) / 1000;
-#endif
-}
-
-/*
- * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
- * that a remainder subtract here would not do the right thing as the
- * resolution values don't fall on second boundries. I.e. the line:
- * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
- *
- * Rather, we just shift the bits off the right.
- *
- * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
- * value to a scaled second value.
- */
-static __inline__ unsigned long
-timespec_to_jiffies(const struct timespec *value)
-{
- unsigned long sec = value->tv_sec;
- long nsec = value->tv_nsec + TICK_NSEC - 1;
-
- if (sec >= MAX_SEC_IN_JIFFIES){
- sec = MAX_SEC_IN_JIFFIES;
- nsec = 0;
- }
- return (((u64)sec * SEC_CONVERSION) +
- (((u64)nsec * NSEC_CONVERSION) >>
- (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
-
-}
-
-static __inline__ void
-jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
-{
- /*
- * Convert jiffies to nanoseconds and separate with
- * one divide.
- */
- u64 nsec = (u64)jiffies * TICK_NSEC;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
-}
-
-/* Same for "timeval"
- *
- * Well, almost. The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
- */
-static __inline__ unsigned long
-timeval_to_jiffies(const struct timeval *value)
-{
- unsigned long sec = value->tv_sec;
- long usec = value->tv_usec;
-
- if (sec >= MAX_SEC_IN_JIFFIES){
- sec = MAX_SEC_IN_JIFFIES;
- usec = 0;
- }
- return (((u64)sec * SEC_CONVERSION) +
- (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
- (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
-}
-
-static __inline__ void
-jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
-{
- /*
- * Convert jiffies to nanoseconds and separate with
- * one divide.
- */
- u64 nsec = (u64)jiffies * TICK_NSEC;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
- value->tv_usec /= NSEC_PER_USEC;
-}
-
static __inline__ int timespec_equal(struct timespec *a, struct timespec *b)
{
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
@@ -347,11 +91,6 @@ struct timespec current_kernel_time(void);
#define CURRENT_TIME (current_kernel_time())
-#endif /* __KERNEL__ */
-
-#define NFDBITS __NFDBITS
-
-#ifdef __KERNEL__
extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday(struct timespec *tv);
extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
@@ -378,7 +117,10 @@ set_normalized_timespec (struct timespec *ts, time_t sec, long nsec)
ts->tv_sec = sec;
ts->tv_nsec = nsec;
}
-#endif
+
+#endif /* __KERNEL__ */
+
+#define NFDBITS __NFDBITS
#define FD_SETSIZE __FD_SETSIZE
#define FD_SET(fd,fdsetp) __FD_SET(fd,fdsetp)
diff --git a/include/linux/times.h b/include/linux/times.h
index 0c5aa078dad4..e2d3020742a6 100644
--- a/include/linux/times.h
+++ b/include/linux/times.h
@@ -1,79 +1,7 @@
#ifndef _LINUX_TIMES_H
#define _LINUX_TIMES_H
-#ifdef __KERNEL__
-#include <linux/timex.h>
-#include <asm/div64.h>
-#include <asm/types.h>
-#include <asm/param.h>
-
-static inline clock_t jiffies_to_clock_t(long x)
-{
-#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
- return x / (HZ / USER_HZ);
-#else
- u64 tmp = (u64)x * TICK_NSEC;
- do_div(tmp, (NSEC_PER_SEC / USER_HZ));
- return (long)tmp;
-#endif
-}
-
-static inline unsigned long clock_t_to_jiffies(unsigned long x)
-{
-#if (HZ % USER_HZ)==0
- if (x >= ~0UL / (HZ / USER_HZ))
- return ~0UL;
- return x * (HZ / USER_HZ);
-#else
- u64 jif;
-
- /* Don't worry about loss of precision here .. */
- if (x >= ~0UL / HZ * USER_HZ)
- return ~0UL;
-
- /* .. but do try to contain it here */
- jif = x * (u64) HZ;
- do_div(jif, USER_HZ);
- return jif;
-#endif
-}
-
-static inline u64 jiffies_64_to_clock_t(u64 x)
-{
-#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
- do_div(x, HZ / USER_HZ);
-#else
- /*
- * There are better ways that don't overflow early,
- * but even this doesn't overflow in hundreds of years
- * in 64 bits, so..
- */
- x *= TICK_NSEC;
- do_div(x, (NSEC_PER_SEC / USER_HZ));
-#endif
- return x;
-}
-#endif
-
-static inline u64 nsec_to_clock_t(u64 x)
-{
-#if (NSEC_PER_SEC % USER_HZ) == 0
- do_div(x, (NSEC_PER_SEC / USER_HZ));
-#elif (USER_HZ % 512) == 0
- x *= USER_HZ/512;
- do_div(x, (NSEC_PER_SEC / 512));
-#else
- /*
- * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
- * overflow after 64.99 years.
- * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
- */
- x *= 9;
- do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2))
- / USER_HZ));
-#endif
- return x;
-}
+#include <linux/types.h>
struct tms {
clock_t tms_utime;
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 31ef4595b1fd..438645f2fedc 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -55,33 +55,10 @@
#include <linux/config.h>
#include <linux/compiler.h>
+#include <linux/time.h>
#include <asm/param.h>
-
-/*
- * The following defines establish the engineering parameters of the PLL
- * model. The HZ variable establishes the timer interrupt frequency, 100 Hz
- * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the
- * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the
- * nearest power of two in order to avoid hardware multiply operations.
- */
-#if HZ >= 12 && HZ < 24
-# define SHIFT_HZ 4
-#elif HZ >= 24 && HZ < 48
-# define SHIFT_HZ 5
-#elif HZ >= 48 && HZ < 96
-# define SHIFT_HZ 6
-#elif HZ >= 96 && HZ < 192
-# define SHIFT_HZ 7
-#elif HZ >= 192 && HZ < 384
-# define SHIFT_HZ 8
-#elif HZ >= 384 && HZ < 768
-# define SHIFT_HZ 9
-#elif HZ >= 768 && HZ < 1536
-# define SHIFT_HZ 10
-#else
-# error You lose.
-#endif
+#include <asm/timex.h>
/*
* SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
@@ -152,41 +129,6 @@
#define MAXGLITCH 30 /* pps signal glitch max (s) */
/*
- * Pick up the architecture specific timex specifications
- */
-#include <asm/timex.h>
-
-/* LATCH is used in the interval timer and ftape setup. */
-#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-
-/* Suppose we want to devide two numbers NOM and DEN: NOM/DEN, the we can
- * improve accuracy by shifting LSH bits, hence calculating:
- * (NOM << LSH) / DEN
- * This however means trouble for large NOM, because (NOM << LSH) may no
- * longer fit in 32 bits. The following way of calculating this gives us
- * some slack, under the following conditions:
- * - (NOM / DEN) fits in (32 - LSH) bits.
- * - (NOM % DEN) fits in (32 - LSH) bits.
- */
-#define SH_DIV(NOM,DEN,LSH) ( ((NOM / DEN) << LSH) \
- + (((NOM % DEN) << LSH) + DEN / 2) / DEN)
-
-/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
-#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
-
-/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
-#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
-
-/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
-#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
-
-/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */
-/* a value TUSEC for TICK_USEC (can be set bij adjtimex) */
-#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
-
-
-#include <linux/time.h>
-/*
* syscall interface - used (mainly by NTP daemon)
* to discipline kernel clock oscillator
*/
diff --git a/include/linux/types.h b/include/linux/types.h
index 13ccdf3036fd..893c4b367bae 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -140,6 +140,13 @@ typedef unsigned long sector_t;
#define pgoff_t unsigned long
#endif
+#endif /* __KERNEL_STRICT_NAMES */
+
+/*
+ * Below are truly Linux-specific types that should never collide with
+ * any application/library that wants linux/types.h.
+ */
+
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#else
@@ -153,13 +160,6 @@ typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __le64;
typedef __u64 __bitwise __be64;
-#endif /* __KERNEL_STRICT_NAMES */
-
-/*
- * Below are truly Linux-specific types that should never collide with
- * any application/library that wants linux/types.h.
- */
-
struct ustat {
__kernel_daddr_t f_tfree;
__kernel_ino_t f_tinode;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 21cd4df67b24..8b3a2b86d92a 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -24,6 +24,7 @@
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>
+#include <asm/current.h>
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
@@ -37,6 +38,16 @@ struct __wait_queue {
struct list_head task_list;
};
+struct wait_bit_key {
+ void *flags;
+ int bit_nr;
+};
+
+struct wait_bit_queue {
+ struct wait_bit_key key;
+ wait_queue_t wait;
+};
+
struct __wait_queue_head {
spinlock_t lock;
struct list_head task_list;
@@ -63,6 +74,9 @@ typedef struct __wait_queue_head wait_queue_head_t;
#define DECLARE_WAIT_QUEUE_HEAD(name) \
wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
+#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
+ { .flags = word, .bit_nr = bit, }
+
static inline void init_waitqueue_head(wait_queue_head_t *q)
{
q->lock = SPIN_LOCK_UNLOCKED;
@@ -125,11 +139,17 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
+int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
+int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
+void FASTCALL(wake_up_bit(void *, int));
+int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
+int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
+wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
#define wake_up(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_all(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_all_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
@@ -300,6 +320,7 @@ void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
wait_queue_t *wait, int state));
void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
#define DEFINE_WAIT(name) \
wait_queue_t name = { \
@@ -310,12 +331,69 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *
}, \
}
+#define DEFINE_WAIT_BIT(name, word, bit) \
+ struct wait_bit_queue name = { \
+ .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
+ .wait = { \
+ .task = current, \
+ .func = wake_bit_function, \
+ .task_list = \
+ LIST_HEAD_INIT(name.wait.task_list), \
+ }, \
+ }
+
#define init_wait(wait) \
do { \
wait->task = current; \
wait->func = autoremove_wake_function; \
INIT_LIST_HEAD(&wait->task_list); \
} while (0)
+
+/**
+ * wait_on_bit - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that waits on a bit.
+ * For instance, if one were to have waiters on a bitflag, one would
+ * call wait_on_bit() in threads waiting for the bit to clear.
+ * One uses wait_on_bit() where one is waiting for the bit to clear,
+ * but has no intention of setting it.
+ */
+static inline int wait_on_bit(void *word, int bit,
+ int (*action)(void *), unsigned mode)
+{
+ if (!test_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit(word, bit, action, mode);
+}
+
+/**
+ * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that waits on a bit
+ * when one intends to set it, for instance, trying to lock bitflags.
+ * For instance, if one were to have waiters trying to set a bitflag
+ * and waiting for it to clear before setting it, one would call
+ * wait_on_bit_lock() in threads waiting to be able to set the bit.
+ * One uses wait_on_bit_lock() where one is waiting for the bit to
+ * clear with the intention of setting it, and when done, clearing it.
+ */
+static inline int wait_on_bit_lock(void *word, int bit,
+ int (*action)(void *), unsigned mode)
+{
+ if (!test_and_set_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit_lock(word, bit, action, mode);
+}
#endif /* __KERNEL__ */
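
DEFINE_WAIT_BIT, wait_on_bit(), wait_on_bit_lock() and wake_up_bit() let callers sleep on a single bit of a flags word using the shared hashed waitqueue table instead of a per-object waitqueue. A minimal sketch of a lock-like flag built on them, assuming <linux/wait.h> and <linux/sched.h> are already included; the bit number and helper names are illustrative:

#define MY_BUSY_BIT	0		/* assumed bit in obj->flags */

/* The action routine decides how to sleep; here, a plain schedule(). */
static int my_wait_action(void *word)
{
	schedule();
	return 0;
}

static void my_obj_lock(unsigned long *flags)
{
	/* Sleep until we win the test_and_set race on the bit. */
	wait_on_bit_lock(flags, MY_BUSY_BIT, my_wait_action,
			 TASK_UNINTERRUPTIBLE);
}

static void my_obj_unlock(unsigned long *flags)
{
	clear_bit(MY_BUSY_BIT, flags);
	smp_mb__after_clear_bit();	/* order the clear before the wakeup */
	wake_up_bit(flags, MY_BUSY_BIT);
}

The writeback.h hunk below uses the same pattern for the inode I_LOCK bit.
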
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 7c165c334be5..1c9994fe2acc 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -68,7 +68,7 @@ struct writeback_control {
*/
void writeback_inodes(struct writeback_control *wbc);
void wake_up_inode(struct inode *inode);
-void __wait_on_inode(struct inode * inode);
+int inode_wait(void *);
void sync_inodes_sb(struct super_block *, int wait);
void sync_inodes(int wait);
@@ -76,8 +76,8 @@ void sync_inodes(int wait);
static inline void wait_on_inode(struct inode *inode)
{
might_sleep();
- if (inode->i_state & I_LOCK)
- __wait_on_inode(inode);
+ wait_on_bit(&inode->i_state, __I_LOCK, inode_wait,
+ TASK_UNINTERRUPTIBLE);
}
/*
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index d9c5d5c83d49..23f9c61d9546 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -5,6 +5,7 @@
Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved.
+ Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
*/
#ifndef _LINUX_XATTR_H
#define _LINUX_XATTR_H
@@ -14,4 +15,19 @@
#define XATTR_SECURITY_PREFIX "security."
+struct xattr_handler {
+ char *prefix;
+ size_t (*list)(struct inode *inode, char *list, size_t list_size,
+ const char *name, size_t name_len);
+ int (*get)(struct inode *inode, const char *name, void *buffer,
+ size_t size);
+ int (*set)(struct inode *inode, const char *name, const void *buffer,
+ size_t size, int flags);
+};
+
+ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
+ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
+int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
+int generic_removexattr(struct dentry *dentry, const char *name);
+
#endif /* _LINUX_XATTR_H */
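
struct xattr_handler lets a filesystem split extended-attribute handling by name prefix and delegate dispatch to the new generic_*xattr() helpers. A minimal handler sketch follows; the prefix and behaviour are assumptions, and how an array of handlers is attached to a filesystem is outside this header:

#include <linux/xattr.h>
#include <linux/string.h>
#include <linux/errno.h>

#define EXAMPLE_PREFIX "user.example."	/* assumed prefix */

static size_t example_xattr_list(struct inode *inode, char *list,
				 size_t list_size, const char *name,
				 size_t name_len)
{
	const size_t total = sizeof(EXAMPLE_PREFIX) - 1 + name_len + 1;

	/* A NULL list means the caller only wants the required size. */
	if (list && total <= list_size) {
		memcpy(list, EXAMPLE_PREFIX, sizeof(EXAMPLE_PREFIX) - 1);
		memcpy(list + sizeof(EXAMPLE_PREFIX) - 1, name, name_len);
		list[total - 1] = '\0';
	}
	return total;
}

static int example_xattr_get(struct inode *inode, const char *name,
			     void *buffer, size_t size)
{
	/* A real handler would look the attribute up; report no data. */
	return -ENODATA;
}

static struct xattr_handler example_xattr_handler = {
	.prefix	= EXAMPLE_PREFIX,
	.list	= example_xattr_list,
	.get	= example_xattr_get,
};
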