author     Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-05 00:18:49 -0800
committer  Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-05 00:18:49 -0800
commit     1ea864f1c53bc771294e61cf9be43b1d22e78f4c (patch)
tree       de928ec3d1e22abf7b0963e59092152daa5a2ecb /include/linux
parent     2f886464aa00cd9eb9cf46c8c155a24a752bb317 (diff)
v2.5.2.6 -> v2.5.3
- Doug Ledford: i810 audio driver update
- Evgeniy Polyakov: update various SCSI drivers to new locking
- David Howells: syscall latency improvement, try 2
- Francois Romieu: dscc4 driver update
- Patrick Mochel: driver model fixes
- Andrew Morton: clean up a few details in ext3 inode initialization
- Pete Wyckoff: make x86 machine check print out right address..
- Hans Reiser: reiserfs update
- Richard Gooch: devfs update
- Greg KH: USB updates
- Dave Jones: PNPBIOS
- Nathan Scott: extended attributes
- Corey Minyard: clean up zlib duplication (triplication..)
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/ext3_fs_i.h        1
-rw-r--r--  include/linux/fs.h               5
-rw-r--r--  include/linux/init_task.h       10
-rw-r--r--  include/linux/limits.h           3
-rw-r--r--  include/linux/pnpbios.h        211
-rw-r--r--  include/linux/reiserfs_fs.h    696
-rw-r--r--  include/linux/reiserfs_fs_i.h   83
-rw-r--r--  include/linux/reiserfs_fs_sb.h 239
-rw-r--r--  include/linux/sched.h           22
-rw-r--r--  include/linux/usb.h             13
-rw-r--r--  include/linux/xattr.h           15
-rw-r--r--  include/linux/zlib.h           654
-rw-r--r--  include/linux/zlib_fs.h        707
13 files changed, 1351 insertions, 1308 deletions
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index 104aea4e0c19..6faf8f1b6e75 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -28,7 +28,6 @@ struct ext3_inode_info {
__u32 i_faddr;
__u8 i_frag_no;
__u8 i_frag_size;
- __u16 unused; /* formerly i_osync */
#endif
__u32 i_file_acl;
__u32 i_dir_acl;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 711a1674e74c..cdfaac0cdc0e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -813,6 +813,10 @@ struct inode_operations {
int (*revalidate) (struct dentry *);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct dentry *, struct iattr *);
+ int (*setxattr) (struct dentry *, char *, void *, size_t, int);
+ int (*getxattr) (struct dentry *, char *, void *, size_t);
+ int (*listxattr) (struct dentry *, char *, size_t);
+ int (*removexattr) (struct dentry *, char *);
};
struct seq_file;
@@ -1428,6 +1432,7 @@ extern int block_read_full_page(struct page*, get_block_t*);
extern int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
extern int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
unsigned long *);
+extern int generic_cont_expand(struct inode *inode, loff_t size) ;
extern int block_commit_write(struct page *page, unsigned from, unsigned to);
extern int block_sync_page(struct page *);
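
A minimal sketch, not part of this patch, of how a filesystem could plug into the new extended-attribute methods declared above; the myfs_* names are hypothetical, and only the setxattr slot is shown since the other three hook up the same way.

static int myfs_setxattr(struct dentry *dentry, char *name, void *value,
                         size_t size, int flags)
{
	/* store (name, value) against dentry->d_inode; return 0 or -errno */
	return -EOPNOTSUPP;
}

static struct inode_operations myfs_inode_operations = {
	setxattr:	myfs_setxattr,
	/* getxattr:, listxattr:, removexattr: are wired the same way */
};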
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7fa1243b73ff..4da3384179c4 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -35,6 +35,14 @@
siglock: SPIN_LOCK_UNLOCKED \
}
+#define INIT_TASK_WORK \
+{ \
+ need_resched: 0, \
+ syscall_trace: 0, \
+ sigpending: 0, \
+ notify_resume: 0, \
+}
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -43,7 +51,7 @@
{ \
state: 0, \
flags: 0, \
- sigpending: 0, \
+ work: INIT_TASK_WORK, \
addr_limit: KERNEL_DS, \
exec_domain: &default_exec_domain, \
lock_depth: -1, \
diff --git a/include/linux/limits.h b/include/linux/limits.h
index 45faa81d981d..cc13552cd15b 100644
--- a/include/linux/limits.h
+++ b/include/linux/limits.h
@@ -13,6 +13,9 @@
#define NAME_MAX 255 /* # chars in a file name */
#define PATH_MAX 4096 /* # chars in a path name including nul */
#define PIPE_BUF 4096 /* # bytes in atomic write to a pipe */
+#define XATTR_NAME_MAX 255 /* # chars in an extended attribute name */
+#define XATTR_SIZE_MAX 65536 /* size of an extended attribute value (64k) */
+#define XATTR_LIST_MAX 65536 /* size of extended attribute namelist (64k) */
#define RTSIG_MAX 32
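
A hedged illustration, not from the patch, of how these limits would typically be enforced before an attribute is stored; xattr_check_args is a hypothetical helper.

static int xattr_check_args(const char *name, size_t value_size)
{
	/* reject names and values beyond the limits defined above */
	if (strlen(name) > XATTR_NAME_MAX)
		return -ERANGE;
	if (value_size > XATTR_SIZE_MAX)
		return -E2BIG;
	return 0;
}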
diff --git a/include/linux/pnpbios.h b/include/linux/pnpbios.h
new file mode 100644
index 000000000000..ce68c4bb7ab4
--- /dev/null
+++ b/include/linux/pnpbios.h
@@ -0,0 +1,211 @@
+/*
+ * Include file for the interface to a PnP BIOS
+ *
+ * Original BIOS code (C) 1998 Christian Schmidt (chr.schmidt@tu-bs.de)
+ * PnP handler parts (c) 1998 Tom Lees <tom@lpsg.demon.co.uk>
+ * Minor reorganizations by David Hinds <dhinds@zen.stanford.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_PNPBIOS_H
+#define _LINUX_PNPBIOS_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+/*
+ * Status codes (warnings and errors)
+ */
+#define PNP_SUCCESS 0x00
+#define PNP_NOT_SET_STATICALLY 0x7f
+#define PNP_UNKNOWN_FUNCTION 0x81
+#define PNP_FUNCTION_NOT_SUPPORTED 0x82
+#define PNP_INVALID_HANDLE 0x83
+#define PNP_BAD_PARAMETER 0x84
+#define PNP_SET_FAILED 0x85
+#define PNP_EVENTS_NOT_PENDING 0x86
+#define PNP_SYSTEM_NOT_DOCKED 0x87
+#define PNP_NO_ISA_PNP_CARDS 0x88
+#define PNP_UNABLE_TO_DETERMINE_DOCK_CAPABILITIES 0x89
+#define PNP_CONFIG_CHANGE_FAILED_NO_BATTERY 0x8a
+#define PNP_CONFIG_CHANGE_FAILED_RESOURCE_CONFLICT 0x8b
+#define PNP_BUFFER_TOO_SMALL 0x8c
+#define PNP_USE_ESCD_SUPPORT 0x8d
+#define PNP_MESSAGE_NOT_SUPPORTED 0x8e
+#define PNP_HARDWARE_ERROR 0x8f
+
+#define ESCD_SUCCESS 0x00
+#define ESCD_IO_ERROR_READING 0x55
+#define ESCD_INVALID 0x56
+#define ESCD_BUFFER_TOO_SMALL 0x59
+#define ESCD_NVRAM_TOO_SMALL 0x5a
+#define ESCD_FUNCTION_NOT_SUPPORTED 0x81
+
+/*
+ * Events that can be received by "get event"
+ */
+#define PNPEV_ABOUT_TO_CHANGE_CONFIG 0x0001
+#define PNPEV_DOCK_CHANGED 0x0002
+#define PNPEV_SYSTEM_DEVICE_CHANGED 0x0003
+#define PNPEV_CONFIG_CHANGED_FAILED 0x0004
+#define PNPEV_UNKNOWN_SYSTEM_EVENT 0xffff
+/* 0x8000 through 0xfffe are OEM defined */
+
+/*
+ * Messages that should be sent through "send message"
+ */
+#define PNPMSG_OK 0x00
+#define PNPMSG_ABORT 0x01
+#define PNPMSG_UNDOCK_DEFAULT_ACTION 0x40
+#define PNPMSG_POWER_OFF 0x41
+#define PNPMSG_PNP_OS_ACTIVE 0x42
+#define PNPMSG_PNP_OS_INACTIVE 0x43
+/* 0x8000 through 0xffff are OEM defined */
+
+#pragma pack(1)
+struct pnp_dev_node_info {
+ __u16 no_nodes;
+ __u16 max_node_size;
+};
+struct pnp_docking_station_info {
+ __u32 location_id;
+ __u32 serial;
+ __u16 capabilities;
+};
+struct pnp_isa_config_struc {
+ __u8 revision;
+ __u8 no_csns;
+ __u16 isa_rd_data_port;
+ __u16 reserved;
+};
+struct escd_info_struc {
+ __u16 min_escd_write_size;
+ __u16 escd_size;
+ __u32 nv_storage_base;
+};
+struct pnp_bios_node {
+ __u16 size;
+ __u8 handle;
+ __u32 eisa_id;
+ __u8 type_code[3];
+ __u16 flags;
+ __u8 data[0];
+};
+#pragma pack()
+
+struct pnpbios_device_id
+{
+ char id[8];
+ unsigned long driver_data;
+};
+
+struct pnpbios_driver {
+ struct list_head node;
+ char *name;
+ const struct pnpbios_device_id *id_table; /* NULL if wants all devices */
+ int (*probe) (struct pci_dev *dev, const struct pnpbios_device_id *id); /* New device inserted */
+ void (*remove) (struct pci_dev *dev); /* Device removed, either due to hotplug remove or module remove */
+};
+
+#ifdef CONFIG_PNPBIOS
+
+/* exported */
+extern int pnpbios_register_driver(struct pnpbios_driver *drv);
+extern void pnpbios_unregister_driver(struct pnpbios_driver *drv);
+
+/* non-exported */
+#define pnpbios_for_each_dev(dev) \
+ for(dev = pnpbios_dev_g(pnpbios_devices.next); dev != pnpbios_dev_g(&pnpbios_devices); dev = pnpbios_dev_g(dev->global_list.next))
+
+
+#define pnpbios_dev_g(n) list_entry(n, struct pci_dev, global_list)
+
+static __inline struct pnpbios_driver *pnpbios_dev_driver(const struct pci_dev *dev)
+{
+ return (struct pnpbios_driver *)dev->driver;
+}
+
+extern int pnpbios_dont_use_current_config;
+extern void *pnpbios_kmalloc(size_t size, int f);
+extern void pnpbios_init (void);
+extern void pnpbios_proc_init (void);
+
+extern int pnp_bios_dev_node_info (struct pnp_dev_node_info *data);
+extern int pnp_bios_get_dev_node (u8 *nodenum, char config, struct pnp_bios_node *data);
+extern int pnp_bios_set_dev_node (u8 nodenum, char config, struct pnp_bios_node *data);
+#if needed
+extern int pnp_bios_get_event (u16 *message);
+extern int pnp_bios_send_message (u16 message);
+extern int pnp_bios_set_stat_res (char *info);
+extern int pnp_bios_get_stat_res (char *info);
+extern int pnp_bios_apm_id_table (char *table, u16 *size);
+extern int pnp_bios_isapnp_config (struct pnp_isa_config_struc *data);
+extern int pnp_bios_escd_info (struct escd_info_struc *data);
+extern int pnp_bios_read_escd (char *data, u32 nvram_base);
+extern int pnp_bios_write_escd (char *data, u32 nvram_base);
+#endif
+
+/*
+ * a helper function which helps ensure correct pnpbios_driver
+ * setup and cleanup for commonly-encountered hotplug/modular cases
+ *
+ * This MUST stay in a header, as it checks for -DMODULE
+ */
+
+static inline int pnpbios_module_init(struct pnpbios_driver *drv)
+{
+ int rc = pnpbios_register_driver (drv);
+
+ if (rc > 0)
+ return 0;
+
+ /* iff CONFIG_HOTPLUG and built into kernel, we should
+ * leave the driver around for future hotplug events.
+ * For the module case, a hotplug daemon of some sort
+ * should load a module in response to an insert event. */
+#if defined(CONFIG_HOTPLUG) && !defined(MODULE)
+ if (rc == 0)
+ return 0;
+#else
+ if (rc == 0)
+ rc = -ENODEV;
+#endif
+
+ /* if we get here, we need to clean up pci driver instance
+ * and return some sort of error */
+ pnpbios_unregister_driver (drv);
+
+ return rc;
+}
+
+#else /* CONFIG_PNPBIOS */
+
+static __inline__ int pnpbios_register_driver(struct pnpbios_driver *drv)
+{
+ return 0;
+}
+
+static __inline__ void pnpbios_unregister_driver(struct pnpbios_driver *drv)
+{
+ return;
+}
+
+#endif /* CONFIG_PNPBIOS */
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_PNPBIOS_H */
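
A minimal sketch, not part of this patch, of a driver built on the interface above (assuming CONFIG_PNPBIOS); the id string "PNP0c01" and all sample_* names are hypothetical.

static int sample_probe(struct pci_dev *dev, const struct pnpbios_device_id *id)
{
	/* the BIOS node matched id; claim its resources here */
	return 0;
}

static void sample_remove(struct pci_dev *dev)
{
	/* release whatever sample_probe claimed */
}

static const struct pnpbios_device_id sample_ids[] = {
	{ "PNP0c01", 0 },	/* hypothetical EISA-style id */
	{ "", 0 }		/* terminator */
};

static struct pnpbios_driver sample_driver = {
	name:		"sample",
	id_table:	sample_ids,
	probe:		sample_probe,
	remove:		sample_remove,
};

static int __init sample_init(void)
{
	/* pnpbios_module_init() handles the hotplug/modular corner
	   cases documented in the header above */
	return pnpbios_module_init(&sample_driver);
}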
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index f9f74b6d7212..9c0015031c03 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -53,48 +53,17 @@
*/
- /* Vladimir, what is the story with
- new_get_new_buffer nowadays? I
- want a complete explanation written
- here. */
-
-/* NEW_GET_NEW_BUFFER will try to allocate new blocks better */
-/*#define NEW_GET_NEW_BUFFER*/
-#define OLD_GET_NEW_BUFFER
-
- /* Vladimir, what about this one too? */
-/* if this is undefined, all inode changes get into stat data immediately, if it can be found in RAM */
-#define DIRTY_LATER
-
-/* enable journalling */
-#define ENABLE_JOURNAL
-
#define USE_INODE_GENERATION_COUNTER
-
-#ifdef __KERNEL__
-
-/* #define REISERFS_CHECK */
-
#define REISERFS_PREALLOCATE
-#endif
#define PREALLOCATION_SIZE 8
-/* if this is undefined, all inode changes get into stat data
- immediately, if it can be found in RAM */
-#define DIRTY_LATER
-
-
-/*#define READ_LOCK_REISERFS*/
-
-
/* n must be power of 2 */
#define _ROUND_UP(x,n) (((x)+(n)-1u) & ~((n)-1u))
// to be ok for alpha and others we have to align structures to 8 byte
// boundary.
// FIXME: do not change 4 by anything else: there is code which relies on that
- /* what 4? -Hans */
#define ROUND_UP(x) _ROUND_UP(x,8LL)
/* debug levels. Right now, CONFIG_REISERFS_CHECK means print all debug
@@ -130,6 +99,135 @@ if( !( cond ) ) \
* Structure of super block on disk, a version of which in RAM is often accessed as s->u.reiserfs_sb.s_rs
* the version in RAM is part of a larger structure containing fields never written to disk.
*/
+#define UNSET_HASH 0 // read_super will guess about, what hash names
+ // in directories were sorted with
+#define TEA_HASH 1
+#define YURA_HASH 2
+#define R5_HASH 3
+#define DEFAULT_HASH R5_HASH
+
+
+struct journal_params {
+ __u32 jp_journal_1st_block; /* where does journal start from on its
+ * device */
+ __u32 jp_journal_dev; /* journal device st_rdev */
+ __u32 jp_journal_size; /* size of the journal */
+ __u32 jp_journal_trans_max; /* max number of blocks in a transaction. */
+ __u32 jp_journal_magic; /* random value made on fs creation (this
+ * was sb_journal_block_count) */
+ __u32 jp_journal_max_batch; /* max number of blocks to batch into a
+ * trans */
+ __u32 jp_journal_max_commit_age; /* in seconds, how old can an async
+ * commit be */
+ __u32 jp_journal_max_trans_age; /* in seconds, how old can a transaction
+ * be */
+};
+
+/* this is the super from 3.5.X, where X >= 10 */
+struct reiserfs_super_block_v1
+{
+ __u32 s_block_count; /* blocks count */
+ __u32 s_free_blocks; /* free blocks count */
+ __u32 s_root_block; /* root block number */
+ struct journal_params s_journal;
+ __u16 s_blocksize; /* block size */
+ __u16 s_oid_maxsize; /* max size of object id array, see
+ * get_objectid() commentary */
+ __u16 s_oid_cursize; /* current size of object id array */
+ __u16 s_umount_state; /* this is set to 1 when filesystem was
+ * umounted, to 2 - when not */
+ char s_magic[10]; /* reiserfs magic string indicates that
+ * file system is reiserfs:
+ * "ReIsErFs" or "ReIsEr2Fs" or "ReIsEr3Fs" */
+ __u16 s_fs_state; /* it is set to used by fsck to mark which
+ * phase of rebuilding is done */
+ __u32 s_hash_function_code; /* indicate, what hash function is being use
+ * to sort names in a directory*/
+ __u16 s_tree_height; /* height of disk tree */
+ __u16 s_bmap_nr; /* amount of bitmap blocks needed to address
+ * each block of file system */
+ __u16 s_version; /* this field is only reliable on filesystem
+ * with non-standard journal */
+ __u16 s_reserved_for_journal; /* size in blocks of journal area on main
+ * device, we need to keep after
+ * making fs with non-standard journal */
+} __attribute__ ((__packed__));
+
+#define SB_SIZE_V1 (sizeof(struct reiserfs_super_block_v1))
+
+/* this is the on disk super block */
+struct reiserfs_super_block
+{
+ struct reiserfs_super_block_v1 s_v1;
+ __u32 s_inode_generation;
+ __u32 s_flags; /* Right now used only by inode-attributes, if enabled */
+ unsigned char s_uuid[16]; /* filesystem unique identifier */
+ unsigned char s_label[16]; /* filesystem volume label */
+ char s_unused[88] ; /* zero filled by mkreiserfs and
+ * reiserfs_convert_objectid_map_v1()
+ * so any additions must be updated
+ * there as well. */
+} __attribute__ ((__packed__));
+
+#define SB_SIZE (sizeof(struct reiserfs_super_block))
+
+#define REISERFS_VERSION_1 0
+#define REISERFS_VERSION_2 2
+
+
+// on-disk super block fields converted to cpu form
+#define SB_DISK_SUPER_BLOCK(s) ((s)->u.reiserfs_sb.s_rs)
+#define SB_V1_DISK_SUPER_BLOCK(s) (&(SB_DISK_SUPER_BLOCK(s)->s_v1))
+#define SB_BLOCKSIZE(s) \
+ le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_blocksize))
+#define SB_BLOCK_COUNT(s) \
+ le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_block_count))
+#define SB_FREE_BLOCKS(s) \
+ le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_free_blocks))
+#define SB_REISERFS_MAGIC(s) \
+ (SB_V1_DISK_SUPER_BLOCK(s)->s_magic)
+#define SB_ROOT_BLOCK(s) \
+ le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_root_block))
+#define SB_TREE_HEIGHT(s) \
+ le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_tree_height))
+#define SB_REISERFS_STATE(s) \
+ le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_umount_state))
+#define SB_VERSION(s) le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_version))
+#define SB_BMAP_NR(s) le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_bmap_nr))
+
+#define PUT_SB_BLOCK_COUNT(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_block_count = cpu_to_le32(val); } while (0)
+#define PUT_SB_FREE_BLOCKS(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_free_blocks = cpu_to_le32(val); } while (0)
+#define PUT_SB_ROOT_BLOCK(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_root_block = cpu_to_le32(val); } while (0)
+#define PUT_SB_TREE_HEIGHT(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_tree_height = cpu_to_le16(val); } while (0)
+#define PUT_SB_REISERFS_STATE(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_umount_state = cpu_to_le16(val); } while (0)
+#define PUT_SB_VERSION(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_version = cpu_to_le16(val); } while (0)
+#define PUT_SB_BMAP_NR(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_bmap_nr = cpu_to_le16 (val); } while (0)
+
+
+#define SB_ONDISK_JP(s) (&SB_V1_DISK_SUPER_BLOCK(s)->s_journal)
+#define SB_ONDISK_JOURNAL_SIZE(s) \
+ le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_size))
+#define SB_ONDISK_JOURNAL_1st_BLOCK(s) \
+ le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_1st_block))
+#define SB_ONDISK_JOURNAL_DEVICE(s) \
+ le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_dev))
+#define SB_ONDISK_RESERVED_FOR_JOURNAL(s) \
+ le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_reserved_for_journal))
+
+#define is_block_in_log_or_reserved_area(s, block) \
+ block >= SB_JOURNAL_1st_RESERVED_BLOCK(s) \
+ && block < SB_JOURNAL_1st_RESERVED_BLOCK(s) + \
+ ((!is_reiserfs_jr(SB_DISK_SUPER_BLOCK(s)) ? \
+ SB_ONDISK_JOURNAL_SIZE(s) + 1 : SB_ONDISK_RESERVED_FOR_JOURNAL(s)))
+
+
/* used by gcc */
#define REISERFS_SUPER_MAGIC 0x52654973
@@ -137,34 +235,28 @@ if( !( cond ) ) \
look at the superblock, etc. */
#define REISERFS_SUPER_MAGIC_STRING "ReIsErFs"
#define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs"
+#define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs"
-extern char reiserfs_super_magic_string[];
-extern char reiser2fs_super_magic_string[];
+extern const char reiserfs_3_5_magic_string[];
+extern const char reiserfs_3_6_magic_string[];
+extern const char reiserfs_jr_magic_string[];
-static inline int is_reiserfs_magic_string (const struct reiserfs_super_block * rs)
-{
- return (!strncmp (rs->s_magic, reiserfs_super_magic_string,
- strlen ( reiserfs_super_magic_string)) ||
- !strncmp (rs->s_magic, reiser2fs_super_magic_string,
- strlen ( reiser2fs_super_magic_string)));
-}
+int is_reiserfs_3_5 (struct reiserfs_super_block * rs);
+int is_reiserfs_3_6 (struct reiserfs_super_block * rs);
+int is_reiserfs_jr (struct reiserfs_super_block * rs);
- /* ReiserFS leaves the first 64k unused,
- so that partition labels have enough
- space. If someone wants to write a
- fancy bootloader that needs more than
- 64k, let us know, and this will be
- increased in size. This number must
- be larger than than the largest block
- size on any platform, or code will
- break. -Hans */
+/* ReiserFS leaves the first 64k unused, so that partition labels have
+ enough space. If someone wants to write a fancy bootloader that
+ needs more than 64k, let us know, and this will be increased in size.
+ This number must be larger than than the largest block size on any
+ platform, or code will break. -Hans */
#define REISERFS_DISK_OFFSET_IN_BYTES (64 * 1024)
#define REISERFS_FIRST_BLOCK unused_define
+#define REISERFS_JOURNAL_OFFSET_IN_BYTES REISERFS_DISK_OFFSET_IN_BYTES
/* the spot for the super in versions 3.5 - 3.5.10 (inclusive) */
#define REISERFS_OLD_DISK_OFFSET_IN_BYTES (8 * 1024)
-
// reiserfs internal error code (used by search_by_key adn fix_nodes))
#define CARRY_ON 0
#define REPEAT_SEARCH -1
@@ -173,57 +265,58 @@ static inline int is_reiserfs_magic_string (const struct reiserfs_super_block *
#define NO_BALANCING_NEEDED (-4)
#define NO_MORE_UNUSED_CONTIGUOUS_BLOCKS (-5)
-//#define SCHEDULE_OCCURRED 1
-//#define PATH_INCORRECT 2
-
-//#define NO_DISK_SPACE (-1)
-
-
-
typedef unsigned long b_blocknr_t;
typedef __u32 unp_t;
- /* who is responsible for this
- completely uncommented struct? */
struct unfm_nodeinfo {
- /* This is what? */
unp_t unfm_nodenum;
- /* now this I know what it is, and
- most of the people on our project
- know what it is, but I bet nobody
- new I hire will have a clue. */
unsigned short unfm_freespace;
};
+/* there are two formats of keys: 3.5 and 3.6
+ */
+#define KEY_FORMAT_3_5 0
+#define KEY_FORMAT_3_6 1
+
+/* there are two stat datas */
+#define STAT_DATA_V1 0
+#define STAT_DATA_V2 1
-/* when reiserfs_file_write is called with a byte count >= MIN_PACK_ON_CLOSE,
-** it sets the inode to pack on close, and when extending the file, will only
-** use unformatted nodes.
-**
-** This is a big speed up for the journal, which is badly hurt by direct->indirect
-** conversions (they must be logged).
-*/
-#define MIN_PACK_ON_CLOSE 512
static inline struct reiserfs_inode_info *REISERFS_I(struct inode *inode)
{
return list_entry(inode, struct reiserfs_inode_info, vfs_inode);
}
-// this says about version of all items (but stat data) the object
-// consists of
-#define inode_items_version(inode) (REISERFS_I(inode)->i_version)
-
-
- /* This is an aggressive tail suppression policy, I am hoping it
- improves our benchmarks. The principle behind it is that
- percentage space saving is what matters, not absolute space
- saving. This is non-intuitive, but it helps to understand it if
- you consider that the cost to access 4 blocks is not much more
- than the cost to access 1 block, if you have to do a seek and
- rotate. A tail risks a non-linear disk access that is
- significant as a percentage of total time cost for a 4 block file
- and saves an amount of space that is less significant as a
- percentage of space, or so goes the hypothesis. -Hans */
+/** this says about version of key of all items (but stat data) the
+ object consists of */
+#define get_inode_item_key_version( inode ) \
+ ((REISERFS_I(inode)->i_flags & i_item_key_version_mask) ? KEY_FORMAT_3_6 : KEY_FORMAT_3_5)
+
+#define set_inode_item_key_version( inode, version ) \
+ ({ if((version)==KEY_FORMAT_3_6) \
+ REISERFS_I(inode)->i_flags |= i_item_key_version_mask; \
+ else \
+ REISERFS_I(inode)->i_flags &= ~i_item_key_version_mask; })
+
+#define get_inode_sd_version(inode) \
+ ((REISERFS_I(inode)->i_flags & i_stat_data_version_mask) ? STAT_DATA_V2 : STAT_DATA_V1)
+
+#define set_inode_sd_version(inode, version) \
+ ({ if((version)==STAT_DATA_V2) \
+ REISERFS_I(inode)->i_flags |= i_stat_data_version_mask; \
+ else \
+ REISERFS_I(inode)->i_flags &= ~i_stat_data_version_mask; })
+
+/* This is an aggressive tail suppression policy, I am hoping it
+ improves our benchmarks. The principle behind it is that percentage
+ space saving is what matters, not absolute space saving. This is
+ non-intuitive, but it helps to understand it if you consider that the
+ cost to access 4 blocks is not much more than the cost to access 1
+ block, if you have to do a seek and rotate. A tail risks a
+ non-linear disk access that is significant as a percentage of total
+ time cost for a 4 block file and saves an amount of space that is
+ less significant as a percentage of space, or so goes the hypothesis.
+ -Hans */
#define STORE_TAIL_IN_UNFM(n_file_size,n_tail_size,n_block_size) \
(\
(!(n_tail_size)) || \
@@ -239,7 +332,7 @@ static inline struct reiserfs_inode_info *REISERFS_I(struct inode *inode)
/*
- * values for s_state field
+ * values for s_umount_state field
*/
#define REISERFS_VALID_FS 1
#define REISERFS_ERROR_FS 2
@@ -251,21 +344,6 @@ static inline struct reiserfs_inode_info *REISERFS_I(struct inode *inode)
/***************************************************************************/
//
-// we do support for old format of reiserfs: the problem is to
-// distinuquish keys with 32 bit offset and keys with 60 bit ones. On
-// leaf level we use ih_version of struct item_head (was
-// ih_reserved). For all old items it is set to 0
-// (ITEM_VERSION_1). For new items it is ITEM_VERSION_2. On internal
-// levels we have to know version of item key belongs to.
-//
-#define ITEM_VERSION_1 0
-#define ITEM_VERSION_2 1
-
-
-/* loff_t - long long */
-
-
-//
// directories use this key as well as old files
//
struct offset_v1 {
@@ -291,9 +369,9 @@ typedef union {
__u64 linear;
} __attribute__ ((__packed__)) offset_v2_esafe_overlay;
-static inline __u16 offset_v2_k_type( struct offset_v2 *v2 )
+static inline __u16 offset_v2_k_type( const struct offset_v2 *v2 )
{
- offset_v2_esafe_overlay tmp = *(offset_v2_esafe_overlay *)v2;
+ offset_v2_esafe_overlay tmp = *(const offset_v2_esafe_overlay *)v2;
tmp.linear = le64_to_cpu( tmp.linear );
return tmp.offset_v2.k_type;
}
@@ -306,9 +384,9 @@ static inline void set_offset_v2_k_type( struct offset_v2 *v2, int type )
tmp->linear = le64_to_cpu(tmp->linear);
}
-static inline loff_t offset_v2_k_offset( struct offset_v2 *v2 )
+static inline loff_t offset_v2_k_offset( const struct offset_v2 *v2 )
{
- offset_v2_esafe_overlay tmp = *(offset_v2_esafe_overlay *)v2;
+ offset_v2_esafe_overlay tmp = *(const offset_v2_esafe_overlay *)v2;
tmp.linear = le64_to_cpu( tmp.linear );
return tmp.offset_v2.k_offset;
}
@@ -346,18 +424,11 @@ struct cpu_key {
indirect2direct conversion */
};
-
-
-
-
-
-
- /* Our function for comparing keys can compare keys of different
- lengths. It takes as a parameter the length of the keys it is to
- compare. These defines are used in determining what is to be
- passed to it as that parameter. */
+/* Our function for comparing keys can compare keys of different
+ lengths. It takes as a parameter the length of the keys it is to
+ compare. These defines are used in determining what is to be passed
+ to it as that parameter. */
#define REISERFS_FULL_KEY_LEN 4
-
#define REISERFS_SHORT_KEY_LEN 2
/* The result of the key compare */
@@ -367,7 +438,6 @@ struct cpu_key {
#define KEY_FOUND 1
#define KEY_NOT_FOUND 0
-
#define KEY_SIZE (sizeof(struct key))
#define SHORT_KEY_SIZE (sizeof (__u32) + sizeof (__u32))
@@ -392,8 +462,6 @@ struct cpu_key {
#define GOTO_PREVIOUS_ITEM 2
#define NAME_FOUND_INVISIBLE 3
-
-
/* Everything in the filesystem is stored as a set of items. The
item head contains the key of the item, its free space (for
indirect items) and specifies the location of the item itself
@@ -401,37 +469,28 @@ struct cpu_key {
struct item_head
{
- struct key ih_key; /* Everything in the tree is found by searching for it based on its key.*/
-
- /* This is bloat, this should be part
- of the item not the item
- header. -Hans */
- union {
- __u16 ih_free_space_reserved; /* The free space in the last unformatted node of an indirect item if this
- is an indirect item. This equals 0xFFFF iff this is a direct item or
- stat data item. Note that the key, not this field, is used to determine
- the item type, and thus which field this union contains. */
- __u16 ih_entry_count; /* Iff this is a directory item, this field equals the number of directory
- entries in the directory item. */
- } __attribute__ ((__packed__)) u;
- __u16 ih_item_len; /* total size of the item body */
- __u16 ih_item_location; /* an offset to the item body within the block */
- /* I thought we were going to use this
- for having lots of item types? Why
- don't you use this for item type
- not item version. That is how you
- talked me into this field a year
- ago, remember? I am still not
- convinced it needs to be 16 bits
- (for at least many years), but at
- least I can sympathize with that
- hope. Change the name from version
- to type, and tell people not to use
- FFFF in case 16 bits is someday too
- small and needs to be extended:-). */
- __u16 ih_version; /* 0 for all old items, 2 for new
- ones. Highest bit is set by fsck
- temporary, cleaned after all done */
+ /* Everything in the tree is found by searching for it based on
+ * its key.*/
+ struct key ih_key;
+ union {
+ /* The free space in the last unformatted node of an
+ indirect item if this is an indirect item. This
+ equals 0xFFFF iff this is a direct item or stat data
+ item. Note that the key, not this field, is used to
+ determine the item type, and thus which field this
+ union contains. */
+ __u16 ih_free_space_reserved;
+ /* Iff this is a directory item, this field equals the
+ number of directory entries in the directory item. */
+ __u16 ih_entry_count;
+ } __attribute__ ((__packed__)) u;
+ __u16 ih_item_len; /* total size of the item body */
+ __u16 ih_item_location; /* an offset to the item body
+ * within the block */
+ __u16 ih_version; /* 0 for all old items, 2 for new
+ ones. Highest bit is set by fsck
+ temporary, cleaned after all
+ done */
} __attribute__ ((__packed__));
/* size of item header */
#define IH_SIZE (sizeof(struct item_head))
@@ -451,8 +510,8 @@ struct item_head
#define unreachable_item(ih) (ih_version(ih) & (1 << 15))
-#define get_ih_free_space(ih) (ih_version (ih) == ITEM_VERSION_2 ? 0 : ih_free_space (ih))
-#define set_ih_free_space(ih,val) put_ih_free_space((ih), ((ih_version(ih) == ITEM_VERSION_2) ? 0 : (val)))
+#define get_ih_free_space(ih) (ih_version (ih) == KEY_FORMAT_3_6 ? 0 : ih_free_space (ih))
+#define set_ih_free_space(ih,val) put_ih_free_space((ih), ((ih_version(ih) == KEY_FORMAT_3_6) ? 0 : (val)))
/* these operate on indirect items, where you've got an array of ints
** at a possibly unaligned location. These are a noop on ia32
@@ -481,6 +540,9 @@ struct item_head
#define V1_DIRENTRY_UNIQUENESS 500
#define V1_ANY_UNIQUENESS 555 // FIXME: comment is required
+extern void reiserfs_warning (const char * fmt, ...);
+/* __attribute__( ( format ( printf, 1, 2 ) ) ); */
+
//
// here are conversion routines
//
@@ -492,14 +554,11 @@ static inline int uniqueness2type (__u32 uniqueness)
case V1_INDIRECT_UNIQUENESS: return TYPE_INDIRECT;
case V1_DIRECT_UNIQUENESS: return TYPE_DIRECT;
case V1_DIRENTRY_UNIQUENESS: return TYPE_DIRENTRY;
+ default:
+ reiserfs_warning( "vs-500: unknown uniqueness %d\n", uniqueness);
+ case V1_ANY_UNIQUENESS:
+ return TYPE_ANY;
}
-/*
- if (uniqueness != V1_ANY_UNIQUENESS) {
- printk ("uniqueness %d\n", uniqueness);
- BUG ();
- }
-*/
- return TYPE_ANY;
}
static inline __u32 type2uniqueness (int type) CONSTF;
@@ -510,15 +569,13 @@ static inline __u32 type2uniqueness (int type)
case TYPE_INDIRECT: return V1_INDIRECT_UNIQUENESS;
case TYPE_DIRECT: return V1_DIRECT_UNIQUENESS;
case TYPE_DIRENTRY: return V1_DIRENTRY_UNIQUENESS;
+ default:
+ reiserfs_warning( "vs-501: unknown type %d\n", type);
+ case TYPE_ANY:
+ return V1_ANY_UNIQUENESS;
}
- /*
- if (type != TYPE_ANY)
- BUG ();
- */
- return V1_ANY_UNIQUENESS;
}
-
//
// key is pointer to on disk key which is stored in le, result is cpu,
// there is no way to get version of object from key, so, provide
@@ -526,7 +583,7 @@ static inline __u32 type2uniqueness (int type)
//
static inline loff_t le_key_k_offset (int version, const struct key * key)
{
- return (version == ITEM_VERSION_1) ?
+ return (version == KEY_FORMAT_3_5) ?
le32_to_cpu( key->u.k_offset_v1.k_offset ) :
offset_v2_k_offset( &(key->u.k_offset_v2) );
}
@@ -538,7 +595,7 @@ static inline loff_t le_ih_k_offset (const struct item_head * ih)
static inline loff_t le_key_k_type (int version, const struct key * key)
{
- return (version == ITEM_VERSION_1) ?
+ return (version == KEY_FORMAT_3_5) ?
uniqueness2type( le32_to_cpu( key->u.k_offset_v1.k_uniqueness)) :
offset_v2_k_type( &(key->u.k_offset_v2) );
}
@@ -551,20 +608,21 @@ static inline loff_t le_ih_k_type (const struct item_head * ih)
static inline void set_le_key_k_offset (int version, struct key * key, loff_t offset)
{
- (version == ITEM_VERSION_1) ?
+ (version == KEY_FORMAT_3_5) ?
(key->u.k_offset_v1.k_offset = cpu_to_le32 (offset)) : /* jdm check */
(set_offset_v2_k_offset( &(key->u.k_offset_v2), offset ));
}
+
+
static inline void set_le_ih_k_offset (struct item_head * ih, loff_t offset)
{
set_le_key_k_offset (ih_version (ih), &(ih->ih_key), offset);
}
-
static inline void set_le_key_k_type (int version, struct key * key, int type)
{
- (version == ITEM_VERSION_1) ?
+ (version == KEY_FORMAT_3_5) ?
(key->u.k_offset_v1.k_uniqueness = cpu_to_le32(type2uniqueness(type))):
(set_offset_v2_k_type( &(key->u.k_offset_v2), type ));
}
@@ -594,21 +652,21 @@ static inline void set_le_ih_k_type (struct item_head * ih, int type)
//
static inline loff_t cpu_key_k_offset (const struct cpu_key * key)
{
- return (key->version == ITEM_VERSION_1) ?
+ return (key->version == KEY_FORMAT_3_5) ?
key->on_disk_key.u.k_offset_v1.k_offset :
key->on_disk_key.u.k_offset_v2.k_offset;
}
static inline loff_t cpu_key_k_type (const struct cpu_key * key)
{
- return (key->version == ITEM_VERSION_1) ?
+ return (key->version == KEY_FORMAT_3_5) ?
uniqueness2type (key->on_disk_key.u.k_offset_v1.k_uniqueness) :
key->on_disk_key.u.k_offset_v2.k_type;
}
static inline void set_cpu_key_k_offset (struct cpu_key * key, loff_t offset)
{
- (key->version == ITEM_VERSION_1) ?
+ (key->version == KEY_FORMAT_3_5) ?
(key->on_disk_key.u.k_offset_v1.k_offset = offset) :
(key->on_disk_key.u.k_offset_v2.k_offset = offset);
}
@@ -616,14 +674,15 @@ static inline void set_cpu_key_k_offset (struct cpu_key * key, loff_t offset)
static inline void set_cpu_key_k_type (struct cpu_key * key, int type)
{
- (key->version == ITEM_VERSION_1) ?
+ (key->version == KEY_FORMAT_3_5) ?
(key->on_disk_key.u.k_offset_v1.k_uniqueness = type2uniqueness (type)):
(key->on_disk_key.u.k_offset_v2.k_type = type);
}
+
static inline void cpu_key_k_offset_dec (struct cpu_key * key)
{
- if (key->version == ITEM_VERSION_1)
+ if (key->version == KEY_FORMAT_3_5)
key->on_disk_key.u.k_offset_v1.k_offset --;
else
key->on_disk_key.u.k_offset_v2.k_offset --;
@@ -766,7 +825,7 @@ struct stat_data_v1
} __attribute__ ((__packed__));
#define SD_V1_SIZE (sizeof(struct stat_data_v1))
-#define stat_data_v1(ih) (ih_version (ih) == ITEM_VERSION_1)
+#define stat_data_v1(ih) (ih_version (ih) == KEY_FORMAT_3_5)
#define sd_v1_mode(sdp) (le16_to_cpu((sdp)->sd_mode))
#define set_sd_v1_mode(sdp,v) ((sdp)->sd_mode = cpu_to_le16(v))
#define sd_v1_nlink(sdp) (le16_to_cpu((sdp)->sd_nlink))
@@ -820,11 +879,11 @@ struct stat_data {
} __attribute__ ((__packed__)) u;
} __attribute__ ((__packed__));
//
-// this is 40 bytes long
+// this is 44 bytes long
//
#define SD_SIZE (sizeof(struct stat_data))
#define SD_V2_SIZE SD_SIZE
-#define stat_data_v2(ih) (ih_version (ih) == ITEM_VERSION_2)
+#define stat_data_v2(ih) (ih_version (ih) == KEY_FORMAT_3_6)
#define sd_v2_mode(sdp) (le16_to_cpu((sdp)->sd_mode))
#define set_sd_v2_mode(sdp,v) ((sdp)->sd_mode = cpu_to_le16(v))
/* sd_reserved */
@@ -954,76 +1013,10 @@ struct reiserfs_de_head
#define de_visible(deh) test_bit_unaligned (DEH_Visible, &((deh)->deh_state))
#define de_hidden(deh) !test_bit_unaligned (DEH_Visible, &((deh)->deh_state))
-/* compose directory item containing "." and ".." entries (entries are
- not aligned to 4 byte boundary) */
-/* the last four params are LE */
-static inline void make_empty_dir_item_v1 (char * body,
- __u32 dirid, __u32 objid,
- __u32 par_dirid, __u32 par_objid)
-{
- struct reiserfs_de_head * deh;
-
- memset (body, 0, EMPTY_DIR_SIZE_V1);
- deh = (struct reiserfs_de_head *)body;
-
- /* direntry header of "." */
- put_deh_offset( &(deh[0]), DOT_OFFSET );
- /* these two are from make_le_item_head, and are are LE */
- deh[0].deh_dir_id = dirid;
- deh[0].deh_objectid = objid;
- deh[0].deh_state = 0; /* Endian safe if 0 */
- put_deh_location( &(deh[0]), EMPTY_DIR_SIZE_V1 - strlen( "." ));
- mark_de_visible(&(deh[0]));
-
- /* direntry header of ".." */
- put_deh_offset( &(deh[1]), DOT_DOT_OFFSET);
- /* key of ".." for the root directory */
- /* these two are from the inode, and are are LE */
- deh[1].deh_dir_id = par_dirid;
- deh[1].deh_objectid = par_objid;
- deh[1].deh_state = 0; /* Endian safe if 0 */
- put_deh_location( &(deh[1]), deh_location( &(deh[0]) ) - strlen( ".." ) );
- mark_de_visible(&(deh[1]));
-
- /* copy ".." and "." */
- memcpy (body + deh_location( &(deh[0]) ), ".", 1);
- memcpy (body + deh_location( &(deh[1]) ), "..", 2);
-}
-
-/* compose directory item containing "." and ".." entries */
-static inline void make_empty_dir_item (char * body,
- __u32 dirid, __u32 objid,
- __u32 par_dirid, __u32 par_objid)
-{
- struct reiserfs_de_head * deh;
-
- memset (body, 0, EMPTY_DIR_SIZE);
- deh = (struct reiserfs_de_head *)body;
-
- /* direntry header of "." */
- put_deh_offset( &(deh[0]), DOT_OFFSET );
- /* these two are from make_le_item_head, and are are LE */
- deh[0].deh_dir_id = dirid;
- deh[0].deh_objectid = objid;
- deh[0].deh_state = 0; /* Endian safe if 0 */
- put_deh_location( &(deh[0]), EMPTY_DIR_SIZE - ROUND_UP( strlen( "." ) ) );
- mark_de_visible(&(deh[0]));
-
- /* direntry header of ".." */
- put_deh_offset( &(deh[1]), DOT_DOT_OFFSET );
- /* key of ".." for the root directory */
- /* these two are from the inode, and are are LE */
- deh[1].deh_dir_id = par_dirid;
- deh[1].deh_objectid = par_objid;
- deh[1].deh_state = 0; /* Endian safe if 0 */
- put_deh_location( &(deh[1]), deh_location( &(deh[0])) - ROUND_UP( strlen( ".." ) ) );
- mark_de_visible(&(deh[1]));
-
- /* copy ".." and "." */
- memcpy (body + deh_location( &(deh[0]) ), ".", 1);
- memcpy (body + deh_location( &(deh[1]) ), "..", 2);
-}
-
+extern void make_empty_dir_item_v1 (char * body, __u32 dirid, __u32 objid,
+ __u32 par_dirid, __u32 par_objid);
+extern void make_empty_dir_item (char * body, __u32 dirid, __u32 objid,
+ __u32 par_dirid, __u32 par_objid);
/* array of the entry headers */
/* get item body */
@@ -1064,13 +1057,9 @@ static inline int entry_length (const struct buffer_head * bh,
// two entries per block (at least)
//#define REISERFS_MAX_NAME_LEN(block_size)
//((block_size - BLKH_SIZE - IH_SIZE - DEH_SIZE * 2) / 2)
-
-// two entries per block (at least)
#define REISERFS_MAX_NAME_LEN(block_size) 255
-
-
/* this structure is used for operations on directory entries. It is
not a disk structure. */
/* When reiserfs_find_entry or search_by_entry_key find directory
@@ -1263,23 +1252,17 @@ struct path var = {ILLEGAL_PATH_ELEMENT_OFFSET, }
// in in-core inode key is stored on le form
#define INODE_PKEY(inode) ((struct key *)(REISERFS_I(inode)->i_key))
-//#define mark_tail_converted(inode) (atomic_set(&(REISERFS_I(inode)->i_converted),1))
-//#define unmark_tail_converted(inode) (REISERFS_I(inode)->i_converted), 0))
-//#define is_tail_converted(inode) (REISERFS_I(inode)->i_converted)))
-
-
#define MAX_UL_INT 0xffffffff
#define MAX_INT 0x7ffffff
#define MAX_US_INT 0xffff
-///#define TOO_LONG_LENGTH (~0ULL)
-
// reiserfs version 2 has max offset 60 bits. Version 1 - 32 bit offset
#define U32_MAX (~(__u32)0)
+
static inline loff_t max_reiserfs_offset (struct inode * inode)
{
- if (inode_items_version (inode) == ITEM_VERSION_1)
+ if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5)
return (loff_t)U32_MAX;
return (loff_t)((~(__u64)0) >> 4);
@@ -1312,13 +1295,6 @@ static inline loff_t max_reiserfs_offset (struct inode * inode)
/* FIXATE NODES */
/***************************************************************************/
-//#define VI_TYPE_STAT_DATA 1
-//#define VI_TYPE_DIRECT 2
-//#define VI_TYPE_INDIRECT 4
-//#define VI_TYPE_DIRECTORY 8
-//#define VI_TYPE_FIRST_DIRECTORY_ITEM 16
-//#define VI_TYPE_INSERTED_DIRECTORY_ITEM 32
-
#define VI_TYPE_LEFT_MERGEABLE 1
#define VI_TYPE_RIGHT_MERGEABLE 2
@@ -1541,11 +1517,7 @@ extern struct item_operations * item_ops [4];
#define COMP_KEYS comp_keys
#define COMP_SHORT_KEYS comp_short_keys
-#define keys_of_same_object comp_short_keys
-
-/*#define COMP_KEYS(p_s_key1, p_s_key2) comp_keys((unsigned long *)(p_s_key1), (unsigned long *)(p_s_key2))
-#define COMP_SHORT_KEYS(p_s_key1, p_s_key2) comp_short_keys((unsigned long *)(p_s_key1), (unsigned long *)(p_s_key2))*/
-
+/*#define keys_of_same_object comp_short_keys*/
/* number of blocks pointed to by the indirect item */
#define I_UNFM_NUM(p_s_ih) ( ih_item_len(p_s_ih) / UNFM_P_SIZE )
@@ -1626,6 +1598,7 @@ struct reiserfs_journal_header {
__u32 j_last_flush_trans_id ; /* id of last fully flushed transaction */
__u32 j_first_unflushed_offset ; /* offset in the log of where to start replay after a crash */
__u32 j_mount_id ;
+ /* 12 */ struct journal_params jh_journal;
} ;
extern task_queue reiserfs_commit_thread_tq ;
@@ -1633,7 +1606,10 @@ extern wait_queue_head_t reiserfs_commit_thread_wait ;
/* biggest tunable defines are right here */
#define JOURNAL_BLOCK_COUNT 8192 /* number of blocks in the journal */
-#define JOURNAL_MAX_BATCH 900 /* max blocks to batch into one transaction, don't make this any bigger than 900 */
+#define JOURNAL_TRANS_MAX_DEFAULT 1024 /* biggest possible single transaction, don't change for now (8/3/99) */
+#define JOURNAL_TRANS_MIN_DEFAULT 256
+#define JOURNAL_MAX_BATCH_DEFAULT 900 /* max blocks to batch into one transaction, don't make this any bigger than 900 */
+#define JOURNAL_MIN_RATIO 2
#define JOURNAL_MAX_COMMIT_AGE 30
#define JOURNAL_MAX_TRANS_AGE 30
#define JOURNAL_PER_BALANCE_CNT (3 * (MAX_HEIGHT-2) + 9)
@@ -1671,7 +1647,7 @@ void reiserfs_allow_writes(struct super_block *s) ;
void reiserfs_check_lock_depth(char *caller) ;
void reiserfs_prepare_for_journal(struct super_block *, struct buffer_head *bh, int wait) ;
void reiserfs_restore_prepared_buffer(struct super_block *, struct buffer_head *bh) ;
-int journal_init(struct super_block *) ;
+int journal_init(struct super_block *, const char * j_dev_name, int old_format) ;
int journal_release(struct reiserfs_transaction_handle*, struct super_block *) ;
int journal_release_error(struct reiserfs_transaction_handle*, struct super_block *) ;
int journal_end(struct reiserfs_transaction_handle *, struct super_block *, unsigned long) ;
@@ -1680,17 +1656,12 @@ int journal_mark_dirty_nolog(struct reiserfs_transaction_handle *, struct super_
int journal_mark_freed(struct reiserfs_transaction_handle *, struct super_block *, unsigned long blocknr) ;
int push_journal_writer(char *w) ;
int pop_journal_writer(int windex) ;
-int journal_lock_dobalance(struct super_block *p_s_sb) ;
-int journal_unlock_dobalance(struct super_block *p_s_sb) ;
int journal_transaction_should_end(struct reiserfs_transaction_handle *, int) ;
int reiserfs_in_journal(struct super_block *p_s_sb, unsigned long bl, int searchall, unsigned long *next) ;
int journal_begin(struct reiserfs_transaction_handle *, struct super_block *p_s_sb, unsigned long) ;
-int journal_join(struct reiserfs_transaction_handle *, struct super_block *p_s_sb, unsigned long) ;
struct super_block *reiserfs_get_super(kdev_t dev) ;
void flush_async_commits(struct super_block *p_s_sb) ;
-int remove_from_transaction(struct super_block *p_s_sb, unsigned long blocknr, int already_cleaned) ;
-
int buffer_journaled(const struct buffer_head *bh) ;
int mark_buffer_journal_new(struct buffer_head *bh) ;
int reiserfs_sync_all_buffers(kdev_t dev, int wait) ;
@@ -1729,6 +1700,10 @@ static inline int mark_buffer_notjournal_new(struct buffer_head *bh) {
return 0 ;
}
+void add_save_link (struct reiserfs_transaction_handle * th,
+ struct inode * inode, int truncate);
+void remove_save_link (struct inode * inode, int truncate);
+
/* objectid.c */
__u32 reiserfs_get_unused_objectid (struct reiserfs_transaction_handle *th);
void reiserfs_release_objectid (struct reiserfs_transaction_handle *th, __u32 objectid_to_release);
@@ -1766,16 +1741,16 @@ static inline int le_key_version (const struct key * key)
type = offset_v2_k_type( &(key->u.k_offset_v2));
if (type != TYPE_DIRECT && type != TYPE_INDIRECT && type != TYPE_DIRENTRY)
- return ITEM_VERSION_1;
+ return KEY_FORMAT_3_5;
- return ITEM_VERSION_2;
+ return KEY_FORMAT_3_6;
}
static inline void copy_key (struct key *to, const struct key *from)
{
- memcpy (to, from, KEY_SIZE);
+ memcpy (to, from, KEY_SIZE);
}
@@ -1819,17 +1794,12 @@ int reiserfs_delete_item (struct reiserfs_transaction_handle *th,
struct inode * inode,
struct buffer_head * p_s_un_bh);
-
+void reiserfs_delete_solid_item (struct reiserfs_transaction_handle *th,
+ struct key * key);
void reiserfs_delete_object (struct reiserfs_transaction_handle *th, struct inode * p_s_inode);
void reiserfs_do_truncate (struct reiserfs_transaction_handle *th,
struct inode * p_s_inode, struct page *,
int update_timestamps);
-//
-//void lock_inode_to_convert (struct inode * p_s_inode);
-//void unlock_inode_after_convert (struct inode * p_s_inode);
-//void increment_i_read_sync_counter (struct inode * p_s_inode);
-//void decrement_i_read_sync_counter (struct inode * p_s_inode);
-
#define i_block_size(inode) ((inode)->i_sb->s_blocksize)
#define file_size(inode) ((inode)->i_size)
@@ -1838,19 +1808,18 @@ void reiserfs_do_truncate (struct reiserfs_transaction_handle *th,
#define tail_has_to_be_packed(inode) (!dont_have_tails ((inode)->i_sb) &&\
!STORE_TAIL_IN_UNFM(file_size (inode), tail_size(inode), i_block_size (inode)))
-/*
-int get_buffer_by_range (struct super_block * p_s_sb, struct key * p_s_range_begin, struct key * p_s_range_end,
- struct buffer_head ** pp_s_buf, unsigned long * p_n_objectid);
-int get_buffers_from_range (struct super_block * p_s_sb, struct key * p_s_range_start, struct key * p_s_range_end,
- struct buffer_head ** p_s_range_buffers,
- int n_max_nr_buffers_to_return);
-*/
-
void padd_item (char * item, int total_length, int length);
-
/* inode.c */
+void reiserfs_read_inode (struct inode * inode) ;
+void reiserfs_read_inode2(struct inode * inode, void *p) ;
+void reiserfs_delete_inode (struct inode * inode);
+void reiserfs_write_inode (struct inode * inode, int) ;
+struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, __u32 *data,
+ int len, int fhtype, int parent);
+int reiserfs_dentry_to_fh(struct dentry *dentry, __u32 *data, int *lenp, int need_parent);
+
int reiserfs_prepare_write(struct file *, struct page *, unsigned, unsigned) ;
void reiserfs_truncate_file(struct inode *, int update_timestamps) ;
void make_cpu_key (struct cpu_key * cpu_key, struct inode * inode, loff_t offset,
@@ -1858,24 +1827,9 @@ void make_cpu_key (struct cpu_key * cpu_key, struct inode * inode, loff_t offset
void make_le_item_head (struct item_head * ih, const struct cpu_key * key,
int version,
loff_t offset, int type, int length, int entry_count);
-/*void store_key (struct key * key);
-void forget_key (struct key * key);*/
-int reiserfs_get_block (struct inode * inode, sector_t block,
- struct buffer_head * bh_result, int create);
struct inode * reiserfs_iget (struct super_block * s,
const struct cpu_key * key);
-void reiserfs_read_inode (struct inode * inode) ;
-void reiserfs_read_inode2(struct inode * inode, void *p) ;
-void reiserfs_delete_inode (struct inode * inode);
-extern int reiserfs_notify_change(struct dentry * dentry, struct iattr * attr);
-void reiserfs_write_inode (struct inode * inode, int) ;
-/* nfsd support functions */
-struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, __u32 *fh, int len, int fhtype, int parent);
-int reiserfs_dentry_to_fh(struct dentry *, __u32 *fh, int *lenp, int need_parent);
-
-/* we don't mark inodes dirty, we just log them */
-void reiserfs_dirty_inode (struct inode * inode) ;
struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th,
struct inode * dir, int mode,
@@ -1883,36 +1837,12 @@ struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th,
struct dentry *dentry, struct inode *inode, int * err);
int reiserfs_sync_inode (struct reiserfs_transaction_handle *th, struct inode * inode);
void reiserfs_update_sd (struct reiserfs_transaction_handle *th, struct inode * inode);
-int reiserfs_inode_setattr(struct dentry *, struct iattr * attr);
/* namei.c */
inline void set_de_name_and_namelen (struct reiserfs_dir_entry * de);
int search_by_entry_key (struct super_block * sb, const struct cpu_key * key,
struct path * path,
struct reiserfs_dir_entry * de);
-struct dentry * reiserfs_lookup (struct inode * dir, struct dentry *dentry);
-int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode);
-int reiserfs_mknod (struct inode * dir_inode, struct dentry *dentry, int mode, int rdev);
-int reiserfs_mkdir (struct inode * dir, struct dentry *dentry, int mode);
-int reiserfs_rmdir (struct inode * dir, struct dentry *dentry);
-int reiserfs_unlink (struct inode * dir, struct dentry *dentry);
-int reiserfs_symlink (struct inode * dir, struct dentry *dentry, const char * symname);
-int reiserfs_link (struct dentry * old_dentry, struct inode * dir, struct dentry *dentry);
-int reiserfs_rename (struct inode * old_dir, struct dentry *old_dentry, struct inode * new_dir, struct dentry *new_dentry);
-
-/* super.c */
-inline void reiserfs_mark_buffer_dirty (struct buffer_head * bh, int flag);
-inline void reiserfs_mark_buffer_clean (struct buffer_head * bh);
-void reiserfs_write_super (struct super_block * s);
-void reiserfs_put_super (struct super_block * s);
-int reiserfs_remount (struct super_block * s, int * flags, char * data);
-/*int read_super_block (struct super_block * s, int size);
-int read_bitmaps (struct super_block * s);
-int read_old_bitmaps (struct super_block * s);
-int read_old_super_block (struct super_block * s, int size);*/
-struct super_block * reiserfs_read_super (struct super_block * s, void * data, int silent);
-int reiserfs_statfs (struct super_block * s, struct statfs * buf);
-
/* procfs.c */
#if defined( CONFIG_PROC_FS ) && defined( CONFIG_REISERFS_PROC_INFO )
@@ -2009,8 +1939,6 @@ void free_buffers_in_tb (struct tree_balance * p_s_tb);
/* prints.c */
void reiserfs_panic (struct super_block * s, const char * fmt, ...)
__attribute__ ( ( noreturn ) );/* __attribute__( ( format ( printf, 2, 3 ) ) ) */
-void reiserfs_warning (const char * fmt, ...);
-/* __attribute__( ( format ( printf, 1, 2 ) ) ); */
void reiserfs_debug (struct super_block *s, int level, const char * fmt, ...);
/* __attribute__( ( format ( printf, 3, 4 ) ) ); */
void print_virtual_node (struct virtual_node * vn);
@@ -2091,9 +2019,6 @@ __u32 keyed_hash (const signed char *msg, int len);
__u32 yura_hash (const signed char *msg, int len);
__u32 r5_hash (const signed char *msg, int len);
-/* version.c */
-const char *reiserfs_get_version_string(void) CONSTF;
-
/* the ext2 bit routines adjust for big or little endian as
** appropriate for the arch, so in our laziness we use them rather
** than using the bit routines they call more directly. These
@@ -2103,83 +2028,6 @@ const char *reiserfs_get_version_string(void) CONSTF;
#define reiserfs_test_le_bit ext2_test_bit
#define reiserfs_find_next_zero_le_bit ext2_find_next_zero_bit
-
-//
-// this was totally copied from from linux's
-// find_first_zero_bit and changed a bit
-//
-
-#ifdef __i386__
-
-static __inline__ int
-find_first_nonzero_bit(const void * addr, unsigned size) {
- int res;
- int __d0;
- void *__d1;
-
-
- if (!size) {
- return (0);
- }
- __asm__ __volatile__ (
- "cld\n\t"
- "xorl %%eax,%%eax\n\t"
- "repe; scasl\n\t"
- "je 1f\n\t"
- "movl -4(%%edi),%%eax\n\t"
- "subl $4, %%edi\n\t"
- "bsfl %%eax,%%eax\n\t"
- "1:\tsubl %%edx,%%edi\n\t"
- "shll $3,%%edi\n\t"
- "addl %%edi,%%eax"
- :"=a" (res),
- "=c"(__d0), "=D"(__d1)
- :"1" ((size + 31) >> 5), "d" (addr), "2" (addr));
- return (res);
-}
-
-#else /* __i386__ */
-
-static __inline__ int find_next_nonzero_bit(const void * addr, unsigned size,
- unsigned offset)
-{
- unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- /* set to zero first offset bits */
- tmp &= ~(~0UL >> (32-offset));
- if (size < 32)
- goto found_first;
- if (tmp != 0U)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != 0U)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-found_first:
-found_middle:
- return result + ffs(tmp);
-}
-
-#define find_first_nonzero_bit(addr,size) find_next_nonzero_bit((addr), (size), 0)
-
-#endif /* 0 */
-
/* sometimes reiserfs_truncate may require to allocate few new blocks
to perform indirect2direct conversion. People probably used to
think, that truncate should work without problems on a filesystem
@@ -2189,12 +2037,6 @@ found_middle:
absolutely safe */
#define SPARE_SPACE 500
-static inline unsigned long reiserfs_get_journal_block(const struct super_block *s) {
- return le32_to_cpu(SB_DISK_SUPER_BLOCK(s)->s_journal_block) ;
-}
-static inline unsigned long reiserfs_get_journal_orig_size(const struct super_block *s) {
- return le32_to_cpu(SB_DISK_SUPER_BLOCK(s)->s_orig_journal_size) ;
-}
/* prototypes from ioctl.c */
int reiserfs_ioctl (struct inode * inode, struct file * filp,
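
A hedged sketch, not part of the patch, showing the intended use of the endian-safe superblock accessors introduced above; account_freed_block is a hypothetical helper.

static inline void account_freed_block(struct super_block *s)
{
	/* read and update s->u.reiserfs_sb.s_rs through the le32 wrappers */
	PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) + 1);
}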
diff --git a/include/linux/reiserfs_fs_i.h b/include/linux/reiserfs_fs_i.h
index 6b7a94ece6bc..d76abebe56a8 100644
--- a/include/linux/reiserfs_fs_i.h
+++ b/include/linux/reiserfs_fs_i.h
@@ -3,51 +3,46 @@
#include <linux/list.h>
+/** bitmasks for i_flags field in reiserfs-specific part of inode */
+typedef enum {
+ /** this says what format of key do all items (but stat data) of
+ an object have. If this is set, that format is 3.6 otherwise
+ - 3.5 */
+ i_item_key_version_mask = 0x0001,
+ /** If this is unset, object has 3.5 stat data, otherwise, it has
+ 3.6 stat data with 64bit size, 32bit nlink etc. */
+ i_stat_data_version_mask = 0x0002,
+ /** file might need tail packing on close */
+ i_pack_on_close_mask = 0x0004,
+ /** don't pack tail of file */
+ i_nopack_mask = 0x0008,
+ /** If those is set, "safe link" was created for this file during
+ truncate or unlink. Safe link is used to avoid leakage of disk
+ space on crash with some files open, but unlinked. */
+ i_link_saved_unlink_mask = 0x0010,
+ i_link_saved_truncate_mask = 0x0020
+} reiserfs_inode_flags;
+
+
struct reiserfs_inode_info {
- __u32 i_key [4];/* key is still 4 32 bit integers */
-
- /* this comment will be totally
- cryptic to readers not familiar
- with 3.5/3.6 format conversion, and
- it does not consider that that 3.6
- might not be the last version */
- int i_version; // this says whether file is old or new
-
- int i_pack_on_close ; // file might need tail packing on close
-
- __u32 i_first_direct_byte; // offset of first byte stored in direct item.
-
- /* My guess is this contains the first
- unused block of a sequence of
- blocks plus the length of the
- sequence, which I think is always
- at least two at the time of the
- preallocation. I really prefer
- allocate on flush conceptually.....
-
- You know, it really annoys me when
- code is this badly commented that I
- have to guess what it does.
- Neither I nor anyone else has time
- for guessing what your
- datastructures mean. -Hans */
- //For preallocation
- int i_prealloc_block;
- int i_prealloc_count;
- struct list_head i_prealloc_list; /* per-transaction list of inodes which
- * have preallocated blocks */
- /* I regret that you think the below
- is a comment you should make.... -Hans */
- //nopack-attribute
- int nopack;
-
- /* we use these for fsync or O_SYNC to decide which transaction needs
- ** to be committed in order for this inode to be properly flushed
- */
- unsigned long i_trans_id ;
- unsigned long i_trans_index ;
- struct inode vfs_inode;
+ __u32 i_key [4];/* key is still 4 32 bit integers */
+ /** transient inode flags that are never stored on disk. Bitmasks
+ for this field are defined above. */
+ __u32 i_flags;
+
+ __u32 i_first_direct_byte; // offset of first byte stored in direct item.
+
+ int i_prealloc_block; /* first unused block of a sequence of unused blocks */
+ int i_prealloc_count; /* length of that sequence */
+ struct list_head i_prealloc_list; /* per-transaction list of inodes which
+ * have preallocated blocks */
+
+ /* we use these for fsync or O_SYNC to decide which transaction
+ ** needs to be committed in order for this inode to be properly
+ ** flushed */
+ unsigned long i_trans_id ;
+ unsigned long i_trans_index ;
+ struct inode vfs_inode;
};
-
#endif
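
A minimal sketch, not from the patch, of how the new i_flags bitmasks stand in for the old i_version/i_pack_on_close fields; it uses only names visible in this diff, and mark_inode_new_format is a hypothetical helper.

static inline void mark_inode_new_format(struct inode *inode)
{
	/* 3.6 key format and v2 stat data, via the flag helpers
	   added to reiserfs_fs.h above */
	set_inode_item_key_version(inode, KEY_FORMAT_3_6);
	set_inode_sd_version(inode, STAT_DATA_V2);
	/* request tail packing on close, as i_pack_on_close used to */
	REISERFS_I(inode)->i_flags |= i_pack_on_close_mask;
}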
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 73d6d823fbbb..edcb0f9734ed 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -8,140 +8,71 @@
#include <linux/tqueue.h>
#endif
-//
-// super block's field values
-//
-/*#define REISERFS_VERSION 0 undistributed bitmap */
-/*#define REISERFS_VERSION 1 distributed bitmap and resizer*/
-#define REISERFS_VERSION_2 2 /* distributed bitmap, resizer, 64-bit, etc*/
-#define UNSET_HASH 0 // read_super will guess about, what hash names
- // in directories were sorted with
-#define TEA_HASH 1
-#define YURA_HASH 2
-#define R5_HASH 3
-#define DEFAULT_HASH R5_HASH
-
-/* this is the on disk super block */
-
-struct reiserfs_super_block
-{
- __u32 s_block_count;
- __u32 s_free_blocks; /* free blocks count */
- __u32 s_root_block; /* root block number */
- __u32 s_journal_block; /* journal block number */
- __u32 s_journal_dev; /* journal device number */
-
- /* Since journal size is currently a #define in a header file, if
- ** someone creates a disk with a 16MB journal and moves it to a
- ** system with 32MB journal default, they will overflow their journal
- ** when they mount the disk. s_orig_journal_size, plus some checks
- ** while mounting (inside journal_init) prevent that from happening
- */
- /* great comment Chris. Thanks. -Hans */
-
- __u32 s_orig_journal_size;
- __u32 s_journal_trans_max ; /* max number of blocks in a transaction. */
- __u32 s_journal_block_count ; /* total size of the journal. can change over time */
- __u32 s_journal_max_batch ; /* max number of blocks to batch into a trans */
- __u32 s_journal_max_commit_age ; /* in seconds, how old can an async commit be */
- __u32 s_journal_max_trans_age ; /* in seconds, how old can a transaction be */
- __u16 s_blocksize; /* block size */
- __u16 s_oid_maxsize; /* max size of object id array, see get_objectid() commentary */
- __u16 s_oid_cursize; /* current size of object id array */
- __u16 s_state; /* valid or error */
- char s_magic[12]; /* reiserfs magic string indicates that file system is reiserfs */
- __u32 s_hash_function_code; /* indicate, what hash function is being use to sort names in a directory*/
- __u16 s_tree_height; /* height of disk tree */
- __u16 s_bmap_nr; /* amount of bitmap blocks needed to address each block of file system */
- __u16 s_version; /* I'd prefer it if this was a string,
- something like "3.6.4", and maybe
- 16 bytes long mostly unused. We
- don't need to save bytes in the
- superblock. -Hans */
- __u16 s_reserved;
- __u32 s_inode_generation;
- char s_unused[124] ; /* zero filled by mkreiserfs */
-} __attribute__ ((__packed__));
-
-#define SB_SIZE (sizeof(struct reiserfs_super_block))
/* struct reiserfs_super_block accessors/mutators
* since this is a disk structure, it will always be in
* little endian format. */
-#define sb_block_count(sbp) (le32_to_cpu((sbp)->s_block_count))
-#define set_sb_block_count(sbp,v) ((sbp)->s_block_count = cpu_to_le32(v))
-#define sb_free_blocks(sbp) (le32_to_cpu((sbp)->s_free_blocks))
-#define set_sb_free_blocks(sbp,v) ((sbp)->s_free_blocks = cpu_to_le32(v))
-#define sb_root_block(sbp) (le32_to_cpu((sbp)->s_root_block))
-#define set_sb_root_block(sbp,v) ((sbp)->s_root_block = cpu_to_le32(v))
-#define sb_journal_block(sbp) (le32_to_cpu((sbp)->s_journal_block))
-#define set_sb_journal_block(sbp,v) ((sbp)->s_journal_block = cpu_to_le32(v))
-#define sb_journal_dev(sbp) (le32_to_cpu((sbp)->s_journal_dev))
-#define set_sb_journal_dev(sbp,v) ((sbp)->s_journal_dev = cpu_to_le32(v))
-#define sb_orig_journal_size(sbp) (le32_to_cpu((sbp)->s_orig_journal_size))
-#define set_sb_orig_journal_size(sbp,v) \
- ((sbp)->s_orig_journal_size = cpu_to_le32(v))
-#define sb_journal_trans_max(sbp) (le32_to_cpu((sbp)->s_journal_trans_max))
-#define set_journal_trans_max(sbp,v) \
- ((sbp)->s_journal_trans_max = cpu_to_le32(v))
-#define sb_journal_block_count(sbp) (le32_to_cpu((sbp)->journal_block_count))
-#define sb_set_journal_block_count(sbp,v) \
- ((sbp)->s_journal_block_count = cpu_to_le32(v))
-#define sb_journal_max_batch(sbp) (le32_to_cpu((sbp)->s_journal_max_batch))
-#define set_sb_journal_max_batch(sbp,v) \
- ((sbp)->s_journal_max_batch = cpu_to_le32(v))
-#define sb_jourmal_max_commit_age(sbp) \
- (le32_to_cpu((sbp)->s_journal_max_commit_age))
-#define set_sb_journal_max_commit_age(sbp,v) \
- ((sbp)->s_journal_max_commit_age = cpu_to_le32(v))
-#define sb_jourmal_max_trans_age(sbp) \
- (le32_to_cpu((sbp)->s_journal_max_trans_age))
-#define set_sb_journal_max_trans_age(sbp,v) \
- ((sbp)->s_journal_max_trans_age = cpu_to_le32(v))
-#define sb_blocksize(sbp) (le16_to_cpu((sbp)->s_blocksize))
-#define set_sb_blocksize(sbp,v) ((sbp)->s_blocksize = cpu_to_le16(v))
-#define sb_oid_maxsize(sbp) (le16_to_cpu((sbp)->s_oid_maxsize))
-#define set_sb_oid_maxsize(sbp,v) ((sbp)->s_oid_maxsize = cpu_to_le16(v))
-#define sb_oid_cursize(sbp) (le16_to_cpu((sbp)->s_oid_cursize))
-#define set_sb_oid_cursize(sbp,v) ((sbp)->s_oid_cursize = cpu_to_le16(v))
-#define sb_state(sbp) (le16_to_cpu((sbp)->s_state))
-#define set_sb_state(sbp,v) ((sbp)->s_state = cpu_to_le16(v))
+#define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count))
+#define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
+#define sb_free_blocks(sbp) (le32_to_cpu((sbp)->s_v1.s_free_blocks))
+#define set_sb_free_blocks(sbp,v) ((sbp)->s_v1.s_free_blocks = cpu_to_le32(v))
+#define sb_root_block(sbp) (le32_to_cpu((sbp)->s_v1.s_root_block))
+#define set_sb_root_block(sbp,v) ((sbp)->s_v1.s_root_block = cpu_to_le32(v))
+
+#define sb_jp_journal_1st_block(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_1st_block))
+#define set_sb_jp_journal_1st_block(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_1st_block = cpu_to_le32(v))
+#define sb_jp_journal_dev(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_dev))
+#define set_sb_jp_journal_dev(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_dev = cpu_to_le32(v))
+#define sb_jp_journal_size(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_size))
+#define set_sb_jp_journal_size(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_size = cpu_to_le32(v))
+#define sb_jp_journal_trans_max(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_trans_max))
+#define set_sb_jp_journal_trans_max(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_trans_max = cpu_to_le32(v))
+#define sb_jp_journal_magic(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_magic))
+#define set_sb_jp_journal_magic(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_magic = cpu_to_le32(v))
+#define sb_jp_journal_max_batch(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_max_batch))
+#define set_sb_jp_journal_max_batch(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_max_batch = cpu_to_le32(v))
+#define sb_jp_jourmal_max_commit_age(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_max_commit_age))
+#define set_sb_jp_journal_max_commit_age(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_max_commit_age = cpu_to_le32(v))
+
+#define sb_blocksize(sbp) (le16_to_cpu((sbp)->s_v1.s_blocksize))
+#define set_sb_blocksize(sbp,v) ((sbp)->s_v1.s_blocksize = cpu_to_le16(v))
+#define sb_oid_maxsize(sbp) (le16_to_cpu((sbp)->s_v1.s_oid_maxsize))
+#define set_sb_oid_maxsize(sbp,v) ((sbp)->s_v1.s_oid_maxsize = cpu_to_le16(v))
+#define sb_oid_cursize(sbp) (le16_to_cpu((sbp)->s_v1.s_oid_cursize))
+#define set_sb_oid_cursize(sbp,v) ((sbp)->s_v1.s_oid_cursize = cpu_to_le16(v))
+#define sb_umount_state(sbp) (le16_to_cpu((sbp)->s_v1.s_umount_state))
+#define set_sb_umount_state(sbp,v) ((sbp)->s_v1.s_umount_state = cpu_to_le16(v))
+#define sb_fs_state(sbp) (le16_to_cpu((sbp)->s_v1.s_fs_state))
+#define set_sb_fs_state(sbp,v) ((sbp)->s_v1.s_fs_state = cpu_to_le16(v))
#define sb_hash_function_code(sbp) \
- (le32_to_cpu((sbp)->s_hash_function_code))
+ (le32_to_cpu((sbp)->s_v1.s_hash_function_code))
#define set_sb_hash_function_code(sbp,v) \
- ((sbp)->s_hash_function_code = cpu_to_le32(v))
-#define sb_tree_height(sbp) (le16_to_cpu((sbp)->s_tree_height))
-#define set_sb_tree_height(sbp,v) ((sbp)->s_tree_height = cpu_to_le16(v))
-#define sb_bmap_nr(sbp) (le16_to_cpu((sbp)->s_bmap_nr))
-#define set_sb_bmap_nr(sbp,v) ((sbp)->s_bmap_nr = cpu_to_le16(v))
-#define sb_version(sbp) (le16_to_cpu((sbp)->s_version))
-#define set_sb_version(sbp,v) ((sbp)->s_version = cpu_to_le16(v))
-
-/* this is the super from 3.5.X, where X >= 10 */
-struct reiserfs_super_block_v1
-{
- __u32 s_block_count; /* blocks count */
- __u32 s_free_blocks; /* free blocks count */
- __u32 s_root_block; /* root block number */
- __u32 s_journal_block; /* journal block number */
- __u32 s_journal_dev; /* journal device number */
- __u32 s_orig_journal_size; /* size of the journal on FS creation. used to make sure they don't overflow it */
- __u32 s_journal_trans_max ; /* max number of blocks in a transaction. */
- __u32 s_journal_block_count ; /* total size of the journal. can change over time */
- __u32 s_journal_max_batch ; /* max number of blocks to batch into a trans */
- __u32 s_journal_max_commit_age ; /* in seconds, how old can an async commit be */
- __u32 s_journal_max_trans_age ; /* in seconds, how old can a transaction be */
- __u16 s_blocksize; /* block size */
- __u16 s_oid_maxsize; /* max size of object id array, see get_objectid() commentary */
- __u16 s_oid_cursize; /* current size of object id array */
- __u16 s_state; /* valid or error */
- char s_magic[16]; /* reiserfs magic string indicates that file system is reiserfs */
- __u16 s_tree_height; /* height of disk tree */
- __u16 s_bmap_nr; /* amount of bitmap blocks needed to address each block of file system */
- __u32 s_reserved;
-} __attribute__ ((__packed__));
-
-#define SB_SIZE_V1 (sizeof(struct reiserfs_super_block_v1))
+ ((sbp)->s_v1.s_hash_function_code = cpu_to_le32(v))
+#define sb_tree_height(sbp) (le16_to_cpu((sbp)->s_v1.s_tree_height))
+#define set_sb_tree_height(sbp,v) ((sbp)->s_v1.s_tree_height = cpu_to_le16(v))
+#define sb_bmap_nr(sbp) (le16_to_cpu((sbp)->s_v1.s_bmap_nr))
+#define set_sb_bmap_nr(sbp,v) ((sbp)->s_v1.s_bmap_nr = cpu_to_le16(v))
+#define sb_version(sbp) (le16_to_cpu((sbp)->s_v1.s_version))
+#define set_sb_version(sbp,v) ((sbp)->s_v1.s_version = cpu_to_le16(v))
+
+#define sb_reserved_for_journal(sbp) \
+ (le16_to_cpu((sbp)->s_v1.s_reserved_for_journal))
+#define set_sb_reserved_for_journal(sbp,v) \
+ ((sbp)->s_v1.s_reserved_for_journal = cpu_to_le16(v))
/* LOGGING -- */
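Since the accessors above now reach through the embedded v1 super block (s_v1) and hide the endian conversion, callers never touch the raw fields. A rough usage sketch, not taken from this patch, assuming rs points at the in-memory copy of the on-disk super block:

/* Sketch: consume one free block, letting the accessors handle the
 * le32 conversion and the s_v1 indirection. */
static void sb_take_one_block(struct reiserfs_super_block *rs)
{
	__u32 free = sb_free_blocks(rs);	/* read in CPU byte order */

	if (free > 0)
		set_sb_free_blocks(rs, free - 1);	/* stored little endian */
}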
@@ -170,7 +101,6 @@ struct reiserfs_super_block_v1
/* we have a node size define somewhere in reiserfs_fs.h. -Hans */
#define JOURNAL_BLOCK_SIZE 4096 /* BUG gotta get rid of this */
#define JOURNAL_MAX_CNODE 1500 /* max cnodes to allocate. */
-#define JOURNAL_TRANS_MAX 1024 /* biggest possible single transaction, don't change for now (8/3/99) */
#define JOURNAL_HASH_SIZE 8192
#define JOURNAL_NUM_BITMAPS 5 /* number of copies of the bitmaps to have floating. Must be >= 2 */
#define JOURNAL_LIST_COUNT 64
@@ -263,7 +193,12 @@ struct reiserfs_journal {
struct buffer_head ** j_ap_blocks ; /* journal blocks on disk */
struct reiserfs_journal_cnode *j_last ; /* newest journal block */
struct reiserfs_journal_cnode *j_first ; /* oldest journal block. start here for traverse */
-
+
+ kdev_t j_dev;
+ struct file *j_dev_file;
+ struct block_device *j_dev_bd;
+  int j_1st_reserved_block; /* first block on s_dev of the area reserved for the journal */
+
int j_state ;
unsigned long j_trans_id ;
unsigned long j_mount_id ;
@@ -294,6 +229,11 @@ struct reiserfs_journal {
int j_cnode_used ; /* number of cnodes on the used list */
int j_cnode_free ; /* number of cnodes on the free list */
+ unsigned int s_journal_trans_max ; /* max number of blocks in a transaction. */
+ unsigned int s_journal_max_batch ; /* max number of blocks to batch into a trans */
+ unsigned int s_journal_max_commit_age ; /* in seconds, how old can an async commit be */
+ unsigned int s_journal_max_trans_age ; /* in seconds, how old can a transaction be */
+
struct reiserfs_journal_cnode *j_cnode_free_list ;
struct reiserfs_journal_cnode *j_cnode_free_orig ; /* orig pointer returned from vmalloc */
@@ -407,6 +347,8 @@ struct reiserfs_sb_info
/* To be obsoleted soon by per buffer seals.. -Hans */
atomic_t s_generation_counter; // increased by one every time the
// tree gets re-balanced
+ unsigned long s_properties; /* File system properties. Currently holds
+ on-disk FS format */
/* session statistics */
int s_kmallocs;
@@ -420,11 +362,19 @@ struct reiserfs_sb_info
int s_bmaps_without_search;
int s_direct2indirect;
int s_indirect2direct;
+    /* set when it is ok for reiserfs_read_inode2() to read an on-disk
+       inode with nlink==0. Currently this is only used during
+       finish_unfinished() processing at mount time */
+ int s_is_unlinked_ok;
reiserfs_proc_info_data_t s_proc_info_data;
struct proc_dir_entry *procdir;
};
+/* Definitions of reiserfs on-disk properties: */
+#define REISERFS_3_5 0
+#define REISERFS_3_6 1
+/* Mount options */
#define NOTAIL 0 /* -o notail: no tails will be created in a session */
#define REPLAYONLY 3 /* replay journal and return 0. Use by fsck */
#define REISERFS_NOLOG 4 /* -o nolog: turn journalling off */
@@ -474,7 +424,8 @@ struct reiserfs_sb_info
#define dont_have_tails(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << NOTAIL))
#define replay_only(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REPLAYONLY))
#define reiserfs_dont_log(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_NOLOG))
-#define old_format_only(s) ((SB_VERSION(s) != REISERFS_VERSION_2) && !((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_CONVERT)))
+#define old_format_only(s) ((s)->u.reiserfs_sb.s_properties & (1 << REISERFS_3_5))
+#define convert_reiserfs(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_CONVERT))
void reiserfs_file_buffer (struct buffer_head * bh, int list);
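old_format_only() is now answered from the s_properties bits rather than from the on-disk version field directly. A hedged sketch of how mount-time code might record the detected format; the function name is illustrative only:

/* Sketch: remember which on-disk format was found at mount time so that
 * old_format_only()/convert_reiserfs() above give the right answer. */
static void reiserfs_note_format(struct super_block *s, int is_3_5)
{
	if (is_3_5)
		s->u.reiserfs_sb.s_properties |= (1 << REISERFS_3_5);
	else
		s->u.reiserfs_sb.s_properties |= (1 << REISERFS_3_6);
}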
@@ -490,29 +441,19 @@ int reiserfs_resize(struct super_block *, unsigned long) ;
#define SB_BUFFER_WITH_SB(s) ((s)->u.reiserfs_sb.s_sbh)
#define SB_JOURNAL(s) ((s)->u.reiserfs_sb.s_journal)
+#define SB_JOURNAL_1st_RESERVED_BLOCK(s) (SB_JOURNAL(s)->j_1st_reserved_block)
#define SB_JOURNAL_LIST(s) (SB_JOURNAL(s)->j_journal_list)
#define SB_JOURNAL_LIST_INDEX(s) (SB_JOURNAL(s)->j_journal_list_index)
#define SB_JOURNAL_LEN_FREE(s) (SB_JOURNAL(s)->j_journal_len_free)
#define SB_AP_BITMAP(s) ((s)->u.reiserfs_sb.s_ap_bitmap)
+#define SB_DISK_JOURNAL_HEAD(s) (SB_JOURNAL(s)->j_header_bh)
-// on-disk super block fields converted to cpu form
-#define SB_DISK_SUPER_BLOCK(s) ((s)->u.reiserfs_sb.s_rs)
-#define SB_BLOCK_COUNT(s) sb_block_count (SB_DISK_SUPER_BLOCK(s))
-#define SB_FREE_BLOCKS(s) sb_free_blocks (SB_DISK_SUPER_BLOCK(s))
-#define SB_REISERFS_MAGIC(s) (SB_DISK_SUPER_BLOCK(s)->s_magic)
-#define SB_ROOT_BLOCK(s) sb_root_block (SB_DISK_SUPER_BLOCK(s))
-#define SB_TREE_HEIGHT(s) sb_tree_height (SB_DISK_SUPER_BLOCK(s))
-#define SB_REISERFS_STATE(s) sb_state (SB_DISK_SUPER_BLOCK(s))
-#define SB_VERSION(s) sb_version (SB_DISK_SUPER_BLOCK(s))
-#define SB_BMAP_NR(s) sb_bmap_nr(SB_DISK_SUPER_BLOCK(s))
-
-#define PUT_SB_BLOCK_COUNT(s, val) do { set_sb_block_count( SB_DISK_SUPER_BLOCK(s), val); } while (0)
-#define PUT_SB_FREE_BLOCKS(s, val) do { set_sb_free_blocks( SB_DISK_SUPER_BLOCK(s), val); } while (0)
-#define PUT_SB_ROOT_BLOCK(s, val) do { set_sb_root_block( SB_DISK_SUPER_BLOCK(s), val); } while (0)
-#define PUT_SB_TREE_HEIGHT(s, val) do { set_sb_tree_height( SB_DISK_SUPER_BLOCK(s), val); } while (0)
-#define PUT_SB_REISERFS_STATE(s, val) do { set_sb_state( SB_DISK_SUPER_BLOCK(s), val); } while (0)
-#define PUT_SB_VERSION(s, val) do { set_sb_version( SB_DISK_SUPER_BLOCK(s), val); } while (0)
-#define PUT_SB_BMAP_NR(s, val) do { set_sb_bmap_nr( SB_DISK_SUPER_BLOCK(s), val); } while (0)
+#define SB_JOURNAL_TRANS_MAX(s) (SB_JOURNAL(s)->s_journal_trans_max)
+#define SB_JOURNAL_MAX_BATCH(s) (SB_JOURNAL(s)->s_journal_max_batch)
+#define SB_JOURNAL_MAX_COMMIT_AGE(s) (SB_JOURNAL(s)->s_journal_max_commit_age)
+#define SB_JOURNAL_MAX_TRANS_AGE(s) (SB_JOURNAL(s)->s_journal_max_trans_age)
+#define SB_JOURNAL_DEV(s) (SB_JOURNAL(s)->j_dev)
+
#endif /* _LINUX_REISER_FS_SB */
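With the journal tunables copied into struct reiserfs_journal, callers are expected to go through the SB_JOURNAL_* wrappers rather than the old superblock macros. A small sketch under that assumption, not part of the patch:

/* Sketch: check a transaction size against the per-journal limits. */
static int reiserfs_trans_too_big(struct super_block *s, int nblocks)
{
	return nblocks >= SB_JOURNAL_TRANS_MAX(s) ||
	       nblocks >= SB_JOURNAL_MAX_BATCH(s);
}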
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bbb5dbbf9d17..6805d6ae39df 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -229,19 +229,29 @@ extern struct user_struct root_user;
typedef struct prio_array prio_array_t;
+/* this struct must occupy one 32-bit chunk so that it can be read in one go */
+struct task_work {
+ __s8 need_resched;
+ __u8 syscall_trace; /* count of syscall interceptors */
+ __u8 sigpending;
+ __u8 notify_resume; /* request for notification on
+ userspace execution resumption */
+} __attribute__((packed));
+
struct task_struct {
/*
* offsets of these are hardcoded elsewhere - touch with care
*/
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
unsigned long flags; /* per process flags, defined below */
- int sigpending;
+ volatile struct task_work work;
+
mm_segment_t addr_limit; /* thread address space:
0-0xBFFFFFFF for user-thead
0-0xFFFFFFFF for kernel-thread
*/
struct exec_domain *exec_domain;
- volatile long need_resched;
+ long __pad;
unsigned long ptrace;
int lock_depth; /* Lock depth */
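Packing need_resched, syscall_trace, sigpending and notify_resume into the single 32-bit struct task_work lets the syscall return path test all of them with one load. The following only illustrates that idea; the union and helper are not part of the patch:

/* Sketch: test every task_work flag with one aligned 32-bit read. */
union task_work_word {
	struct task_work w;
	__u32 raw;
};

static inline int task_work_pending(struct task_struct *p)
{
	union task_work_word u;

	u.w = p->work;		/* one 32-bit load of all four flags */
	return u.raw != 0;
}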
@@ -381,7 +391,7 @@ struct task_struct {
*/
#define PT_PTRACED 0x00000001
-#define PT_TRACESYS 0x00000002
+#define PT_SYSCALLTRACE 0x00000002 /* T if syscall_trace is +1 for ptrace() */
#define PT_DTRACE 0x00000004 /* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD 0x00000008
#define PT_PTRACE_CAP 0x00000010 /* ptracer can follow suid-exec */
@@ -575,12 +585,12 @@ extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
static inline int signal_pending(struct task_struct *p)
{
- return (p->sigpending != 0);
+ return (p->work.sigpending != 0);
}
static inline int need_resched(void)
{
- return unlikely(current->need_resched != 0);
+ return unlikely(current->work.need_resched != 0);
}
static inline void cond_resched(void)
@@ -625,7 +635,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
static inline void recalc_sigpending(struct task_struct *t)
{
- t->sigpending = has_pending_signals(&t->pending.signal, &t->blocked);
+ t->work.sigpending = has_pending_signals(&t->pending.signal, &t->blocked);
}
/* True if we are on the alternate signal stack. */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 91e8498f3a71..4c5837e684cd 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -641,11 +641,11 @@ typedef void (*usb_complete_t)(struct urb *);
* @iso_frame_desc: Used to provide arrays of ISO transfer buffers and to
* collect the transfer status for each buffer.
*
- * This structure identifies USB transfer requests. URBs may be allocated
- * in any way, although usb_alloc_urb() is often convenient. Initialization
- * may be done using various usb_fill_*_urb() functions. URBs are submitted
- * using usb_submit_urb(), and pending requests may be canceled using
- * usb_unlink_urb().
+ * This structure identifies USB transfer requests. URBs must be allocated by
+ * calling usb_alloc_urb() and freed with a call to usb_free_urb().
+ * Initialization may be done using various usb_fill_*_urb() functions. URBs
+ * are submitted using usb_submit_urb(), and pending requests may be canceled
+ * using usb_unlink_urb().
*
* Initialization:
*
@@ -721,6 +721,7 @@ typedef void (*usb_complete_t)(struct urb *);
struct urb
{
spinlock_t lock; /* lock for the URB */
+ atomic_t count; /* reference count of the URB */
void *hcpriv; /* private data for host controller */
struct list_head urb_list; /* list pointer to all active urbs */
struct urb *next; /* (in) pointer to next URB */
@@ -854,6 +855,8 @@ static inline void usb_fill_int_urb (struct urb *urb,
extern struct urb *usb_alloc_urb(int iso_packets);
extern void usb_free_urb(struct urb *urb);
+#define usb_put_urb usb_free_urb
+extern struct urb *usb_get_urb(struct urb *urb);
extern int usb_submit_urb(struct urb *urb);
extern int usb_unlink_urb(struct urb *urb);
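URBs are now reference counted: the new count field together with usb_get_urb() and usb_put_urb() (an alias for usb_free_urb()) lets a driver keep an URB alive while it may still complete. A hedged sketch; filling in the URB fields and the final release are omitted:

/* Sketch: hold an extra reference on an URB across submission.
 * The matching usb_put_urb() for the successful case is assumed to
 * happen once the driver is completely done with the URB. */
static int submit_urb_with_ref(struct urb *urb)
{
	int ret;

	usb_get_urb(urb);		/* +1 while the URB is in flight */
	ret = usb_submit_urb(urb);
	if (ret)
		usb_put_urb(urb);	/* submission failed, drop our reference */
	return ret;
}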
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
new file mode 100644
index 000000000000..a7eb1b4188bd
--- /dev/null
+++ b/include/linux/xattr.h
@@ -0,0 +1,15 @@
+/*
+ File: linux/xattr.h
+
+ Extended attributes handling.
+
+ Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
+ Copyright (C) 2001 SGI - Silicon Graphics, Inc <linux-xfs@oss.sgi.com>
+*/
+#ifndef _LINUX_XATTR_H
+#define _LINUX_XATTR_H
+
+#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
+#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
+
+#endif /* _LINUX_XATTR_H */
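XATTR_CREATE and XATTR_REPLACE arrive in the flags argument of the new setxattr inode operation. A minimal sketch of how a filesystem handler might honour them; attr_exists stands in for whatever lookup the filesystem actually performs:

#include <linux/errno.h>
#include <linux/xattr.h>

/* Sketch: translate the setxattr flags into the usual error codes. */
static int xattr_check_flags(int attr_exists, int flags)
{
	if ((flags & XATTR_CREATE) && attr_exists)
		return -EEXIST;		/* create-only, but it already exists */
	if ((flags & XATTR_REPLACE) && !attr_exists)
		return -ENODATA;	/* replace-only, but nothing to replace */
	return 0;
}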
diff --git a/include/linux/zlib.h b/include/linux/zlib.h
new file mode 100644
index 000000000000..43b32c613d76
--- /dev/null
+++ b/include/linux/zlib.h
@@ -0,0 +1,654 @@
+/* zlib.h -- interface of the 'zlib' general purpose compression library
+ version 1.1.3, July 9th, 1998
+
+ Copyright (C) 1995-1998 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ jloup@gzip.org madler@alumni.caltech.edu
+
+
+ The data format used by the zlib library is described by RFCs (Request for
+ Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
+ (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
+*/
+
+#ifndef _ZLIB_H
+#define _ZLIB_H
+
+#include "zconf.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ZLIB_VERSION "1.1.3"
+
+/*
+ The 'zlib' compression library provides in-memory compression and
+ decompression functions, including integrity checks of the uncompressed
+ data. This version of the library supports only one compression method
+ (deflation) but other algorithms will be added later and will have the same
+ stream interface.
+
+ Compression can be done in a single step if the buffers are large
+ enough (for example if an input file is mmap'ed), or can be done by
+ repeated calls of the compression function. In the latter case, the
+ application must provide more input and/or consume the output
+ (providing more output space) before each call.
+
+ The library also supports reading and writing files in gzip (.gz) format
+ with an interface similar to that of stdio.
+
+ The library does not install any signal handler. The decoder checks
+ the consistency of the compressed data, so the library should never
+ crash even in case of corrupted input.
+*/
+
+typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
+typedef void (*free_func) OF((voidpf opaque, voidpf address));
+
+struct internal_state;
+
+typedef struct z_stream_s {
+ Bytef *next_in; /* next input byte */
+ uInt avail_in; /* number of bytes available at next_in */
+ uLong total_in; /* total nb of input bytes read so far */
+
+ Bytef *next_out; /* next output byte should be put there */
+ uInt avail_out; /* remaining free space at next_out */
+ uLong total_out; /* total nb of bytes output so far */
+
+ char *msg; /* last error message, NULL if no error */
+ struct internal_state FAR *state; /* not visible by applications */
+
+ void *workspace; /* memory allocated for this stream */
+
+ int data_type; /* best guess about the data type: ascii or binary */
+ uLong adler; /* adler32 value of the uncompressed data */
+ uLong reserved; /* reserved for future use */
+} z_stream;
+
+typedef z_stream FAR *z_streamp;
+
+/*
+ The application must update next_in and avail_in when avail_in has
+ dropped to zero. It must update next_out and avail_out when avail_out
+ has dropped to zero. The application must initialize zalloc, zfree and
+ opaque before calling the init function. All other fields are set by the
+ compression library and must not be updated by the application.
+
+ The opaque value provided by the application will be passed as the first
+ parameter for calls of zalloc and zfree. This can be useful for custom
+ memory management. The compression library attaches no meaning to the
+ opaque value.
+
+ zalloc must return Z_NULL if there is not enough memory for the object.
+ If zlib is used in a multi-threaded application, zalloc and zfree must be
+ thread safe.
+
+ On 16-bit systems, the functions zalloc and zfree must be able to allocate
+ exactly 65536 bytes, but will not be required to allocate more than this
+ if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
+ pointers returned by zalloc for objects of exactly 65536 bytes *must*
+ have their offset normalized to zero. The default allocation function
+ provided by this library ensures this (see zutil.c). To reduce memory
+ requirements and avoid any allocation of 64K objects, at the expense of
+ compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
+
+ The fields total_in and total_out can be used for statistics or
+ progress reports. After compression, total_in holds the total size of
+ the uncompressed data and may be saved for use in the decompressor
+ (particularly if the decompressor wants to decompress everything in
+ a single step).
+*/
+
+ /* constants */
+
+#define Z_NO_FLUSH 0
+#define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */
+#define Z_PACKET_FLUSH 2
+#define Z_SYNC_FLUSH 3
+#define Z_FULL_FLUSH 4
+#define Z_FINISH 5
+/* Allowed flush values; see deflate() below for details */
+
+#define Z_OK 0
+#define Z_STREAM_END 1
+#define Z_NEED_DICT 2
+#define Z_ERRNO (-1)
+#define Z_STREAM_ERROR (-2)
+#define Z_DATA_ERROR (-3)
+#define Z_MEM_ERROR (-4)
+#define Z_BUF_ERROR (-5)
+#define Z_VERSION_ERROR (-6)
+/* Return codes for the compression/decompression functions. Negative
+ * values are errors, positive values are used for special but normal events.
+ */
+
+#define Z_NO_COMPRESSION 0
+#define Z_BEST_SPEED 1
+#define Z_BEST_COMPRESSION 9
+#define Z_DEFAULT_COMPRESSION (-1)
+/* compression levels */
+
+#define Z_FILTERED 1
+#define Z_HUFFMAN_ONLY 2
+#define Z_DEFAULT_STRATEGY 0
+/* compression strategy; see deflateInit2() below for details */
+
+#define Z_BINARY 0
+#define Z_ASCII 1
+#define Z_UNKNOWN 2
+/* Possible values of the data_type field */
+
+#define Z_DEFLATED 8
+/* The deflate compression method (the only one supported in this version) */
+
+#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
+
+ /* basic functions */
+
+ZEXTERN const char * ZEXPORT zlib_zlibVersion OF((void));
+/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
+ If the first character differs, the library code actually used is
+ not compatible with the zlib.h header file used by the application.
+ This check is automatically made by deflateInit and inflateInit.
+ */
+
+ZEXTERN int ZEXPORT zlib_deflate_workspacesize OF((void));
+/*
+  Returns the number of bytes that need to be allocated for a per-
+  stream workspace.  A buffer of this size should be allocated and its
+  address stored in stream->workspace before calling zlib_deflateInit().
+*/
+
+/*
+ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level));
+
+ Initializes the internal stream state for compression. The fields
+ zalloc, zfree and opaque must be initialized before by the caller.
+ If zalloc and zfree are set to Z_NULL, deflateInit updates them to
+ use default allocation functions.
+
+ The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
+ 1 gives best speed, 9 gives best compression, 0 gives no compression at
+ all (the input data is simply copied a block at a time).
+ Z_DEFAULT_COMPRESSION requests a default compromise between speed and
+ compression (currently equivalent to level 6).
+
+ deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if level is not a valid compression level,
+ Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
+ with the version assumed by the caller (ZLIB_VERSION).
+ msg is set to null if there is no error message. deflateInit does not
+ perform any compression: this will be done by deflate().
+*/
+
+
+ZEXTERN int ZEXPORT zlib_deflate OF((z_streamp strm, int flush));
+/*
+ deflate compresses as much data as possible, and stops when the input
+ buffer becomes empty or the output buffer becomes full. It may introduce some
+ output latency (reading input without producing any output) except when
+ forced to flush.
+
+ The detailed semantics are as follows. deflate performs one or both of the
+ following actions:
+
+ - Compress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in and avail_in are updated and
+ processing will resume at this point for the next call of deflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. This action is forced if the parameter flush is non zero.
+ Forcing flush frequently degrades the compression ratio, so this parameter
+ should be set only when necessary (in interactive applications).
+ Some output may be provided even if flush is not set.
+
+ Before the call of deflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating avail_in or avail_out accordingly; avail_out
+ should never be zero before the call. The application can consume the
+ compressed output when it wants, for example when the output buffer is full
+ (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
+ and with zero avail_out, it must be called again after making room in the
+ output buffer because there might be more output pending.
+
+ If the parameter flush is set to Z_SYNC_FLUSH, all pending output is
+ flushed to the output buffer and the output is aligned on a byte boundary, so
+ that the decompressor can get all input data available so far. (In particular
+ avail_in is zero after the call if enough output space has been provided
+ before the call.) Flushing may degrade compression for some compression
+ algorithms and so it should be used only when necessary.
+
+ If flush is set to Z_FULL_FLUSH, all output is flushed as with
+ Z_SYNC_FLUSH, and the compression state is reset so that decompression can
+ restart from this point if previous compressed data has been damaged or if
+ random access is desired. Using Z_FULL_FLUSH too often can seriously degrade
+ the compression.
+
+ If deflate returns with avail_out == 0, this function must be called again
+ with the same value of the flush parameter and more output space (updated
+ avail_out), until the flush is complete (deflate returns with non-zero
+ avail_out).
+
+ If the parameter flush is set to Z_FINISH, pending input is processed,
+ pending output is flushed and deflate returns with Z_STREAM_END if there
+ was enough output space; if deflate returns with Z_OK, this function must be
+ called again with Z_FINISH and more output space (updated avail_out) but no
+ more input data, until it returns with Z_STREAM_END or an error. After
+ deflate has returned Z_STREAM_END, the only possible operations on the
+ stream are deflateReset or deflateEnd.
+
+ Z_FINISH can be used immediately after deflateInit if all the compression
+ is to be done in a single step. In this case, avail_out must be at least
+ 0.1% larger than avail_in plus 12 bytes. If deflate does not return
+ Z_STREAM_END, then it must be called again as described above.
+
+ deflate() sets strm->adler to the adler32 checksum of all input read
+ so far (that is, total_in bytes).
+
+ deflate() may update data_type if it can make a good guess about
+ the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered
+ binary. This field is only for information purposes and does not affect
+ the compression algorithm in any manner.
+
+ deflate() returns Z_OK if some progress has been made (more input
+ processed or more output produced), Z_STREAM_END if all input has been
+ consumed and all output has been produced (only when flush is set to
+ Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
+ if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible
+ (for example avail_in or avail_out was zero).
+*/
+
+
+ZEXTERN int ZEXPORT zlib_deflateEnd OF((z_streamp strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
+ stream state was inconsistent, Z_DATA_ERROR if the stream was freed
+ prematurely (some input or output was discarded). In the error case,
+ msg may be set but then points to a static string (which must not be
+ deallocated).
+*/
+
+
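Unlike stock zlib, this in-kernel interface expects the caller to allocate the workspace itself, sized by zlib_deflate_workspacesize(). A hedged sketch of single-shot compression under that model; sizing of dst and error handling are simplified, and nothing here is taken from the patch:

#include <linux/zlib.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Sketch: compress src into dst in one Z_FINISH call, supplying the
 * per-stream workspace by hand as zlib_deflate_workspacesize() requires. */
static int compress_once(void *dst, unsigned int *dst_len,
			 void *src, unsigned int src_len)
{
	z_stream s;
	int err;

	memset(&s, 0, sizeof(s));
	s.workspace = vmalloc(zlib_deflate_workspacesize());
	if (!s.workspace)
		return -ENOMEM;

	err = zlib_deflateInit(&s, Z_DEFAULT_COMPRESSION);
	if (err == Z_OK) {
		s.next_in = src;
		s.avail_in = src_len;
		s.next_out = dst;
		s.avail_out = *dst_len;

		err = zlib_deflate(&s, Z_FINISH);	/* expect Z_STREAM_END */
		*dst_len = s.total_out;
		zlib_deflateEnd(&s);
	}
	vfree(s.workspace);
	return (err == Z_STREAM_END) ? 0 : -EIO;
}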
+ZEXTERN int ZEXPORT zlib_inflate_workspacesize OF((void));
+/*
+  Returns the number of bytes that need to be allocated for a per-
+  stream workspace.  A buffer of this size should be allocated and its
+  address stored in stream->workspace before calling zlib_inflateInit().
+*/
+
+/*
+ZEXTERN int ZEXPORT zlib_inflateInit OF((z_streamp strm));
+
+ Initializes the internal stream state for decompression. The fields
+ next_in, avail_in, and workspace must be initialized before by
+ the caller. If next_in is not Z_NULL and avail_in is large enough (the exact
+ value depends on the compression method), inflateInit determines the
+ compression method from the zlib header and allocates all data structures
+ accordingly; otherwise the allocation will be deferred to the first call of
+ inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to
+ use default allocation functions.
+
+ inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
+ version assumed by the caller. msg is set to null if there is no error
+ message. inflateInit does not perform any decompression apart from reading
+ the zlib header if present: this will be done by inflate(). (So next_in and
+ avail_in may be modified, but next_out and avail_out are unchanged.)
+*/
+
+
+ZEXTERN int ZEXPORT zlib_inflate OF((z_streamp strm, int flush));
+/*
+ inflate decompresses as much data as possible, and stops when the input
+  buffer becomes empty or the output buffer becomes full. It may
+  introduce some output latency (reading input without producing any output)
+ except when forced to flush.
+
+ The detailed semantics are as follows. inflate performs one or both of the
+ following actions:
+
+ - Decompress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in is updated and processing
+ will resume at this point for the next call of inflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. inflate() provides as much output as possible, until there
+ is no more input data or no more space in the output buffer (see below
+ about the flush parameter).
+
+ Before the call of inflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating the next_* and avail_* values accordingly.
+ The application can consume the uncompressed output when it wants, for
+ example when the output buffer is full (avail_out == 0), or after each
+ call of inflate(). If inflate returns Z_OK and with zero avail_out, it
+ must be called again after making room in the output buffer because there
+ might be more output pending.
+
+ If the parameter flush is set to Z_SYNC_FLUSH, inflate flushes as much
+ output as possible to the output buffer. The flushing behavior of inflate is
+ not specified for values of the flush parameter other than Z_SYNC_FLUSH
+ and Z_FINISH, but the current implementation actually flushes as much output
+ as possible anyway.
+
+ inflate() should normally be called until it returns Z_STREAM_END or an
+ error. However if all decompression is to be performed in a single step
+ (a single call of inflate), the parameter flush should be set to
+ Z_FINISH. In this case all pending input is processed and all pending
+ output is flushed; avail_out must be large enough to hold all the
+ uncompressed data. (The size of the uncompressed data may have been saved
+ by the compressor for this purpose.) The next operation on this stream must
+ be inflateEnd to deallocate the decompression state. The use of Z_FINISH
+ is never required, but can be used to inform inflate that a faster routine
+ may be used for the single inflate() call.
+
+ If a preset dictionary is needed at this point (see inflateSetDictionary
+  below), inflate sets strm->adler to the adler32 checksum of the
+ dictionary chosen by the compressor and returns Z_NEED_DICT; otherwise
+ it sets strm->adler to the adler32 checksum of all output produced
+ so far (that is, total_out bytes) and returns Z_OK, Z_STREAM_END or
+ an error code as described below. At the end of the stream, inflate()
+ checks that its computed adler32 checksum is equal to that saved by the
+ compressor and returns Z_STREAM_END only if the checksum is correct.
+
+ inflate() returns Z_OK if some progress has been made (more input processed
+ or more output produced), Z_STREAM_END if the end of the compressed data has
+ been reached and all uncompressed output has been produced, Z_NEED_DICT if a
+ preset dictionary is needed at this point, Z_DATA_ERROR if the input data was
+ corrupted (input stream not conforming to the zlib format or incorrect
+ adler32 checksum), Z_STREAM_ERROR if the stream structure was inconsistent
+ (for example if next_in or next_out was NULL), Z_MEM_ERROR if there was not
+ enough memory, Z_BUF_ERROR if no progress is possible or if there was not
+ enough room in the output buffer when Z_FINISH is used. In the Z_DATA_ERROR
+ case, the application may then call inflateSync to look for a good
+ compression block.
+*/
+
+
+ZEXTERN int ZEXPORT zlib_inflateEnd OF((z_streamp strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
+ was inconsistent. In the error case, msg may be set but then points to a
+ static string (which must not be deallocated).
+*/
+
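Decompression mirrors this: the caller provides zlib_inflate_workspacesize() bytes of workspace and, for a single-step decode, calls zlib_inflate() with Z_FINISH. A sketch under the same assumptions as the compression example above:

/* Sketch: inflate src into dst in one call; dst must already be large
 * enough for the whole uncompressed result when Z_FINISH is used. */
static int decompress_once(void *dst, unsigned int *dst_len,
			   void *src, unsigned int src_len)
{
	z_stream s;
	int err;

	memset(&s, 0, sizeof(s));
	s.workspace = vmalloc(zlib_inflate_workspacesize());
	if (!s.workspace)
		return -ENOMEM;

	s.next_in = src;		/* must be set before inflateInit */
	s.avail_in = src_len;
	err = zlib_inflateInit(&s);
	if (err == Z_OK) {
		s.next_out = dst;
		s.avail_out = *dst_len;

		err = zlib_inflate(&s, Z_FINISH);
		*dst_len = s.total_out;
		zlib_inflateEnd(&s);
	}
	vfree(s.workspace);
	return (err == Z_STREAM_END) ? 0 : -EIO;
}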
+ /* Advanced functions */
+
+/*
+ The following functions are needed only in some special applications.
+*/
+
+/*
+ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm,
+ int level,
+ int method,
+ int windowBits,
+ int memLevel,
+ int strategy));
+
+ This is another version of deflateInit with more compression options. The
+ fields next_in, zalloc, zfree and opaque must be initialized before by
+ the caller.
+
+ The method parameter is the compression method. It must be Z_DEFLATED in
+ this version of the library.
+
+ The windowBits parameter is the base two logarithm of the window size
+ (the size of the history buffer). It should be in the range 8..15 for this
+ version of the library. Larger values of this parameter result in better
+ compression at the expense of memory usage. The default value is 15 if
+ deflateInit is used instead.
+
+ The memLevel parameter specifies how much memory should be allocated
+ for the internal compression state. memLevel=1 uses minimum memory but
+ is slow and reduces compression ratio; memLevel=9 uses maximum memory
+ for optimal speed. The default value is 8. See zconf.h for total memory
+ usage as a function of windowBits and memLevel.
+
+ The strategy parameter is used to tune the compression algorithm. Use the
+ value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
+ filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
+ string match). Filtered data consists mostly of small values with a
+ somewhat random distribution. In this case, the compression algorithm is
+ tuned to compress them better. The effect of Z_FILTERED is to force more
+ Huffman coding and less string matching; it is somewhat intermediate
+ between Z_DEFAULT and Z_HUFFMAN_ONLY. The strategy parameter only affects
+ the compression ratio but not the correctness of the compressed output even
+ if it is not set appropriately.
+
+ deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid
+ method). msg is set to null if there is no error message. deflateInit2 does
+ not perform any compression: this will be done by deflate().
+*/
+
+ZEXTERN int ZEXPORT zlib_deflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the compression dictionary from the given byte sequence
+ without producing any compressed output. This function must be called
+ immediately after deflateInit, deflateInit2 or deflateReset, before any
+ call of deflate. The compressor and decompressor must use exactly the same
+ dictionary (see inflateSetDictionary).
+
+ The dictionary should consist of strings (byte sequences) that are likely
+ to be encountered later in the data to be compressed, with the most commonly
+ used strings preferably put towards the end of the dictionary. Using a
+ dictionary is most useful when the data to be compressed is short and can be
+ predicted with good accuracy; the data can then be compressed better than
+ with the default empty dictionary.
+
+ Depending on the size of the compression data structures selected by
+ deflateInit or deflateInit2, a part of the dictionary may in effect be
+ discarded, for example if the dictionary is larger than the window size in
+ deflate or deflate2. Thus the strings most likely to be useful should be
+ put at the end of the dictionary, not at the front.
+
+ Upon return of this function, strm->adler is set to the Adler32 value
+ of the dictionary; the decompressor may later use this value to determine
+ which dictionary has been used by the compressor. (The Adler32 value
+ applies to the whole dictionary even if only a subset of the dictionary is
+ actually used by the compressor.)
+
+ deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
+ parameter is invalid (such as NULL dictionary) or the stream state is
+ inconsistent (for example if deflate has already been called for this stream
+ or if the compression method is bsort). deflateSetDictionary does not
+ perform any compression: this will be done by deflate().
+*/
+
+ZEXTERN int ZEXPORT zlib_deflateCopy OF((z_streamp dest,
+ z_streamp source));
+/*
+ Sets the destination stream as a complete copy of the source stream.
+
+ This function can be useful when several compression strategies will be
+ tried, for example when there are several ways of pre-processing the input
+ data with a filter. The streams that will be discarded should then be freed
+ by calling deflateEnd. Note that deflateCopy duplicates the internal
+ compression state which can be quite large, so this strategy is slow and
+ can consume lots of memory.
+
+ deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
+ (such as zalloc being NULL). msg is left unchanged in both source and
+ destination.
+*/
+
+ZEXTERN int ZEXPORT zlib_deflateReset OF((z_streamp strm));
+/*
+ This function is equivalent to deflateEnd followed by deflateInit,
+ but does not free and reallocate all the internal compression state.
+ The stream will keep the same compression level and any other attributes
+ that may have been set by deflateInit2.
+
+ deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+ZEXTERN int ZEXPORT zlib_deflateParams OF((z_streamp strm,
+ int level,
+ int strategy));
+/*
+ Dynamically update the compression level and compression strategy. The
+ interpretation of level and strategy is as in deflateInit2. This can be
+ used to switch between compression and straight copy of the input data, or
+ to switch to a different kind of input data requiring a different
+ strategy. If the compression level is changed, the input available so far
+ is compressed with the old level (and may be flushed); the new level will
+ take effect only at the next call of deflate().
+
+ Before the call of deflateParams, the stream state must be set as for
+ a call of deflate(), since the currently available input may have to
+ be compressed and flushed. In particular, strm->avail_out must be non-zero.
+
+ deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
+ stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
+ if strm->avail_out was zero.
+*/
+
+/*
+ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm,
+ int windowBits));
+
+ This is another version of inflateInit with an extra parameter. The
+ fields next_in, avail_in, zalloc, zfree and opaque must be initialized
+ before by the caller.
+
+ The windowBits parameter is the base two logarithm of the maximum window
+ size (the size of the history buffer). It should be in the range 8..15 for
+ this version of the library. The default value is 15 if inflateInit is used
+ instead. If a compressed stream with a larger window size is given as
+ input, inflate() will return with the error code Z_DATA_ERROR instead of
+ trying to allocate a larger window.
+
+ inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_STREAM_ERROR if a parameter is invalid (such as a negative
+ memLevel). msg is set to null if there is no error message. inflateInit2
+ does not perform any decompression apart from reading the zlib header if
+ present: this will be done by inflate(). (So next_in and avail_in may be
+ modified, but next_out and avail_out are unchanged.)
+*/
+
+ZEXTERN int ZEXPORT zlib_inflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the decompression dictionary from the given uncompressed byte
+ sequence. This function must be called immediately after a call of inflate
+ if this call returned Z_NEED_DICT. The dictionary chosen by the compressor
+ can be determined from the Adler32 value returned by this call of
+ inflate. The compressor and decompressor must use exactly the same
+ dictionary (see deflateSetDictionary).
+
+ inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
+ parameter is invalid (such as NULL dictionary) or the stream state is
+ inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
+ expected one (incorrect Adler32 value). inflateSetDictionary does not
+ perform any decompression: this will be done by subsequent calls of
+ inflate().
+*/
+
+ZEXTERN int ZEXPORT zlib_inflateSync OF((z_streamp strm));
+/*
+ Skips invalid compressed data until a full flush point (see above the
+ description of deflate with Z_FULL_FLUSH) can be found, or until all
+ available input is skipped. No output is provided.
+
+ inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR
+ if no more input was provided, Z_DATA_ERROR if no flush point has been found,
+ or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
+  case, the application may save the current value of total_in which
+ indicates where valid compressed data was found. In the error case, the
+ application may repeatedly call inflateSync, providing more input each time,
+ until success or end of the input data.
+*/
+
+ZEXTERN int ZEXPORT zlib_inflateReset OF((z_streamp strm));
+/*
+ This function is equivalent to inflateEnd followed by inflateInit,
+ but does not free and reallocate all the internal decompression state.
+ The stream will keep attributes that may have been set by inflateInit2.
+
+ inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+extern int ZEXPORT zlib_inflateIncomp OF((z_stream *strm));
+/*
+ This function adds the data at next_in (avail_in bytes) to the output
+ history without performing any output. There must be no pending output,
+ and the decompressor must be expecting to see the start of a block.
+ Calling this function is equivalent to decompressing a stored block
+ containing the data at next_in (except that the data is not output).
+*/
+
+ /* various hacks, don't look :) */
+
+/* deflateInit and inflateInit are macros to allow checking the zlib version
+ * and the compiler's view of z_stream:
+ */
+ZEXTERN int ZEXPORT zlib_deflateInit_ OF((z_streamp strm, int level,
+ const char *version, int stream_size));
+ZEXTERN int ZEXPORT zlib_inflateInit_ OF((z_streamp strm,
+ const char *version, int stream_size));
+ZEXTERN int ZEXPORT zlib_deflateInit2_ OF((z_streamp strm, int level, int method,
+ int windowBits, int memLevel,
+ int strategy, const char *version,
+ int stream_size));
+ZEXTERN int ZEXPORT zlib_inflateInit2_ OF((z_streamp strm, int windowBits,
+ const char *version, int stream_size));
+#define zlib_deflateInit(strm, level) \
+ zlib_deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
+#define zlib_inflateInit(strm) \
+ zlib_inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
+#define zlib_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
+ zlib_deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
+ (strategy), ZLIB_VERSION, sizeof(z_stream))
+#define zlib_inflateInit2(strm, windowBits) \
+ zlib_inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream))
+
+
+#if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL)
+ struct internal_state {int dummy;}; /* hack for buggy compilers */
+#endif
+
+ZEXTERN const char * ZEXPORT zlib_zError OF((int err));
+ZEXTERN int ZEXPORT zlib_inflateSyncPoint OF((z_streamp z));
+ZEXTERN const uLongf * ZEXPORT zlib_get_crc_table OF((void));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ZLIB_H */
diff --git a/include/linux/zlib_fs.h b/include/linux/zlib_fs.h
index 66efc18d17f8..923e1ae55182 100644
--- a/include/linux/zlib_fs.h
+++ b/include/linux/zlib_fs.h
@@ -1,679 +1,36 @@
-/* zlib.h -- interface of the 'zlib' general purpose compression library
- version 1.1.3, July 9th, 1998
-
- Copyright (C) 1995-1998 Jean-loup Gailly and Mark Adler
-
- This software is provided 'as-is', without any express or implied
- warranty. In no event will the authors be held liable for any damages
- arising from the use of this software.
-
- Permission is granted to anyone to use this software for any purpose,
- including commercial applications, and to alter it and redistribute it
- freely, subject to the following restrictions:
-
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
- 3. This notice may not be removed or altered from any source distribution.
-
- Jean-loup Gailly Mark Adler
- jloup@gzip.org madler@alumni.caltech.edu
-
-
- The data format used by the zlib library is described by RFCs (Request for
- Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
- (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
-*/
-
-#ifndef _ZLIB_H
-#define _ZLIB_H
-
-#include "zconf.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define ZLIB_VERSION "1.1.3"
-
-/*
- The 'zlib' compression library provides in-memory compression and
- decompression functions, including integrity checks of the uncompressed
- data. This version of the library supports only one compression method
- (deflation) but other algorithms will be added later and will have the same
- stream interface.
-
- Compression can be done in a single step if the buffers are large
- enough (for example if an input file is mmap'ed), or can be done by
- repeated calls of the compression function. In the latter case, the
- application must provide more input and/or consume the output
- (providing more output space) before each call.
-
- The library also supports reading and writing files in gzip (.gz) format
- with an interface similar to that of stdio.
-
- The library does not install any signal handler. The decoder checks
- the consistency of the compressed data, so the library should never
- crash even in case of corrupted input.
-*/
-
-typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
-typedef void (*free_func) OF((voidpf opaque, voidpf address));
-
-struct internal_state;
-
-typedef struct z_stream_s {
- Bytef *next_in; /* next input byte */
- uInt avail_in; /* number of bytes available at next_in */
- uLong total_in; /* total nb of input bytes read so far */
-
- Bytef *next_out; /* next output byte should be put there */
- uInt avail_out; /* remaining free space at next_out */
- uLong total_out; /* total nb of bytes output so far */
-
- char *msg; /* last error message, NULL if no error */
- struct internal_state FAR *state; /* not visible by applications */
-
- void *workspace; /* memory allocated for this stream */
-
- int data_type; /* best guess about the data type: ascii or binary */
- uLong adler; /* adler32 value of the uncompressed data */
- uLong reserved; /* reserved for future use */
-} z_stream;
-
-typedef z_stream FAR *z_streamp;
-
-/*
- The application must update next_in and avail_in when avail_in has
- dropped to zero. It must update next_out and avail_out when avail_out
- has dropped to zero. The application must initialize zalloc, zfree and
- opaque before calling the init function. All other fields are set by the
- compression library and must not be updated by the application.
-
- The opaque value provided by the application will be passed as the first
- parameter for calls of zalloc and zfree. This can be useful for custom
- memory management. The compression library attaches no meaning to the
- opaque value.
-
- zalloc must return Z_NULL if there is not enough memory for the object.
- If zlib is used in a multi-threaded application, zalloc and zfree must be
- thread safe.
-
- On 16-bit systems, the functions zalloc and zfree must be able to allocate
- exactly 65536 bytes, but will not be required to allocate more than this
- if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
- pointers returned by zalloc for objects of exactly 65536 bytes *must*
- have their offset normalized to zero. The default allocation function
- provided by this library ensures this (see zutil.c). To reduce memory
- requirements and avoid any allocation of 64K objects, at the expense of
- compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
-
- The fields total_in and total_out can be used for statistics or
- progress reports. After compression, total_in holds the total size of
- the uncompressed data and may be saved for use in the decompressor
- (particularly if the decompressor wants to decompress everything in
- a single step).
-*/
-
- /* constants */
-
-#define Z_NO_FLUSH 0
-#define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */
-#define Z_SYNC_FLUSH 2
-#define Z_FULL_FLUSH 3
-#define Z_FINISH 4
-/* Allowed flush values; see deflate() below for details */
-
-#define Z_OK 0
-#define Z_STREAM_END 1
-#define Z_NEED_DICT 2
-#define Z_ERRNO (-1)
-#define Z_STREAM_ERROR (-2)
-#define Z_DATA_ERROR (-3)
-#define Z_MEM_ERROR (-4)
-#define Z_BUF_ERROR (-5)
-#define Z_VERSION_ERROR (-6)
-/* Return codes for the compression/decompression functions. Negative
- * values are errors, positive values are used for special but normal events.
- */
-
-#define Z_NO_COMPRESSION 0
-#define Z_BEST_SPEED 1
-#define Z_BEST_COMPRESSION 9
-#define Z_DEFAULT_COMPRESSION (-1)
-/* compression levels */
-
-#define Z_FILTERED 1
-#define Z_HUFFMAN_ONLY 2
-#define Z_DEFAULT_STRATEGY 0
-/* compression strategy; see deflateInit2() below for details */
-
-#define Z_BINARY 0
-#define Z_ASCII 1
-#define Z_UNKNOWN 2
-/* Possible values of the data_type field */
-
-#define Z_DEFLATED 8
-/* The deflate compression method (the only one supported in this version) */
-
-#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
-
- /* basic functions */
-
-ZEXTERN const char * ZEXPORT zlib_fs_zlibVersion OF((void));
-/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
- If the first character differs, the library code actually used is
- not compatible with the zlib.h header file used by the application.
- This check is automatically made by deflateInit and inflateInit.
- */
-
-/*
-ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level));
-
- Initializes the internal stream state for compression. The fields
- zalloc, zfree and opaque must be initialized before by the caller.
- If zalloc and zfree are set to Z_NULL, deflateInit updates them to
- use default allocation functions.
-
- The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
- 1 gives best speed, 9 gives best compression, 0 gives no compression at
- all (the input data is simply copied a block at a time).
- Z_DEFAULT_COMPRESSION requests a default compromise between speed and
- compression (currently equivalent to level 6).
-
- deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_STREAM_ERROR if level is not a valid compression level,
- Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
- with the version assumed by the caller (ZLIB_VERSION).
- msg is set to null if there is no error message. deflateInit does not
- perform any compression: this will be done by deflate().
-*/
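For example, a hedged fragment showing the documented return-code handling; the stream is assumed to have been set up as in the sketch above, and Z_BEST_SPEED is one of the levels defined earlier:

        static int start_fast_deflate(z_stream *strm)
        {
                int ret = zlib_fs_deflateInit(strm, Z_BEST_SPEED);

                /* Z_MEM_ERROR, Z_STREAM_ERROR or Z_VERSION_ERROR on failure;
                 * no compression has been performed yet either way. */
                return ret;
        }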
-
-
-ZEXTERN int ZEXPORT zlib_fs_deflate OF((z_streamp strm, int flush));
-/*
- deflate compresses as much data as possible, and stops when the input
- buffer becomes empty or the output buffer becomes full. It may introduce some
- output latency (reading input without producing any output) except when
- forced to flush.
-
- The detailed semantics are as follows. deflate performs one or both of the
- following actions:
-
- - Compress more input starting at next_in and update next_in and avail_in
- accordingly. If not all input can be processed (because there is not
- enough room in the output buffer), next_in and avail_in are updated and
- processing will resume at this point for the next call of deflate().
-
- - Provide more output starting at next_out and update next_out and avail_out
- accordingly. This action is forced if the parameter flush is non zero.
- Forcing flush frequently degrades the compression ratio, so this parameter
- should be set only when necessary (in interactive applications).
- Some output may be provided even if flush is not set.
-
- Before the call of deflate(), the application should ensure that at least
- one of the actions is possible, by providing more input and/or consuming
- more output, and updating avail_in or avail_out accordingly; avail_out
- should never be zero before the call. The application can consume the
- compressed output when it wants, for example when the output buffer is full
- (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
- with zero avail_out, it must be called again after making room in the
- output buffer because there might be more output pending.
-
- If the parameter flush is set to Z_SYNC_FLUSH, all pending output is
- flushed to the output buffer and the output is aligned on a byte boundary, so
- that the decompressor can get all input data available so far. (In particular
- avail_in is zero after the call if enough output space has been provided
- before the call.) Flushing may degrade compression for some compression
- algorithms and so it should be used only when necessary.
-
- If flush is set to Z_FULL_FLUSH, all output is flushed as with
- Z_SYNC_FLUSH, and the compression state is reset so that decompression can
- restart from this point if previous compressed data has been damaged or if
- random access is desired. Using Z_FULL_FLUSH too often can seriously degrade
- the compression.
-
- If deflate returns with avail_out == 0, this function must be called again
- with the same value of the flush parameter and more output space (updated
- avail_out), until the flush is complete (deflate returns with non-zero
- avail_out).
-
- If the parameter flush is set to Z_FINISH, pending input is processed,
- pending output is flushed and deflate returns with Z_STREAM_END if there
- was enough output space; if deflate returns with Z_OK, this function must be
- called again with Z_FINISH and more output space (updated avail_out) but no
- more input data, until it returns with Z_STREAM_END or an error. After
- deflate has returned Z_STREAM_END, the only possible operations on the
- stream are deflateReset or deflateEnd.
-
- Z_FINISH can be used immediately after deflateInit if all the compression
- is to be done in a single step. In this case, avail_out must be at least
- 0.1% larger than avail_in plus 12 bytes. If deflate does not return
- Z_STREAM_END, then it must be called again as described above.
-
- deflate() sets strm->adler to the adler32 checksum of all input read
- so far (that is, total_in bytes).
-
- deflate() may update data_type if it can make a good guess about
- the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered
- binary. This field is only for information purposes and does not affect
- the compression algorithm in any manner.
-
- deflate() returns Z_OK if some progress has been made (more input
- processed or more output produced), Z_STREAM_END if all input has been
- consumed and all output has been produced (only when flush is set to
- Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
- if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible
- (for example avail_in or avail_out was zero).
-*/
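To illustrate the single-step Z_FINISH usage described above, a minimal sketch; it assumes memset() is available to the caller, the src/dst buffers are hypothetical, and the kernel flavour may additionally require a pre-allocated workspace, as on the inflate side below:

        static int compress_whole_buffer(Bytef *src, uInt src_len,
                                         Bytef *dst, uInt dst_len)
        {
                z_stream strm;
                int ret;

                memset(&strm, 0, sizeof(strm)); /* zalloc/zfree/opaque = Z_NULL */
                strm.next_in   = src;
                strm.avail_in  = src_len;
                strm.next_out  = dst;
                strm.avail_out = dst_len;       /* >= src_len + 0.1% + 12 bytes */

                ret = zlib_fs_deflateInit(&strm, Z_DEFAULT_COMPRESSION);
                if (ret != Z_OK)
                        return ret;

                /* With Z_FINISH and enough output room, one call suffices and
                 * returns Z_STREAM_END; Z_OK here means dst was too small. */
                ret = zlib_fs_deflate(&strm, Z_FINISH);
                zlib_fs_deflateEnd(&strm);
                if (ret == Z_STREAM_END)
                        return Z_OK;
                return ret == Z_OK ? Z_BUF_ERROR : ret;
        }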
-
-
-ZEXTERN int ZEXPORT zlib_fs_deflateEnd OF((z_streamp strm));
-/*
- All dynamically allocated data structures for this stream are freed.
- This function discards any unprocessed input and does not flush any
- pending output.
-
- deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
- stream state was inconsistent, Z_DATA_ERROR if the stream was freed
- prematurely (some input or output was discarded). In the error case,
- msg may be set but then points to a static string (which must not be
- deallocated).
-*/
-
-
-ZEXTERN int ZEXPORT zlib_fs_inflate_workspacesize OF((void));
-/*
- Returns the number of bytes that need to be allocated for a per-
- stream workspace. A pointer to a buffer of this size must be stored
- in strm->workspace before calling zlib_fs_inflateInit().
-*/
-
-/*
-ZEXTERN int ZEXPORT zlib_fs_inflateInit OF((z_streamp strm));
-
- Initializes the internal stream state for decompression. The fields
- next_in, avail_in, and workspace must be initialized before by
- the caller. If next_in is not Z_NULL and avail_in is large enough (the exact
- value depends on the compression method), inflateInit determines the
- compression method from the zlib header and allocates all data structures
- accordingly; otherwise the allocation will be deferred to the first call of
- inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to
- use default allocation functions.
-
- inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
- memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
- version assumed by the caller. msg is set to null if there is no error
- message. inflateInit does not perform any decompression apart from reading
- the zlib header if present: this will be done by inflate(). (So next_in and
- avail_in may be modified, but next_out and avail_out are unchanged.)
-*/
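As a hedged sketch of the workspace handling just described, assuming vmalloc()/vfree() are available to the caller (they are not part of this header):

        static int setup_inflate_stream(z_stream *strm)
        {
                /* A per-stream workspace must be supplied by the caller and
                 * vfree()d again after zlib_fs_inflateEnd(). */
                strm->workspace = vmalloc(zlib_fs_inflate_workspacesize());
                if (!strm->workspace)
                        return Z_MEM_ERROR;

                /* Header parsing is deferred until the first inflate() call. */
                strm->next_in  = Z_NULL;
                strm->avail_in = 0;
                return zlib_fs_inflateInit(strm);
        }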
-
-
-ZEXTERN int ZEXPORT zlib_fs_inflate OF((z_streamp strm, int flush));
-/*
- inflate decompresses as much data as possible, and stops when the input
- buffer becomes empty or the output buffer becomes full. It may introduce
- some output latency (reading input without producing any output)
- except when forced to flush.
-
- The detailed semantics are as follows. inflate performs one or both of the
- following actions:
-
- - Decompress more input starting at next_in and update next_in and avail_in
- accordingly. If not all input can be processed (because there is not
- enough room in the output buffer), next_in is updated and processing
- will resume at this point for the next call of inflate().
-
- - Provide more output starting at next_out and update next_out and avail_out
- accordingly. inflate() provides as much output as possible, until there
- is no more input data or no more space in the output buffer (see below
- about the flush parameter).
-
- Before the call of inflate(), the application should ensure that at least
- one of the actions is possible, by providing more input and/or consuming
- more output, and updating the next_* and avail_* values accordingly.
- The application can consume the uncompressed output when it wants, for
- example when the output buffer is full (avail_out == 0), or after each
- call of inflate(). If inflate returns Z_OK with zero avail_out, it
- must be called again after making room in the output buffer because there
- might be more output pending.
-
- If the parameter flush is set to Z_SYNC_FLUSH, inflate flushes as much
- output as possible to the output buffer. The flushing behavior of inflate is
- not specified for values of the flush parameter other than Z_SYNC_FLUSH
- and Z_FINISH, but the current implementation actually flushes as much output
- as possible anyway.
-
- inflate() should normally be called until it returns Z_STREAM_END or an
- error. However if all decompression is to be performed in a single step
- (a single call of inflate), the parameter flush should be set to
- Z_FINISH. In this case all pending input is processed and all pending
- output is flushed; avail_out must be large enough to hold all the
- uncompressed data. (The size of the uncompressed data may have been saved
- by the compressor for this purpose.) The next operation on this stream must
- be inflateEnd to deallocate the decompression state. The use of Z_FINISH
- is never required, but can be used to inform inflate that a faster routine
- may be used for the single inflate() call.
-
- If a preset dictionary is needed at this point (see inflateSetDictionary
- below), inflate sets strm->adler to the adler32 checksum of the
- dictionary chosen by the compressor and returns Z_NEED_DICT; otherwise
- it sets strm->adler to the adler32 checksum of all output produced
- so far (that is, total_out bytes) and returns Z_OK, Z_STREAM_END or
- an error code as described below. At the end of the stream, inflate()
- checks that its computed adler32 checksum is equal to that saved by the
- compressor and returns Z_STREAM_END only if the checksum is correct.
-
- inflate() returns Z_OK if some progress has been made (more input processed
- or more output produced), Z_STREAM_END if the end of the compressed data has
- been reached and all uncompressed output has been produced, Z_NEED_DICT if a
- preset dictionary is needed at this point, Z_DATA_ERROR if the input data was
- corrupted (input stream not conforming to the zlib format or incorrect
- adler32 checksum), Z_STREAM_ERROR if the stream structure was inconsistent
- (for example if next_in or next_out was NULL), Z_MEM_ERROR if there was not
- enough memory, Z_BUF_ERROR if no progress is possible or if there was not
- enough room in the output buffer when Z_FINISH is used. In the Z_DATA_ERROR
- case, the application may then call inflateSync to look for a good
- compression block.
-*/
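A minimal decompression loop following the call pattern above; the stream is assumed to be set up as in the workspace sketch, with compressed input already attached to next_in/avail_in:

        static int inflate_to_buffer(z_stream *strm, Bytef *dst, uInt dst_len)
        {
                int ret;

                strm->next_out  = dst;
                strm->avail_out = dst_len;

                /* Keep calling while progress is reported and both buffers
                 * still have data/room. Z_STREAM_END means the stream is
                 * complete; Z_OK means more input or output space is needed. */
                do {
                        ret = zlib_fs_inflate(strm, Z_SYNC_FLUSH);
                } while (ret == Z_OK && strm->avail_in != 0 &&
                         strm->avail_out != 0);

                return ret;
        }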
-
-
-ZEXTERN int ZEXPORT zlib_fs_inflateEnd OF((z_streamp strm));
-/*
- All dynamically allocated data structures for this stream are freed.
- This function discards any unprocessed input and does not flush any
- pending output.
-
- inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
- was inconsistent. In the error case, msg may be set but then points to a
- static string (which must not be deallocated).
-*/
-
- /* Advanced functions */
-
-/*
- The following functions are needed only in some special applications.
-*/
-
-/*
-ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm,
- int level,
- int method,
- int windowBits,
- int memLevel,
- int strategy));
-
- This is another version of deflateInit with more compression options. The
- fields next_in, zalloc, zfree and opaque must be initialized before by
- the caller.
-
- The method parameter is the compression method. It must be Z_DEFLATED in
- this version of the library.
-
- The windowBits parameter is the base two logarithm of the window size
- (the size of the history buffer). It should be in the range 8..15 for this
- version of the library. Larger values of this parameter result in better
- compression at the expense of memory usage. The default value is 15 if
- deflateInit is used instead.
-
- The memLevel parameter specifies how much memory should be allocated
- for the internal compression state. memLevel=1 uses minimum memory but
- is slow and reduces compression ratio; memLevel=9 uses maximum memory
- for optimal speed. The default value is 8. See zconf.h for total memory
- usage as a function of windowBits and memLevel.
-
- The strategy parameter is used to tune the compression algorithm. Use the
- value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
- filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
- string match). Filtered data consists mostly of small values with a
- somewhat random distribution. In this case, the compression algorithm is
- tuned to compress them better. The effect of Z_FILTERED is to force more
- Huffman coding and less string matching; it is somewhat intermediate
- between Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. The strategy parameter only affects
- the compression ratio but not the correctness of the compressed output even
- if it is not set appropriately.
-
- deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
- memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid
- method). msg is set to null if there is no error message. deflateInit2 does
- not perform any compression: this will be done by deflate().
-*/
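For illustration, a hedged deflateInit2 call with the parameters spelled out; the values are chosen arbitrarily within the documented ranges:

        static int init_small_window(z_stream *strm)
        {
                /* 4K history window (2^12) and moderate memory use. */
                return zlib_fs_deflateInit2(strm, Z_BEST_COMPRESSION, Z_DEFLATED,
                                            12 /* windowBits */, 6 /* memLevel */,
                                            Z_DEFAULT_STRATEGY);
        }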
-
-ZEXTERN int ZEXPORT zlib_fs_deflateSetDictionary OF((z_streamp strm,
- const Bytef *dictionary,
- uInt dictLength));
-/*
- Initializes the compression dictionary from the given byte sequence
- without producing any compressed output. This function must be called
- immediately after deflateInit, deflateInit2 or deflateReset, before any
- call of deflate. The compressor and decompressor must use exactly the same
- dictionary (see inflateSetDictionary).
-
- The dictionary should consist of strings (byte sequences) that are likely
- to be encountered later in the data to be compressed, with the most commonly
- used strings preferably put towards the end of the dictionary. Using a
- dictionary is most useful when the data to be compressed is short and can be
- predicted with good accuracy; the data can then be compressed better than
- with the default empty dictionary.
-
- Depending on the size of the compression data structures selected by
- deflateInit or deflateInit2, a part of the dictionary may in effect be
- discarded, for example if the dictionary is larger than the window size in
- deflate or deflate2. Thus the strings most likely to be useful should be
- put at the end of the dictionary, not at the front.
-
- Upon return of this function, strm->adler is set to the Adler32 value
- of the dictionary; the decompressor may later use this value to determine
- which dictionary has been used by the compressor. (The Adler32 value
- applies to the whole dictionary even if only a subset of the dictionary is
- actually used by the compressor.)
-
- deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
- parameter is invalid (such as NULL dictionary) or the stream state is
- inconsistent (for example if deflate has already been called for this stream
- or if the compression method is bsort). deflateSetDictionary does not
- perform any compression: this will be done by deflate().
-*/
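A short sketch of the call ordering described above; dict and dict_len are hypothetical and must match what the decompressor later passes to zlib_fs_inflateSetDictionary:

        static int init_with_dictionary(z_stream *strm,
                                        const Bytef *dict, uInt dict_len)
        {
                int ret = zlib_fs_deflateInit(strm, Z_DEFAULT_COMPRESSION);

                if (ret == Z_OK)
                        ret = zlib_fs_deflateSetDictionary(strm, dict, dict_len);
                /* strm->adler now holds the Adler32 of the dictionary, which
                 * the decompressor can later use to identify it. */
                return ret;
        }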
-
-ZEXTERN int ZEXPORT zlib_fs_deflateCopy OF((z_streamp dest,
- z_streamp source));
-/*
- Sets the destination stream as a complete copy of the source stream.
-
- This function can be useful when several compression strategies will be
- tried, for example when there are several ways of pre-processing the input
- data with a filter. The streams that will be discarded should then be freed
- by calling deflateEnd. Note that deflateCopy duplicates the internal
- compression state which can be quite large, so this strategy is slow and
- can consume lots of memory.
-
- deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
- (such as zalloc being NULL). msg is left unchanged in both source and
- destination.
-*/
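For example, a hedged fragment that snapshots the current state before experimenting with an alternative strategy, then discards the copy:

        static int try_alternative(z_stream *strm)
        {
                z_stream trial;
                int ret = zlib_fs_deflateCopy(&trial, strm);

                if (ret != Z_OK)
                        return ret;
                /* ... experiment with deflate() on &trial here ... */
                zlib_fs_deflateEnd(&trial);     /* discard the copied state */
                return Z_OK;
        }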
-
-ZEXTERN int ZEXPORT zlib_fs_deflateReset OF((z_streamp strm));
-/*
- This function is equivalent to deflateEnd followed by deflateInit,
- but does not free and reallocate all the internal compression state.
- The stream will keep the same compression level and any other attributes
- that may have been set by deflateInit2.
-
- deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
- stream state was inconsistent (such as zalloc or state being NULL).
-*/
-
-ZEXTERN int ZEXPORT zlib_fs_deflateParams OF((z_streamp strm,
- int level,
- int strategy));
-/*
- Dynamically update the compression level and compression strategy. The
- interpretation of level and strategy is as in deflateInit2. This can be
- used to switch between compression and straight copy of the input data, or
- to switch to a different kind of input data requiring a different
- strategy. If the compression level is changed, the input available so far
- is compressed with the old level (and may be flushed); the new level will
- take effect only at the next call of deflate().
-
- Before the call of deflateParams, the stream state must be set as for
- a call of deflate(), since the currently available input may have to
- be compressed and flushed. In particular, strm->avail_out must be non-zero.
-
- deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
- stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
- if strm->avail_out was zero.
-*/
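As a sketch of the mid-stream switch described above (the output buffer is hypothetical; avail_out must be non-zero so pending input can be flushed with the old settings):

        static int switch_to_stored(z_stream *strm, Bytef *out, uInt out_len)
        {
                strm->next_out  = out;
                strm->avail_out = out_len;
                /* Fall back to a stored copy for already-compressed input. */
                return zlib_fs_deflateParams(strm, Z_NO_COMPRESSION,
                                             Z_DEFAULT_STRATEGY);
        }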
-
-/*
-ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm,
- int windowBits));
-
- This is another version of inflateInit with an extra parameter. The
- fields next_in, avail_in, zalloc, zfree and opaque must be initialized
- before by the caller.
-
- The windowBits parameter is the base two logarithm of the maximum window
- size (the size of the history buffer). It should be in the range 8..15 for
- this version of the library. The default value is 15 if inflateInit is used
- instead. If a compressed stream with a larger window size is given as
- input, inflate() will return with the error code Z_DATA_ERROR instead of
- trying to allocate a larger window.
-
- inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
- memory, Z_STREAM_ERROR if a parameter is invalid (such as a negative
- memLevel). msg is set to null if there is no error message. inflateInit2
- does not perform any decompression apart from reading the zlib header if
- present: this will be done by inflate(). (So next_in and avail_in may be
- modified, but next_out and avail_out are unchanged.)
-*/
-
-ZEXTERN int ZEXPORT zlib_fs_inflateSetDictionary OF((z_streamp strm,
- const Bytef *dictionary,
- uInt dictLength));
-/*
- Initializes the decompression dictionary from the given uncompressed byte
- sequence. This function must be called immediately after a call of inflate
- if this call returned Z_NEED_DICT. The dictionary chosen by the compressor
- can be determined from the Adler32 value returned by this call of
- inflate. The compressor and decompressor must use exactly the same
- dictionary (see deflateSetDictionary).
-
- inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
- parameter is invalid (such as NULL dictionary) or the stream state is
- inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
- expected one (incorrect Adler32 value). inflateSetDictionary does not
- perform any decompression: this will be done by subsequent calls of
- inflate().
-*/
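A hedged sketch of the Z_NEED_DICT handshake; dict/dict_len are the hypothetical dictionary data matching the Adler32 value reported in strm->adler:

        static int inflate_with_dict(z_stream *strm,
                                     const Bytef *dict, uInt dict_len)
        {
                int ret = zlib_fs_inflate(strm, Z_SYNC_FLUSH);

                if (ret == Z_NEED_DICT) {
                        /* strm->adler identifies the dictionary used by the
                         * compressor; dict/dict_len must correspond to it. */
                        ret = zlib_fs_inflateSetDictionary(strm, dict, dict_len);
                        if (ret == Z_OK)
                                ret = zlib_fs_inflate(strm, Z_SYNC_FLUSH);
                }
                return ret;
        }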
-
-ZEXTERN int ZEXPORT zlib_fs_inflateSync OF((z_streamp strm));
-/*
- Skips invalid compressed data until a full flush point (see above the
- description of deflate with Z_FULL_FLUSH) can be found, or until all
- available input is skipped. No output is provided.
-
- inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR
- if no more input was provided, Z_DATA_ERROR if no flush point has been found,
- or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
- case, the application may save the current value of total_in which
- indicates where valid compressed data was found. In the error case, the
- application may repeatedly call inflateSync, providing more input each time,
- until success or end of the input data.
-*/
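For illustration, a recovery loop along the lines described above; refill_input() is a hypothetical helper that attaches more compressed data to the stream and returns non-zero at end of input:

        static int resync_stream(z_stream *strm)
        {
                int ret;

                while ((ret = zlib_fs_inflateSync(strm)) == Z_BUF_ERROR) {
                        if (refill_input(strm))         /* hypothetical helper */
                                break;                  /* end of input data   */
                }
                /* On Z_OK, strm->total_in marks where valid data resumes. */
                return ret;
        }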
-
-ZEXTERN int ZEXPORT zlib_fs_inflateReset OF((z_streamp strm));
-/*
- This function is equivalent to inflateEnd followed by inflateInit,
- but does not free and reallocate all the internal decompression state.
- The stream will keep attributes that may have been set by inflateInit2.
-
- inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
- stream state was inconsistent (such as zalloc or state being NULL).
-*/
-
- /* checksum functions */
-
-/*
- These functions are not related to compression but are exported
- anyway because they might be useful in applications using the
- compression library.
-*/
-
-ZEXTERN uLong ZEXPORT zlib_fs_adler32 OF((uLong adler, const Bytef *buf, uInt len));
-
-/*
- Update a running Adler-32 checksum with the bytes buf[0..len-1] and
- return the updated checksum. If buf is NULL, this function returns
- the required initial value for the checksum.
- An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
- much faster. Usage example:
-
- uLong adler = adler32(0L, Z_NULL, 0);
-
- while (read_buffer(buffer, length) != EOF) {
- adler = adler32(adler, buffer, length);
- }
- if (adler != original_adler) error();
-*/
-
-ZEXTERN uLong ZEXPORT zlib_fs_crc32 OF((uLong crc, const Bytef *buf, uInt len));
-/*
- Update a running crc with the bytes buf[0..len-1] and return the updated
- crc. If buf is NULL, this function returns the required initial value
- for the crc. Pre- and post-conditioning (one's complement) is performed
- within this function so it shouldn't be done by the application.
- Usage example:
-
- uLong crc = crc32(0L, Z_NULL, 0);
-
- while (read_buffer(buffer, length) != EOF) {
- crc = crc32(crc, buffer, length);
- }
- if (crc != original_crc) error();
-*/
-
-
- /* various hacks, don't look :) */
-
-/* deflateInit and inflateInit are macros to allow checking the zlib version
- * and the compiler's view of z_stream:
- */
-ZEXTERN int ZEXPORT zlib_fs_deflateInit_ OF((z_streamp strm, int level,
- const char *version, int stream_size));
-ZEXTERN int ZEXPORT zlib_fs_inflateInit_ OF((z_streamp strm,
- const char *version, int stream_size));
-ZEXTERN int ZEXPORT zlib_fs_deflateInit2_ OF((z_streamp strm, int level, int method,
- int windowBits, int memLevel,
- int strategy, const char *version,
- int stream_size));
-ZEXTERN int ZEXPORT zlib_fs_inflateInit2_ OF((z_streamp strm, int windowBits,
- const char *version, int stream_size));
+/* zlib_fs.h -- A compatibility file mapping the old zlib_fs function names
+   to the zlib functions. This will go away. */
+#ifndef _ZLIB_FS_H
+#define _ZLIB_FS_H
+
+#include <linux/zlib.h>
+
+#define zlib_fs_inflate_workspacesize zlib_inflate_workspacesize
+#define zlib_fs_deflate_workspacesize zlib_deflate_workspacesize
+#define zlib_fs_zlibVersion zlib_zlibVersion
+#define zlib_fs_deflate zlib_deflate
+#define zlib_fs_deflateEnd zlib_deflateEnd
+#define zlib_fs_inflate zlib_inflate
+#define zlib_fs_inflateEnd zlib_inflateEnd
+#define zlib_fs_deflateSetDictionary zlib_deflateSetDictionary
+#define zlib_fs_deflateCopy zlib_deflateCopy
+#define zlib_fs_deflateReset zlib_deflateReset
+#define zlib_fs_deflateParams zlib_deflateParams
+#define zlib_fs_inflateIncomp zlib_inflateIncomp
+#define zlib_fs_inflateSetDictionary zlib_inflateSetDictionary
+#define zlib_fs_inflateSync zlib_inflateSync
+#define zlib_fs_inflateReset zlib_inflateReset
+#define zlib_fs_adler32 zlib_adler32
+#define zlib_fs_crc32 zlib_crc32
#define zlib_fs_deflateInit(strm, level) \
- zlib_fs_deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
+ zlib_deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
#define zlib_fs_inflateInit(strm) \
- zlib_fs_inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
-#define zlib_fs_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
- zlib_fs_deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
- (strategy), ZLIB_VERSION, sizeof(z_stream))
+ zlib_inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
+#define zlib_fs_deflateInit2(strm, level, method, windowBits, memLevel, strategy)\
+ zlib_deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
+ (strategy), ZLIB_VERSION, sizeof(z_stream))
#define zlib_fs_inflateInit2(strm, windowBits) \
- zlib_fs_inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream))
-
-
-#if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL)
- struct internal_state {int dummy;}; /* hack for buggy compilers */
-#endif
-
-ZEXTERN const char * ZEXPORT zlib_fs_zError OF((int err));
-ZEXTERN int ZEXPORT zlib_fs_inflateSyncPoint OF((z_streamp z));
-ZEXTERN const uLongf * ZEXPORT zlib_fs_get_crc_table OF((void));
-
-#ifdef __cplusplus
-}
-#endif
+ zlib_inflateInit2_((strm), (windowBits), ZLIB_VERSION, \
+ sizeof(z_stream))
-#endif /* _ZLIB_H */
+#endif /* _ZLIB_FS_H */
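With the mappings above, existing callers written against the old zlib_fs names keep compiling unchanged while using the shared zlib implementation; a hypothetical fragment (not part of this patch), where strm and err are assumed to exist in the caller:

        #include <linux/zlib_fs.h>

        /* Expands to zlib_inflateInit_(&strm, ZLIB_VERSION, sizeof(z_stream)). */
        err = zlib_fs_inflateInit(&strm);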