Diffstat (limited to 'reftable')
-rw-r--r--  reftable/basics.c                 42
-rw-r--r--  reftable/basics.h                 53
-rw-r--r--  reftable/block.c                  30
-rw-r--r--  reftable/block.h                  14
-rw-r--r--  reftable/blocksource.c             8
-rw-r--r--  reftable/merged.c                 18
-rw-r--r--  reftable/merged.h                  3
-rw-r--r--  reftable/pq.c                      2
-rw-r--r--  reftable/reader.c                 46
-rw-r--r--  reftable/reader.h                 10
-rw-r--r--  reftable/record.c                137
-rw-r--r--  reftable/record.h                 25
-rw-r--r--  reftable/reftable-basics.h        13
-rw-r--r--  reftable/reftable-blocksource.h   13
-rw-r--r--  reftable/reftable-merged.h         4
-rw-r--r--  reftable/reftable-reader.h         2
-rw-r--r--  reftable/reftable-record.h        16
-rw-r--r--  reftable/reftable-stack.h          3
-rw-r--r--  reftable/reftable-writer.h        19
-rw-r--r--  reftable/stack.c                 231
-rw-r--r--  reftable/system.c                126
-rw-r--r--  reftable/system.h                 89
-rw-r--r--  reftable/writer.c                 44
23 files changed, 633 insertions(+), 315 deletions(-)
diff --git a/reftable/basics.c b/reftable/basics.c
index bc4fcc9144..3b5ea27bbd 100644
--- a/reftable/basics.c
+++ b/reftable/basics.c
@@ -17,6 +17,8 @@ static void (*reftable_free_ptr)(void *);
void *reftable_malloc(size_t sz)
{
+ if (!sz)
+ return NULL;
if (reftable_malloc_ptr)
return (*reftable_malloc_ptr)(sz);
return malloc(sz);
@@ -24,6 +26,11 @@ void *reftable_malloc(size_t sz)
void *reftable_realloc(void *p, size_t sz)
{
+ if (!sz) {
+ reftable_free(p);
+ return NULL;
+ }
+
if (reftable_realloc_ptr)
return (*reftable_realloc_ptr)(p, sz);
return realloc(p, sz);
@@ -117,11 +124,8 @@ int reftable_buf_add(struct reftable_buf *buf, const void *data, size_t len)
size_t newlen = buf->len + len;
if (newlen + 1 > buf->alloc) {
- char *reallocated = buf->buf;
- REFTABLE_ALLOC_GROW(reallocated, newlen + 1, buf->alloc);
- if (!reallocated)
+ if (REFTABLE_ALLOC_GROW(buf->buf, newlen + 1, buf->alloc))
return REFTABLE_OUT_OF_MEMORY_ERROR;
- buf->buf = reallocated;
}
memcpy(buf->buf + buf->len, data, len);
@@ -226,11 +230,9 @@ char **parse_names(char *buf, int size)
next = end;
}
if (p < next) {
- char **names_grown = names;
- REFTABLE_ALLOC_GROW(names_grown, names_len + 1, names_cap);
- if (!names_grown)
+ if (REFTABLE_ALLOC_GROW(names, names_len + 1,
+ names_cap))
goto err;
- names = names_grown;
names[names_len] = reftable_strdup(p);
if (!names[names_len++])
@@ -239,7 +241,8 @@ char **parse_names(char *buf, int size)
p = next + 1;
}
- REFTABLE_REALLOC_ARRAY(names, names_len + 1);
+ if (REFTABLE_ALLOC_GROW(names, names_len + 1, names_cap))
+ goto err;
names[names_len] = NULL;
return names;
@@ -260,25 +263,24 @@ int names_equal(const char **a, const char **b)
return a[i] == b[i];
}
-int common_prefix_size(struct reftable_buf *a, struct reftable_buf *b)
+size_t common_prefix_size(struct reftable_buf *a, struct reftable_buf *b)
{
- int p = 0;
- for (; p < a->len && p < b->len; p++) {
+ size_t p = 0;
+ for (; p < a->len && p < b->len; p++)
if (a->buf[p] != b->buf[p])
break;
- }
-
return p;
}
-int hash_size(uint32_t id)
+uint32_t hash_size(enum reftable_hash id)
{
+ if (!id)
+ return REFTABLE_HASH_SIZE_SHA1;
switch (id) {
- case 0:
- case GIT_SHA1_FORMAT_ID:
- return GIT_SHA1_RAWSZ;
- case GIT_SHA256_FORMAT_ID:
- return GIT_SHA256_RAWSZ;
+ case REFTABLE_HASH_SHA1:
+ return REFTABLE_HASH_SIZE_SHA1;
+ case REFTABLE_HASH_SHA256:
+ return REFTABLE_HASH_SIZE_SHA256;
}
abort();
}
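Aside (not part of the patch): with the new zero-size handling above, a hypothetical caller sees the following behaviour, assuming the reftable headers are included.
/* Illustration only: zero-size semantics of the reworked allocators. */
void *p = reftable_malloc(0);    /* now returns NULL instead of a zero-byte block */
p = reftable_realloc(p, 16);     /* NULL input behaves like malloc(16) */
p = reftable_realloc(p, 0);      /* frees p and returns NULL */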
diff --git a/reftable/basics.h b/reftable/basics.h
index 7aa46d7c30..a2a010a0e1 100644
--- a/reftable/basics.h
+++ b/reftable/basics.h
@@ -120,15 +120,38 @@ char *reftable_strdup(const char *str);
#define REFTABLE_ALLOC_ARRAY(x, alloc) (x) = reftable_malloc(st_mult(sizeof(*(x)), (alloc)))
#define REFTABLE_CALLOC_ARRAY(x, alloc) (x) = reftable_calloc((alloc), sizeof(*(x)))
#define REFTABLE_REALLOC_ARRAY(x, alloc) (x) = reftable_realloc((x), st_mult(sizeof(*(x)), (alloc)))
-#define REFTABLE_ALLOC_GROW(x, nr, alloc) \
- do { \
- if ((nr) > alloc) { \
- alloc = 2 * (alloc) + 1; \
- if (alloc < (nr)) \
- alloc = (nr); \
- REFTABLE_REALLOC_ARRAY(x, alloc); \
- } \
- } while (0)
+
+static inline void *reftable_alloc_grow(void *p, size_t nelem, size_t elsize,
+ size_t *allocp)
+{
+ void *new_p;
+ size_t alloc = *allocp * 2 + 1;
+ if (alloc < nelem)
+ alloc = nelem;
+ new_p = reftable_realloc(p, st_mult(elsize, alloc));
+ if (!new_p)
+ return p;
+ *allocp = alloc;
+ return new_p;
+}
+
+#define REFTABLE_ALLOC_GROW(x, nr, alloc) ( \
+ (nr) > (alloc) && ( \
+ (x) = reftable_alloc_grow((x), (nr), sizeof(*(x)), &(alloc)), \
+ (nr) > (alloc) \
+ ) \
+)
+
+#define REFTABLE_ALLOC_GROW_OR_NULL(x, nr, alloc) do { \
+ size_t reftable_alloc_grow_or_null_alloc = alloc; \
+ if (REFTABLE_ALLOC_GROW((x), (nr), reftable_alloc_grow_or_null_alloc)) { \
+ REFTABLE_FREE_AND_NULL(x); \
+ alloc = 0; \
+ } else { \
+ alloc = reftable_alloc_grow_or_null_alloc; \
+ } \
+} while (0)
+
#define REFTABLE_FREE_AND_NULL(p) do { reftable_free(p); (p) = NULL; } while (0)
#ifndef REFTABLE_ALLOW_BANNED_ALLOCATORS
@@ -146,8 +169,16 @@ char *reftable_strdup(const char *str);
#endif
/* Find the longest shared prefix size of `a` and `b` */
-int common_prefix_size(struct reftable_buf *a, struct reftable_buf *b);
+size_t common_prefix_size(struct reftable_buf *a, struct reftable_buf *b);
+
+uint32_t hash_size(enum reftable_hash id);
-int hash_size(uint32_t id);
+/*
+ * Format IDs that identify the hash function used by a reftable. Note that
+ * these constants end up on disk and thus mustn't change. The format IDs are
+ * "sha1" and "s256" in big endian, respectively.
+ */
+#define REFTABLE_FORMAT_ID_SHA1 ((uint32_t) 0x73686131)
+#define REFTABLE_FORMAT_ID_SHA256 ((uint32_t) 0x73323536)
#endif
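Aside (not part of the patch): a minimal sketch of how the reworked REFTABLE_ALLOC_GROW is meant to be called now that it evaluates to non-zero on allocation failure while leaving the array and its capacity untouched; REFTABLE_ALLOC_GROW_OR_NULL instead frees the array and zeroes the capacity. The helper below is hypothetical.
/* Hypothetical caller: append one value to a growable uint64_t array. */
static int append_offset(uint64_t **items, size_t *len, size_t *cap, uint64_t value)
{
	/* A non-zero result means reallocation failed; *items and *cap are unchanged. */
	if (REFTABLE_ALLOC_GROW(*items, *len + 1, *cap))
		return REFTABLE_OUT_OF_MEMORY_ERROR;
	(*items)[(*len)++] = value;
	return 0;
}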
diff --git a/reftable/block.c b/reftable/block.c
index 0198078485..8ac865ce78 100644
--- a/reftable/block.c
+++ b/reftable/block.c
@@ -15,7 +15,7 @@ https://developers.google.com/open-source/licenses/bsd
#include "system.h"
#include <zlib.h>
-int header_size(int version)
+size_t header_size(int version)
{
switch (version) {
case 1:
@@ -26,7 +26,7 @@ int header_size(int version)
abort();
}
-int footer_size(int version)
+size_t footer_size(int version)
{
switch (version) {
case 1:
@@ -40,20 +40,20 @@ int footer_size(int version)
static int block_writer_register_restart(struct block_writer *w, int n,
int is_restart, struct reftable_buf *key)
{
- int rlen, err;
+ uint32_t rlen;
+ int err;
rlen = w->restart_len;
- if (rlen >= MAX_RESTARTS) {
+ if (rlen >= MAX_RESTARTS)
is_restart = 0;
- }
- if (is_restart) {
+ if (is_restart)
rlen++;
- }
if (2 + 3 * rlen + n > w->block_size - w->next)
return -1;
if (is_restart) {
- REFTABLE_ALLOC_GROW(w->restarts, w->restart_len + 1, w->restart_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(w->restarts, w->restart_len + 1,
+ w->restart_cap);
if (!w->restarts)
return REFTABLE_OUT_OF_MEMORY_ERROR;
w->restarts[w->restart_len++] = w->next;
@@ -71,7 +71,7 @@ static int block_writer_register_restart(struct block_writer *w, int n,
}
int block_writer_init(struct block_writer *bw, uint8_t typ, uint8_t *block,
- uint32_t block_size, uint32_t header_off, int hash_size)
+ uint32_t block_size, uint32_t header_off, uint32_t hash_size)
{
bw->block = block;
bw->hash_size = hash_size;
@@ -147,8 +147,7 @@ done:
int block_writer_finish(struct block_writer *w)
{
- int i;
- for (i = 0; i < w->restart_len; i++) {
+ for (uint32_t i = 0; i < w->restart_len; i++) {
put_be24(w->block + w->next, w->restarts[i]);
w->next += 3;
}
@@ -176,7 +175,8 @@ int block_writer_finish(struct block_writer *w)
* is guaranteed to return `Z_STREAM_END`.
*/
compressed_len = deflateBound(w->zstream, src_len);
- REFTABLE_ALLOC_GROW(w->compressed, compressed_len, w->compressed_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(w->compressed, compressed_len,
+ w->compressed_cap);
if (!w->compressed) {
ret = REFTABLE_OUT_OF_MEMORY_ERROR;
return ret;
@@ -212,7 +212,7 @@ int block_writer_finish(struct block_writer *w)
int block_reader_init(struct block_reader *br, struct reftable_block *block,
uint32_t header_off, uint32_t table_block_size,
- int hash_size)
+ uint32_t hash_size)
{
uint32_t full_block_size = table_block_size;
uint8_t typ = block->data[header_off];
@@ -235,8 +235,8 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
uLong src_len = block->len - block_header_skip;
/* Log blocks specify the *uncompressed* size in their header. */
- REFTABLE_ALLOC_GROW(br->uncompressed_data, sz,
- br->uncompressed_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(br->uncompressed_data, sz,
+ br->uncompressed_cap);
if (!br->uncompressed_data) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
diff --git a/reftable/block.h b/reftable/block.h
index 0431e8591f..bef2b8a4c5 100644
--- a/reftable/block.h
+++ b/reftable/block.h
@@ -30,7 +30,7 @@ struct block_writer {
/* How often to restart keys. */
uint16_t restart_interval;
- int hash_size;
+ uint32_t hash_size;
/* Offset of next uint8_t to write. */
uint32_t next;
@@ -48,7 +48,7 @@ struct block_writer {
* initializes the blockwriter to write `typ` entries, using `block` as temporary
* storage. `block` is not owned by the block_writer. */
int block_writer_init(struct block_writer *bw, uint8_t typ, uint8_t *block,
- uint32_t block_size, uint32_t header_off, int hash_size);
+ uint32_t block_size, uint32_t header_off, uint32_t hash_size);
/* returns the block type (eg. 'r' for ref records. */
uint8_t block_writer_type(struct block_writer *bw);
@@ -72,7 +72,7 @@ struct block_reader {
/* the memory block */
struct reftable_block block;
- int hash_size;
+ uint32_t hash_size;
/* Uncompressed data for log entries. */
z_stream *zstream;
@@ -92,7 +92,7 @@ struct block_reader {
/* initializes a block reader. */
int block_reader_init(struct block_reader *br, struct reftable_block *bl,
uint32_t header_off, uint32_t table_block_size,
- int hash_size);
+ uint32_t hash_size);
void block_reader_release(struct block_reader *br);
@@ -108,7 +108,7 @@ struct block_iter {
uint32_t next_off;
const unsigned char *block;
size_t block_len;
- int hash_size;
+ uint32_t hash_size;
/* key for last entry we read. */
struct reftable_buf last_key;
@@ -137,10 +137,10 @@ void block_iter_reset(struct block_iter *it);
void block_iter_close(struct block_iter *it);
/* size of file header, depending on format version */
-int header_size(int version);
+size_t header_size(int version);
/* size of file footer, depending on format version */
-int footer_size(int version);
+size_t footer_size(int version);
/* returns a block to its source. */
void reftable_block_done(struct reftable_block *ret);
diff --git a/reftable/blocksource.c b/reftable/blocksource.c
index 52e0915a67..bba4a45b98 100644
--- a/reftable/blocksource.c
+++ b/reftable/blocksource.c
@@ -24,8 +24,8 @@ static void reftable_buf_close(void *b UNUSED)
{
}
-static int reftable_buf_read_block(void *v, struct reftable_block *dest,
- uint64_t off, uint32_t size)
+static ssize_t reftable_buf_read_block(void *v, struct reftable_block *dest,
+ uint64_t off, uint32_t size)
{
struct reftable_buf *b = v;
assert(off + size <= b->len);
@@ -78,8 +78,8 @@ static void file_close(void *v)
reftable_free(b);
}
-static int file_read_block(void *v, struct reftable_block *dest, uint64_t off,
- uint32_t size)
+static ssize_t file_read_block(void *v, struct reftable_block *dest, uint64_t off,
+ uint32_t size)
{
struct file_block_source *b = v;
assert(off + size <= b->size);
diff --git a/reftable/merged.c b/reftable/merged.c
index 514d6facf4..e72b39e178 100644
--- a/reftable/merged.c
+++ b/reftable/merged.c
@@ -66,6 +66,8 @@ static int merged_iter_seek(struct merged_iter *mi, struct reftable_record *want
int err;
mi->advance_index = -1;
+ while (!merged_iter_pqueue_is_empty(mi->pq))
+ merged_iter_pqueue_remove(&mi->pq);
for (size_t i = 0; i < mi->subiters_len; i++) {
err = iterator_seek(&mi->subiters[i].iter, want);
@@ -181,7 +183,7 @@ static void iterator_from_merged_iter(struct reftable_iterator *it,
int reftable_merged_table_new(struct reftable_merged_table **dest,
struct reftable_reader **readers, size_t n,
- uint32_t hash_id)
+ enum reftable_hash hash_id)
{
struct reftable_merged_table *m = NULL;
uint64_t last_max = 0;
@@ -238,14 +240,16 @@ int merged_table_init_iter(struct reftable_merged_table *mt,
struct reftable_iterator *it,
uint8_t typ)
{
- struct merged_subiter *subiters;
+ struct merged_subiter *subiters = NULL;
struct merged_iter *mi = NULL;
int ret;
- REFTABLE_CALLOC_ARRAY(subiters, mt->readers_len);
- if (!subiters) {
- ret = REFTABLE_OUT_OF_MEMORY_ERROR;
- goto out;
+ if (mt->readers_len) {
+ REFTABLE_CALLOC_ARRAY(subiters, mt->readers_len);
+ if (!subiters) {
+ ret = REFTABLE_OUT_OF_MEMORY_ERROR;
+ goto out;
+ }
}
for (size_t i = 0; i < mt->readers_len; i++) {
@@ -293,7 +297,7 @@ int reftable_merged_table_init_log_iterator(struct reftable_merged_table *mt,
return merged_table_init_iter(mt, it, BLOCK_TYPE_LOG);
}
-uint32_t reftable_merged_table_hash_id(struct reftable_merged_table *mt)
+enum reftable_hash reftable_merged_table_hash_id(struct reftable_merged_table *mt)
{
return mt->hash_id;
}
diff --git a/reftable/merged.h b/reftable/merged.h
index 89bd0c4b35..0b7d939e92 100644
--- a/reftable/merged.h
+++ b/reftable/merged.h
@@ -10,11 +10,12 @@ https://developers.google.com/open-source/licenses/bsd
#define MERGED_H
#include "system.h"
+#include "reftable-basics.h"
struct reftable_merged_table {
struct reftable_reader **readers;
size_t readers_len;
- uint32_t hash_id;
+ enum reftable_hash hash_id;
/* If unset, produce deletions. This is useful for compaction. For the
* full stack, deletions should be produced. */
diff --git a/reftable/pq.c b/reftable/pq.c
index 6ee1164dd3..5591e875e1 100644
--- a/reftable/pq.c
+++ b/reftable/pq.c
@@ -49,7 +49,7 @@ int merged_iter_pqueue_add(struct merged_iter_pqueue *pq, const struct pq_entry
{
size_t i = 0;
- REFTABLE_ALLOC_GROW(pq->heap, pq->len + 1, pq->cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(pq->heap, pq->len + 1, pq->cap);
if (!pq->heap)
return REFTABLE_OUT_OF_MEMORY_ERROR;
pq->heap[pq->len++] = *e;
diff --git a/reftable/reader.c b/reftable/reader.c
index 90dc950b57..3f2e4b2800 100644
--- a/reftable/reader.c
+++ b/reftable/reader.c
@@ -20,11 +20,11 @@ uint64_t block_source_size(struct reftable_block_source *source)
return source->ops->size(source->arg);
}
-int block_source_read_block(struct reftable_block_source *source,
- struct reftable_block *dest, uint64_t off,
- uint32_t size)
+ssize_t block_source_read_block(struct reftable_block_source *source,
+ struct reftable_block *dest, uint64_t off,
+ uint32_t size)
{
- int result = source->ops->read_block(source->arg, dest, off, size);
+ ssize_t result = source->ops->read_block(source->arg, dest, off, size);
dest->source = *source;
return result;
}
@@ -57,17 +57,20 @@ static int reader_get_block(struct reftable_reader *r,
struct reftable_block *dest, uint64_t off,
uint32_t sz)
{
+ ssize_t bytes_read;
if (off >= r->size)
return 0;
-
- if (off + sz > r->size) {
+ if (off + sz > r->size)
sz = r->size - off;
- }
- return block_source_read_block(&r->source, dest, off, sz);
+ bytes_read = block_source_read_block(&r->source, dest, off, sz);
+ if (bytes_read < 0)
+ return (int)bytes_read;
+
+ return 0;
}
-uint32_t reftable_reader_hash_id(struct reftable_reader *r)
+enum reftable_hash reftable_reader_hash_id(struct reftable_reader *r)
{
return r->hash_id;
}
@@ -107,18 +110,20 @@ static int parse_footer(struct reftable_reader *r, uint8_t *footer,
f += 8;
if (r->version == 1) {
- r->hash_id = GIT_SHA1_FORMAT_ID;
+ r->hash_id = REFTABLE_HASH_SHA1;
} else {
- r->hash_id = get_be32(f);
- switch (r->hash_id) {
- case GIT_SHA1_FORMAT_ID:
+ switch (get_be32(f)) {
+ case REFTABLE_FORMAT_ID_SHA1:
+ r->hash_id = REFTABLE_HASH_SHA1;
break;
- case GIT_SHA256_FORMAT_ID:
+ case REFTABLE_FORMAT_ID_SHA256:
+ r->hash_id = REFTABLE_HASH_SHA256;
break;
default:
err = REFTABLE_FORMAT_ERROR;
goto done;
}
+
f += 4;
}
@@ -599,6 +604,7 @@ int reftable_reader_new(struct reftable_reader **out,
struct reftable_reader *r;
uint64_t file_size = block_source_size(source);
uint32_t read_size;
+ ssize_t bytes_read;
int err;
REFTABLE_CALLOC_ARRAY(r, 1);
@@ -617,8 +623,8 @@ int reftable_reader_new(struct reftable_reader **out,
goto done;
}
- err = block_source_read_block(source, &header, 0, read_size);
- if (err != read_size) {
+ bytes_read = block_source_read_block(source, &header, 0, read_size);
+ if (bytes_read < 0 || (size_t)bytes_read != read_size) {
err = REFTABLE_IO_ERROR;
goto done;
}
@@ -643,9 +649,9 @@ int reftable_reader_new(struct reftable_reader **out,
r->hash_id = 0;
r->refcount = 1;
- err = block_source_read_block(source, &footer, r->size,
- footer_size(r->version));
- if (err != footer_size(r->version)) {
+ bytes_read = block_source_read_block(source, &footer, r->size,
+ footer_size(r->version));
+ if (bytes_read < 0 || (size_t)bytes_read != footer_size(r->version)) {
err = REFTABLE_IO_ERROR;
goto done;
}
@@ -748,7 +754,7 @@ static int reftable_reader_refs_for_unindexed(struct reftable_reader *r,
struct table_iter *ti;
struct filtering_ref_iterator *filter = NULL;
struct filtering_ref_iterator empty = FILTERING_REF_ITERATOR_INIT;
- int oid_len = hash_size(r->hash_id);
+ uint32_t oid_len = hash_size(r->hash_id);
int err;
REFTABLE_ALLOC_ARRAY(ti, 1);
diff --git a/reftable/reader.h b/reftable/reader.h
index 010fbfe851..bb72108a6f 100644
--- a/reftable/reader.h
+++ b/reftable/reader.h
@@ -16,9 +16,9 @@ https://developers.google.com/open-source/licenses/bsd
uint64_t block_source_size(struct reftable_block_source *source);
-int block_source_read_block(struct reftable_block_source *source,
- struct reftable_block *dest, uint64_t off,
- uint32_t size);
+ssize_t block_source_read_block(struct reftable_block_source *source,
+ struct reftable_block *dest, uint64_t off,
+ uint32_t size);
void block_source_close(struct reftable_block_source *source);
/* metadata for a block type */
@@ -37,8 +37,8 @@ struct reftable_reader {
/* Size of the file, excluding the footer. */
uint64_t size;
- /* 'sha1' for SHA1, 's256' for SHA-256 */
- uint32_t hash_id;
+ /* The hash function used for ref records. */
+ enum reftable_hash hash_id;
uint32_t block_size;
uint64_t min_update_index;
diff --git a/reftable/record.c b/reftable/record.c
index fb5652ed57..8919df8a4d 100644
--- a/reftable/record.c
+++ b/reftable/record.c
@@ -21,47 +21,49 @@ static void *reftable_record_data(struct reftable_record *rec);
int get_var_int(uint64_t *dest, struct string_view *in)
{
- int ptr = 0;
+ const unsigned char *buf = in->buf;
+ unsigned char c;
uint64_t val;
- if (in->len == 0)
+ if (!in->len)
return -1;
- val = in->buf[ptr] & 0x7f;
-
- while (in->buf[ptr] & 0x80) {
- ptr++;
- if (ptr > in->len) {
+ c = *buf++;
+ val = c & 0x7f;
+
+ while (c & 0x80) {
+ /*
+ * We use a micro-optimization here: whenever we see that the
+ * 0x80 bit is set, we know that the remainder of the value
+ * cannot be 0. The zero-values thus doesn't need to be encoded
+ * at all, which is why we subtract 1 when encoding and add 1
+ * when decoding.
+ *
+ * This allows us to save a byte in some edge cases.
+ */
+ val += 1;
+ if (!val || (val & (uint64_t)(~0ULL << (64 - 7))))
+ return -1; /* overflow */
+ if (buf >= in->buf + in->len)
return -1;
- }
- val = (val + 1) << 7 | (uint64_t)(in->buf[ptr] & 0x7f);
+ c = *buf++;
+ val = (val << 7) + (c & 0x7f);
}
*dest = val;
- return ptr + 1;
+ return buf - in->buf;
}
-int put_var_int(struct string_view *dest, uint64_t val)
+int put_var_int(struct string_view *dest, uint64_t value)
{
- uint8_t buf[10] = { 0 };
- int i = 9;
- int n = 0;
- buf[i] = (uint8_t)(val & 0x7f);
- i--;
- while (1) {
- val >>= 7;
- if (!val) {
- break;
- }
- val--;
- buf[i] = 0x80 | (uint8_t)(val & 0x7f);
- i--;
- }
-
- n = sizeof(buf) - i - 1;
- if (dest->len < n)
+ unsigned char varint[10];
+ unsigned pos = sizeof(varint) - 1;
+ varint[pos] = value & 0x7f;
+ while (value >>= 7)
+ varint[--pos] = 0x80 | (--value & 0x7f);
+ if (dest->len < sizeof(varint) - pos)
return -1;
- memcpy(dest->buf, &buf[i + 1], n);
- return n;
+ memcpy(dest->buf, varint + pos, sizeof(varint) - pos);
+ return sizeof(varint) - pos;
}
int reftable_is_block_type(uint8_t typ)
@@ -124,7 +126,7 @@ static int decode_string(struct reftable_buf *dest, struct string_view in)
static int encode_string(const char *str, struct string_view s)
{
struct string_view start = s;
- int l = strlen(str);
+ size_t l = strlen(str);
int n = put_var_int(&s, l);
if (n < 0)
return -1;
@@ -142,9 +144,9 @@ int reftable_encode_key(int *restart, struct string_view dest,
uint8_t extra)
{
struct string_view start = dest;
- int prefix_len = common_prefix_size(&prev_key, &key);
+ size_t prefix_len = common_prefix_size(&prev_key, &key);
uint64_t suffix_len = key.len - prefix_len;
- int n = put_var_int(&dest, (uint64_t)prefix_len);
+ int n = put_var_int(&dest, prefix_len);
if (n < 0)
return -1;
string_view_consume(&dest, n);
@@ -227,7 +229,7 @@ static int reftable_ref_record_key(const void *r, struct reftable_buf *dest)
}
static int reftable_ref_record_copy_from(void *rec, const void *src_rec,
- int hash_size)
+ uint32_t hash_size)
{
struct reftable_ref_record *ref = rec;
const struct reftable_ref_record *src = src_rec;
@@ -235,8 +237,6 @@ static int reftable_ref_record_copy_from(void *rec, const void *src_rec,
size_t refname_cap = 0;
int err;
- assert(hash_size > 0);
-
SWAP(refname, ref->refname);
SWAP(refname_cap, ref->refname_cap);
reftable_ref_record_release(ref);
@@ -246,8 +246,8 @@ static int reftable_ref_record_copy_from(void *rec, const void *src_rec,
if (src->refname) {
size_t refname_len = strlen(src->refname);
- REFTABLE_ALLOC_GROW(ref->refname, refname_len + 1,
- ref->refname_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(ref->refname, refname_len + 1,
+ ref->refname_cap);
if (!ref->refname) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto out;
@@ -317,13 +317,12 @@ static uint8_t reftable_ref_record_val_type(const void *rec)
}
static int reftable_ref_record_encode(const void *rec, struct string_view s,
- int hash_size)
+ uint32_t hash_size)
{
const struct reftable_ref_record *r =
(const struct reftable_ref_record *)rec;
struct string_view start = s;
int n = put_var_int(&s, r->update_index);
- assert(hash_size > 0);
if (n < 0)
return -1;
string_view_consume(&s, n);
@@ -363,7 +362,7 @@ static int reftable_ref_record_encode(const void *rec, struct string_view s,
static int reftable_ref_record_decode(void *rec, struct reftable_buf key,
uint8_t val_type, struct string_view in,
- int hash_size, struct reftable_buf *scratch)
+ uint32_t hash_size, struct reftable_buf *scratch)
{
struct reftable_ref_record *r = rec;
struct string_view start = in;
@@ -372,8 +371,6 @@ static int reftable_ref_record_decode(void *rec, struct reftable_buf key,
size_t refname_cap = 0;
int n, err;
- assert(hash_size > 0);
-
n = get_var_int(&update_index, &in);
if (n < 0)
return n;
@@ -385,7 +382,7 @@ static int reftable_ref_record_decode(void *rec, struct reftable_buf key,
SWAP(r->refname, refname);
SWAP(r->refname_cap, refname_cap);
- REFTABLE_ALLOC_GROW(r->refname, key.len + 1, r->refname_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(r->refname, key.len + 1, r->refname_cap);
if (!r->refname) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
@@ -449,7 +446,7 @@ static int reftable_ref_record_is_deletion_void(const void *p)
}
static int reftable_ref_record_equal_void(const void *a,
- const void *b, int hash_size)
+ const void *b, uint32_t hash_size)
{
struct reftable_ref_record *ra = (struct reftable_ref_record *) a;
struct reftable_ref_record *rb = (struct reftable_ref_record *) b;
@@ -493,7 +490,7 @@ static void reftable_obj_record_release(void *rec)
}
static int reftable_obj_record_copy_from(void *rec, const void *src_rec,
- int hash_size UNUSED)
+ uint32_t hash_size UNUSED)
{
struct reftable_obj_record *obj = rec;
const struct reftable_obj_record *src = src_rec;
@@ -525,7 +522,7 @@ static uint8_t reftable_obj_record_val_type(const void *rec)
}
static int reftable_obj_record_encode(const void *rec, struct string_view s,
- int hash_size UNUSED)
+ uint32_t hash_size UNUSED)
{
const struct reftable_obj_record *r = rec;
struct string_view start = s;
@@ -560,7 +557,7 @@ static int reftable_obj_record_encode(const void *rec, struct string_view s,
static int reftable_obj_record_decode(void *rec, struct reftable_buf key,
uint8_t val_type, struct string_view in,
- int hash_size UNUSED,
+ uint32_t hash_size UNUSED,
struct reftable_buf *scratch UNUSED)
{
struct string_view start = in;
@@ -568,7 +565,6 @@ static int reftable_obj_record_decode(void *rec, struct reftable_buf key,
uint64_t count = val_type;
int n = 0;
uint64_t last;
- int j;
reftable_obj_record_release(r);
@@ -603,8 +599,7 @@ static int reftable_obj_record_decode(void *rec, struct reftable_buf key,
string_view_consume(&in, n);
last = r->offsets[0];
- j = 1;
- while (j < count) {
+ for (uint64_t j = 1; j < count; j++) {
uint64_t delta = 0;
int n = get_var_int(&delta, &in);
if (n < 0) {
@@ -613,7 +608,6 @@ static int reftable_obj_record_decode(void *rec, struct reftable_buf key,
string_view_consume(&in, n);
last = r->offsets[j] = (delta + last);
- j++;
}
return start.len - in.len;
}
@@ -624,7 +618,7 @@ static int not_a_deletion(const void *p UNUSED)
}
static int reftable_obj_record_equal_void(const void *a, const void *b,
- int hash_size UNUSED)
+ uint32_t hash_size UNUSED)
{
struct reftable_obj_record *ra = (struct reftable_obj_record *) a;
struct reftable_obj_record *rb = (struct reftable_obj_record *) b;
@@ -699,7 +693,7 @@ static int reftable_log_record_key(const void *r, struct reftable_buf *dest)
}
static int reftable_log_record_copy_from(void *rec, const void *src_rec,
- int hash_size)
+ uint32_t hash_size)
{
struct reftable_log_record *dst = rec;
const struct reftable_log_record *src =
@@ -780,7 +774,7 @@ static uint8_t reftable_log_record_val_type(const void *rec)
}
static int reftable_log_record_encode(const void *rec, struct string_view s,
- int hash_size)
+ uint32_t hash_size)
{
const struct reftable_log_record *r = rec;
struct string_view start = s;
@@ -828,7 +822,7 @@ static int reftable_log_record_encode(const void *rec, struct string_view s,
static int reftable_log_record_decode(void *rec, struct reftable_buf key,
uint8_t val_type, struct string_view in,
- int hash_size, struct reftable_buf *scratch)
+ uint32_t hash_size, struct reftable_buf *scratch)
{
struct string_view start = in;
struct reftable_log_record *r = rec;
@@ -839,7 +833,7 @@ static int reftable_log_record_decode(void *rec, struct reftable_buf key,
if (key.len <= 9 || key.buf[key.len - 9] != 0)
return REFTABLE_FORMAT_ERROR;
- REFTABLE_ALLOC_GROW(r->refname, key.len - 8, r->refname_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(r->refname, key.len - 8, r->refname_cap);
if (!r->refname) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
@@ -947,8 +941,8 @@ static int reftable_log_record_decode(void *rec, struct reftable_buf key,
}
string_view_consume(&in, n);
- REFTABLE_ALLOC_GROW(r->value.update.message, scratch->len + 1,
- r->value.update.message_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(r->value.update.message, scratch->len + 1,
+ r->value.update.message_cap);
if (!r->value.update.message) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
@@ -976,7 +970,7 @@ static int null_streq(const char *a, const char *b)
}
static int reftable_log_record_equal_void(const void *a,
- const void *b, int hash_size)
+ const void *b, uint32_t hash_size)
{
return reftable_log_record_equal((struct reftable_log_record *) a,
(struct reftable_log_record *) b,
@@ -1000,7 +994,7 @@ static int reftable_log_record_cmp_void(const void *_a, const void *_b)
}
int reftable_log_record_equal(const struct reftable_log_record *a,
- const struct reftable_log_record *b, int hash_size)
+ const struct reftable_log_record *b, uint32_t hash_size)
{
if (!(null_streq(a->refname, b->refname) &&
a->update_index == b->update_index &&
@@ -1054,7 +1048,7 @@ static int reftable_index_record_key(const void *r, struct reftable_buf *dest)
}
static int reftable_index_record_copy_from(void *rec, const void *src_rec,
- int hash_size UNUSED)
+ uint32_t hash_size UNUSED)
{
struct reftable_index_record *dst = rec;
const struct reftable_index_record *src = src_rec;
@@ -1081,7 +1075,7 @@ static uint8_t reftable_index_record_val_type(const void *rec UNUSED)
}
static int reftable_index_record_encode(const void *rec, struct string_view out,
- int hash_size UNUSED)
+ uint32_t hash_size UNUSED)
{
const struct reftable_index_record *r =
(const struct reftable_index_record *)rec;
@@ -1099,7 +1093,7 @@ static int reftable_index_record_encode(const void *rec, struct string_view out,
static int reftable_index_record_decode(void *rec, struct reftable_buf key,
uint8_t val_type UNUSED,
struct string_view in,
- int hash_size UNUSED,
+ uint32_t hash_size UNUSED,
struct reftable_buf *scratch UNUSED)
{
struct string_view start = in;
@@ -1120,7 +1114,7 @@ static int reftable_index_record_decode(void *rec, struct reftable_buf key,
}
static int reftable_index_record_equal(const void *a, const void *b,
- int hash_size UNUSED)
+ uint32_t hash_size UNUSED)
{
struct reftable_index_record *ia = (struct reftable_index_record *) a;
struct reftable_index_record *ib = (struct reftable_index_record *) b;
@@ -1154,14 +1148,14 @@ int reftable_record_key(struct reftable_record *rec, struct reftable_buf *dest)
}
int reftable_record_encode(struct reftable_record *rec, struct string_view dest,
- int hash_size)
+ uint32_t hash_size)
{
return reftable_record_vtable(rec)->encode(reftable_record_data(rec),
dest, hash_size);
}
int reftable_record_copy_from(struct reftable_record *rec,
- struct reftable_record *src, int hash_size)
+ struct reftable_record *src, uint32_t hash_size)
{
assert(src->type == rec->type);
@@ -1176,7 +1170,7 @@ uint8_t reftable_record_val_type(struct reftable_record *rec)
}
int reftable_record_decode(struct reftable_record *rec, struct reftable_buf key,
- uint8_t extra, struct string_view src, int hash_size,
+ uint8_t extra, struct string_view src, uint32_t hash_size,
struct reftable_buf *scratch)
{
return reftable_record_vtable(rec)->decode(reftable_record_data(rec),
@@ -1203,7 +1197,7 @@ int reftable_record_cmp(struct reftable_record *a, struct reftable_record *b)
reftable_record_data(a), reftable_record_data(b));
}
-int reftable_record_equal(struct reftable_record *a, struct reftable_record *b, int hash_size)
+int reftable_record_equal(struct reftable_record *a, struct reftable_record *b, uint32_t hash_size)
{
if (a->type != b->type)
return 0;
@@ -1211,7 +1205,7 @@ int reftable_record_equal(struct reftable_record *a, struct reftable_record *b,
reftable_record_data(a), reftable_record_data(b), hash_size);
}
-static int hash_equal(const unsigned char *a, const unsigned char *b, int hash_size)
+static int hash_equal(const unsigned char *a, const unsigned char *b, uint32_t hash_size)
{
if (a && b)
return !memcmp(a, b, hash_size);
@@ -1220,9 +1214,8 @@ static int hash_equal(const unsigned char *a, const unsigned char *b, int hash_s
}
int reftable_ref_record_equal(const struct reftable_ref_record *a,
- const struct reftable_ref_record *b, int hash_size)
+ const struct reftable_ref_record *b, uint32_t hash_size)
{
- assert(hash_size > 0);
if (!null_streq(a->refname, b->refname))
return 0;
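Aside (not part of the patch): a hypothetical round-trip through the rewritten varint helpers, assuming a string_view over a small stack buffer; error checking is omitted for brevity.
/* Illustration only: encode and decode a varint. */
uint8_t buf[10];
struct string_view out = { .buf = buf, .len = sizeof(buf) };
uint64_t decoded = 0;
int n = put_var_int(&out, 300);            /* bytes written, or -1 on error */
struct string_view in = { .buf = buf, .len = (size_t)n };
int m = get_var_int(&decoded, &in);        /* m == n and decoded == 300 */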
diff --git a/reftable/record.h b/reftable/record.h
index 25aa908c85..c7755a4d75 100644
--- a/reftable/record.h
+++ b/reftable/record.h
@@ -32,8 +32,10 @@ static inline void string_view_consume(struct string_view *s, int n)
s->len -= n;
}
-/* utilities for de/encoding varints */
-
+/*
+ * Decode and encode a varint. Returns the number of bytes read/written, or a
+ * negative value in case encoding/decoding the varint has failed.
+ */
int get_var_int(uint64_t *dest, struct string_view *in);
int put_var_int(struct string_view *dest, uint64_t val);
@@ -45,18 +47,18 @@ struct reftable_record_vtable {
/* The record type of ('r' for ref). */
uint8_t type;
- int (*copy_from)(void *dest, const void *src, int hash_size);
+ int (*copy_from)(void *dest, const void *src, uint32_t hash_size);
/* a value of [0..7], indicating record subvariants (eg. ref vs. symref
* vs ref deletion) */
uint8_t (*val_type)(const void *rec);
/* encodes rec into dest, returning how much space was used. */
- int (*encode)(const void *rec, struct string_view dest, int hash_size);
+ int (*encode)(const void *rec, struct string_view dest, uint32_t hash_size);
/* decode data from `src` into the record. */
int (*decode)(void *rec, struct reftable_buf key, uint8_t extra,
- struct string_view src, int hash_size,
+ struct string_view src, uint32_t hash_size,
struct reftable_buf *scratch);
/* deallocate and null the record. */
@@ -66,16 +68,13 @@ struct reftable_record_vtable {
int (*is_deletion)(const void *rec);
/* Are two records equal? This assumes they have the same type. Returns 0 for non-equal. */
- int (*equal)(const void *a, const void *b, int hash_size);
+ int (*equal)(const void *a, const void *b, uint32_t hash_size);
/*
* Compare keys of two records with each other. The records must have
* the same type.
*/
int (*cmp)(const void *a, const void *b);
-
- /* Print on stdout, for debugging. */
- void (*print)(const void *rec, int hash_size);
};
/* returns true for recognized block types. Block start with the block type. */
@@ -136,16 +135,16 @@ void reftable_record_init(struct reftable_record *rec, uint8_t typ);
/* see struct record_vtable */
int reftable_record_cmp(struct reftable_record *a, struct reftable_record *b);
-int reftable_record_equal(struct reftable_record *a, struct reftable_record *b, int hash_size);
+int reftable_record_equal(struct reftable_record *a, struct reftable_record *b, uint32_t hash_size);
int reftable_record_key(struct reftable_record *rec, struct reftable_buf *dest);
int reftable_record_copy_from(struct reftable_record *rec,
- struct reftable_record *src, int hash_size);
+ struct reftable_record *src, uint32_t hash_size);
uint8_t reftable_record_val_type(struct reftable_record *rec);
int reftable_record_encode(struct reftable_record *rec, struct string_view dest,
- int hash_size);
+ uint32_t hash_size);
int reftable_record_decode(struct reftable_record *rec, struct reftable_buf key,
uint8_t extra, struct string_view src,
- int hash_size, struct reftable_buf *scratch);
+ uint32_t hash_size, struct reftable_buf *scratch);
int reftable_record_is_deletion(struct reftable_record *rec);
static inline uint8_t reftable_record_type(struct reftable_record *rec)
diff --git a/reftable/reftable-basics.h b/reftable/reftable-basics.h
index 6e8e636b71..e0397ed583 100644
--- a/reftable/reftable-basics.h
+++ b/reftable/reftable-basics.h
@@ -11,6 +11,19 @@
#include <stddef.h>
+/*
+ * Hash functions understood by the reftable library. Note that the values are
+ * arbitrary and somewhat random such that we can easily detect cases where the
+ * hash hasn't been properly set up.
+ */
+enum reftable_hash {
+ REFTABLE_HASH_SHA1 = 89,
+ REFTABLE_HASH_SHA256 = 247,
+};
+#define REFTABLE_HASH_SIZE_SHA1 20
+#define REFTABLE_HASH_SIZE_SHA256 32
+#define REFTABLE_HASH_SIZE_MAX REFTABLE_HASH_SIZE_SHA256
+
/* Overrides the functions to use for memory management. */
void reftable_set_alloc(void *(*malloc)(size_t),
void *(*realloc)(void *, size_t), void (*free)(void *));
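Aside (not part of the patch): an illustrative snippet of how the new enum and size constants combine with the reworked hash_size() from basics.c.
/* Illustration only: size a buffer that can hold any supported object ID. */
unsigned char oid[REFTABLE_HASH_SIZE_MAX];
enum reftable_hash id = REFTABLE_HASH_SHA256;
uint32_t oid_len = hash_size(id);   /* 32 here; 20 for SHA-1 or an unset (zero) id */
memset(oid, 0, oid_len);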
diff --git a/reftable/reftable-blocksource.h b/reftable/reftable-blocksource.h
index 5aa3990a57..6b326aa5ea 100644
--- a/reftable/reftable-blocksource.h
+++ b/reftable/reftable-blocksource.h
@@ -22,7 +22,7 @@ struct reftable_block_source {
* so it can return itself into the pool. */
struct reftable_block {
uint8_t *data;
- int len;
+ size_t len;
struct reftable_block_source source;
};
@@ -31,10 +31,13 @@ struct reftable_block_source_vtable {
/* returns the size of a block source */
uint64_t (*size)(void *source);
- /* reads a segment from the block source. It is an error to read
- beyond the end of the block */
- int (*read_block)(void *source, struct reftable_block *dest,
- uint64_t off, uint32_t size);
+ /*
+ * Reads a segment from the block source. It is an error to read beyond
+ * the end of the block.
+ */
+ ssize_t (*read_block)(void *source, struct reftable_block *dest,
+ uint64_t off, uint32_t size);
+
/* mark the block as read; may return the data back to malloc */
void (*return_block)(void *source, struct reftable_block *blockp);
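Aside (not part of the patch): a hypothetical read_block callback matching the new ssize_t signature, for a made-up in-memory source; it returns the number of bytes read on success and a negative value on error.
/* Made-up source type, for illustration only. */
struct mem_source {
	uint8_t *data;
	size_t len;
};

static ssize_t mem_read_block(void *source, struct reftable_block *dest,
			      uint64_t off, uint32_t size)
{
	struct mem_source *m = source;
	if (off + size > m->len)
		return -1;                    /* reading past the end is an error */
	dest->data = reftable_malloc(size);
	if (!dest->data)
		return -1;
	memcpy(dest->data, m->data + off, size);
	dest->len = size;
	return (ssize_t)size;                 /* bytes read on success */
}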
diff --git a/reftable/reftable-merged.h b/reftable/reftable-merged.h
index a970d5dd89..f2d01c3ef8 100644
--- a/reftable/reftable-merged.h
+++ b/reftable/reftable-merged.h
@@ -34,7 +34,7 @@ struct reftable_reader;
*/
int reftable_merged_table_new(struct reftable_merged_table **dest,
struct reftable_reader **readers, size_t n,
- uint32_t hash_id);
+ enum reftable_hash hash_id);
/* Initialize a merged table iterator for reading refs. */
int reftable_merged_table_init_ref_iterator(struct reftable_merged_table *mt,
@@ -56,6 +56,6 @@ reftable_merged_table_min_update_index(struct reftable_merged_table *mt);
void reftable_merged_table_free(struct reftable_merged_table *m);
/* return the hash ID of the merged table. */
-uint32_t reftable_merged_table_hash_id(struct reftable_merged_table *m);
+enum reftable_hash reftable_merged_table_hash_id(struct reftable_merged_table *m);
#endif
diff --git a/reftable/reftable-reader.h b/reftable/reftable-reader.h
index 6a2d0b693f..0085fbb903 100644
--- a/reftable/reftable-reader.h
+++ b/reftable/reftable-reader.h
@@ -54,7 +54,7 @@ int reftable_reader_init_log_iterator(struct reftable_reader *r,
struct reftable_iterator *it);
/* returns the hash ID used in this table. */
-uint32_t reftable_reader_hash_id(struct reftable_reader *r);
+enum reftable_hash reftable_reader_hash_id(struct reftable_reader *r);
/* return an iterator for the refs pointing to `oid`. */
int reftable_reader_refs_for(struct reftable_reader *r,
diff --git a/reftable/reftable-record.h b/reftable/reftable-record.h
index 2d42463c58..931e594744 100644
--- a/reftable/reftable-record.h
+++ b/reftable/reftable-record.h
@@ -9,7 +9,7 @@ https://developers.google.com/open-source/licenses/bsd
#ifndef REFTABLE_RECORD_H
#define REFTABLE_RECORD_H
-#include "hash.h"
+#include "reftable-basics.h"
#include <stdint.h>
/*
@@ -40,10 +40,10 @@ struct reftable_ref_record {
#define REFTABLE_NR_REF_VALUETYPES 4
} value_type;
union {
- unsigned char val1[GIT_MAX_RAWSZ];
+ unsigned char val1[REFTABLE_HASH_SIZE_MAX];
struct {
- unsigned char value[GIT_MAX_RAWSZ]; /* first hash */
- unsigned char target_value[GIT_MAX_RAWSZ]; /* second hash */
+ unsigned char value[REFTABLE_HASH_SIZE_MAX]; /* first hash */
+ unsigned char target_value[REFTABLE_HASH_SIZE_MAX]; /* second hash */
} val2;
char *symref; /* referent, malloced 0-terminated string */
} value;
@@ -65,7 +65,7 @@ void reftable_ref_record_release(struct reftable_ref_record *ref);
/* returns whether two reftable_ref_records are the same. Useful for testing. */
int reftable_ref_record_equal(const struct reftable_ref_record *a,
- const struct reftable_ref_record *b, int hash_size);
+ const struct reftable_ref_record *b, uint32_t hash_size);
/* reftable_log_record holds a reflog entry */
struct reftable_log_record {
@@ -85,8 +85,8 @@ struct reftable_log_record {
union {
struct {
- unsigned char new_hash[GIT_MAX_RAWSZ];
- unsigned char old_hash[GIT_MAX_RAWSZ];
+ unsigned char new_hash[REFTABLE_HASH_SIZE_MAX];
+ unsigned char old_hash[REFTABLE_HASH_SIZE_MAX];
char *name;
char *email;
uint64_t time;
@@ -105,6 +105,6 @@ void reftable_log_record_release(struct reftable_log_record *log);
/* returns whether two records are equal. Useful for testing. */
int reftable_log_record_equal(const struct reftable_log_record *a,
- const struct reftable_log_record *b, int hash_size);
+ const struct reftable_log_record *b, uint32_t hash_size);
#endif
diff --git a/reftable/reftable-stack.h b/reftable/reftable-stack.h
index 54787f2ef5..ae14270ea7 100644
--- a/reftable/reftable-stack.h
+++ b/reftable/reftable-stack.h
@@ -149,4 +149,7 @@ struct reftable_compaction_stats {
struct reftable_compaction_stats *
reftable_stack_compaction_stats(struct reftable_stack *st);
+/* Return the hash of the stack. */
+enum reftable_hash reftable_stack_hash_id(struct reftable_stack *st);
+
#endif
diff --git a/reftable/reftable-writer.h b/reftable/reftable-writer.h
index e4fc953788..bfef3b1721 100644
--- a/reftable/reftable-writer.h
+++ b/reftable/reftable-writer.h
@@ -33,7 +33,7 @@ struct reftable_write_options {
/* 4-byte identifier ("sha1", "s256") of the hash.
* Defaults to SHA1 if unset
*/
- uint32_t hash_id;
+ enum reftable_hash hash_id;
/* Default mode for creating files. If unset, use 0666 (+umask) */
unsigned int default_permissions;
@@ -62,6 +62,21 @@ struct reftable_write_options {
* negative value will cause us to block indefinitely.
*/
long lock_timeout_ms;
+
+ /*
+ * Optional callback used to fsync files to disk. Falls back to using
+ * fsync(3P) when unset.
+ */
+ int (*fsync)(int fd);
+
+ /*
+ * Callback function to execute whenever the stack is being reloaded.
+ * This can be used e.g. to discard cached information that relies on
+ * the old stack's data. The payload data will be passed as argument to
+ * the callback.
+ */
+ void (*on_reload)(void *payload);
+ void *on_reload_payload;
};
/* reftable_block_stats holds statistics for a single block type */
@@ -69,7 +84,7 @@ struct reftable_block_stats {
/* total number of entries written */
int entries;
/* total number of key restarts */
- int restarts;
+ uint32_t restarts;
/* total number of blocks */
int blocks;
/* total number of index blocks */
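Aside (not part of the patch): a hypothetical setup of the two new callbacks in reftable_write_options; the callback names are invented for illustration.
static int my_fsync(int fd)
{
	return fsync(fd);                     /* could also batch or skip syncing */
}

static void my_on_reload(void *payload)
{
	/* e.g. drop caches derived from the previous stack's contents */
	(void) payload;
}

struct reftable_write_options opts = {
	.hash_id = REFTABLE_HASH_SHA256,
	.fsync = my_fsync,
	.on_reload = my_on_reload,
	.on_reload_payload = NULL,
};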
diff --git a/reftable/stack.c b/reftable/stack.c
index c33979536e..f7c1845e15 100644
--- a/reftable/stack.c
+++ b/reftable/stack.c
@@ -8,7 +8,6 @@ https://developers.google.com/open-source/licenses/bsd
#include "stack.h"
-#include "../write-or-die.h"
#include "system.h"
#include "constants.h"
#include "merged.h"
@@ -17,7 +16,6 @@ https://developers.google.com/open-source/licenses/bsd
#include "reftable-record.h"
#include "reftable-merged.h"
#include "writer.h"
-#include "tempfile.h"
static int stack_try_add(struct reftable_stack *st,
int (*write_table)(struct reftable_writer *wr,
@@ -43,17 +41,28 @@ static int stack_filename(struct reftable_buf *dest, struct reftable_stack *st,
return 0;
}
-static ssize_t reftable_fd_write(void *arg, const void *data, size_t sz)
+static int stack_fsync(const struct reftable_write_options *opts, int fd)
{
- int *fdp = (int *)arg;
- return write_in_full(*fdp, data, sz);
+ if (opts->fsync)
+ return opts->fsync(fd);
+ return fsync(fd);
}
-static int reftable_fd_flush(void *arg)
+struct fd_writer {
+ const struct reftable_write_options *opts;
+ int fd;
+};
+
+static ssize_t fd_writer_write(void *arg, const void *data, size_t sz)
{
- int *fdp = (int *)arg;
+ struct fd_writer *writer = arg;
+ return write_in_full(writer->fd, data, sz);
+}
- return fsync_component(FSYNC_COMPONENT_REFERENCE, *fdp);
+static int fd_writer_flush(void *arg)
+{
+ struct fd_writer *writer = arg;
+ return stack_fsync(writer->opts, writer->fd);
}
int reftable_new_stack(struct reftable_stack **dest, const char *dir,
@@ -73,7 +82,7 @@ int reftable_new_stack(struct reftable_stack **dest, const char *dir,
if (_opts)
opts = *_opts;
if (opts.hash_id == 0)
- opts.hash_id = GIT_SHA1_FORMAT_ID;
+ opts.hash_id = REFTABLE_HASH_SHA1;
*dest = NULL;
@@ -211,9 +220,9 @@ void reftable_stack_destroy(struct reftable_stack *st)
}
if (st->readers) {
- int i = 0;
struct reftable_buf filename = REFTABLE_BUF_INIT;
- for (i = 0; i < st->readers_len; i++) {
+
+ for (size_t i = 0; i < st->readers_len; i++) {
const char *name = reader_name(st->readers[i]);
int try_unlinking = 1;
@@ -229,6 +238,7 @@ void reftable_stack_destroy(struct reftable_stack *st)
unlink(filename.buf);
}
}
+
reftable_buf_release(&filename);
st->readers_len = 0;
REFTABLE_FREE_AND_NULL(st->readers);
@@ -261,9 +271,9 @@ static int reftable_stack_reload_once(struct reftable_stack *st,
int reuse_open)
{
size_t cur_len = !st->merged ? 0 : st->merged->readers_len;
- struct reftable_reader **cur;
+ struct reftable_reader **cur = NULL;
struct reftable_reader **reused = NULL;
- struct reftable_reader **new_readers;
+ struct reftable_reader **new_readers = NULL;
size_t reused_len = 0, reused_alloc = 0, names_len;
size_t new_readers_len = 0;
struct reftable_merged_table *new_merged = NULL;
@@ -271,18 +281,22 @@ static int reftable_stack_reload_once(struct reftable_stack *st,
int err = 0;
size_t i;
- cur = stack_copy_readers(st, cur_len);
- if (!cur) {
- err = REFTABLE_OUT_OF_MEMORY_ERROR;
- goto done;
+ if (cur_len) {
+ cur = stack_copy_readers(st, cur_len);
+ if (!cur) {
+ err = REFTABLE_OUT_OF_MEMORY_ERROR;
+ goto done;
+ }
}
names_len = names_length(names);
- new_readers = reftable_calloc(names_len, sizeof(*new_readers));
- if (!new_readers) {
- err = REFTABLE_OUT_OF_MEMORY_ERROR;
- goto done;
+ if (names_len) {
+ new_readers = reftable_calloc(names_len, sizeof(*new_readers));
+ if (!new_readers) {
+ err = REFTABLE_OUT_OF_MEMORY_ERROR;
+ goto done;
+ }
}
while (*names) {
@@ -304,7 +318,9 @@ static int reftable_stack_reload_once(struct reftable_stack *st,
* thus need to keep them alive here, which we
* do by bumping their refcount.
*/
- REFTABLE_ALLOC_GROW(reused, reused_len + 1, reused_alloc);
+ REFTABLE_ALLOC_GROW_OR_NULL(reused,
+ reused_len + 1,
+ reused_alloc);
if (!reused) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
@@ -478,7 +494,7 @@ static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
close(fd);
fd = -1;
- delay = delay + (delay * rand()) / RAND_MAX + 1;
+ delay = delay + (delay * git_rand(CSPRNG_BYTES_INSECURE)) / UINT32_MAX + 1;
sleep_millisec(delay);
}
@@ -539,6 +555,10 @@ out:
close(fd);
free_names(names);
free_names(names_after);
+
+ if (st->opts.on_reload)
+ st->opts.on_reload(st->opts.on_reload_payload);
+
return err;
}
@@ -549,7 +569,6 @@ static int stack_uptodate(struct reftable_stack *st)
{
char **names = NULL;
int err;
- int i = 0;
/*
* When we have cached stat information available then we use it to
@@ -589,7 +608,7 @@ static int stack_uptodate(struct reftable_stack *st)
if (err < 0)
return err;
- for (i = 0; i < st->readers_len; i++) {
+ for (size_t i = 0; i < st->readers_len; i++) {
if (!names[i]) {
err = 1;
goto done;
@@ -640,7 +659,7 @@ int reftable_stack_add(struct reftable_stack *st,
static int format_name(struct reftable_buf *dest, uint64_t min, uint64_t max)
{
char buf[100];
- uint32_t rnd = (uint32_t)git_rand();
+ uint32_t rnd = git_rand(CSPRNG_BYTES_INSECURE);
snprintf(buf, sizeof(buf), "0x%012" PRIx64 "-0x%012" PRIx64 "-%08x",
min, max, rnd);
reftable_buf_reset(dest);
@@ -648,7 +667,7 @@ static int format_name(struct reftable_buf *dest, uint64_t min, uint64_t max)
}
struct reftable_addition {
- struct lock_file tables_list_lock;
+ struct reftable_flock tables_list_lock;
struct reftable_stack *stack;
char **new_tables;
@@ -667,10 +686,8 @@ static int reftable_stack_init_addition(struct reftable_addition *add,
add->stack = st;
- err = hold_lock_file_for_update_timeout(&add->tables_list_lock,
- st->list_file,
- LOCK_NO_DEREF,
- st->opts.lock_timeout_ms);
+ err = flock_acquire(&add->tables_list_lock, st->list_file,
+ st->opts.lock_timeout_ms);
if (err < 0) {
if (errno == EEXIST) {
err = REFTABLE_LOCK_ERROR;
@@ -680,7 +697,7 @@ static int reftable_stack_init_addition(struct reftable_addition *add,
goto done;
}
if (st->opts.default_permissions) {
- if (chmod(get_lock_file_path(&add->tables_list_lock),
+ if (chmod(add->tables_list_lock.path,
st->opts.default_permissions) < 0) {
err = REFTABLE_IO_ERROR;
goto done;
@@ -724,7 +741,7 @@ static void reftable_addition_close(struct reftable_addition *add)
add->new_tables_len = 0;
add->new_tables_cap = 0;
- rollback_lock_file(&add->tables_list_lock);
+ flock_release(&add->tables_list_lock);
reftable_buf_release(&nm);
}
@@ -740,7 +757,6 @@ void reftable_addition_destroy(struct reftable_addition *add)
int reftable_addition_commit(struct reftable_addition *add)
{
struct reftable_buf table_list = REFTABLE_BUF_INIT;
- int lock_file_fd = get_lock_file_fd(&add->tables_list_lock);
int err = 0;
size_t i;
@@ -758,20 +774,20 @@ int reftable_addition_commit(struct reftable_addition *add)
goto done;
}
- err = write_in_full(lock_file_fd, table_list.buf, table_list.len);
+ err = write_in_full(add->tables_list_lock.fd, table_list.buf, table_list.len);
reftable_buf_release(&table_list);
if (err < 0) {
err = REFTABLE_IO_ERROR;
goto done;
}
- err = fsync_component(FSYNC_COMPONENT_REFERENCE, lock_file_fd);
+ err = stack_fsync(&add->stack->opts, add->tables_list_lock.fd);
if (err < 0) {
err = REFTABLE_IO_ERROR;
goto done;
}
- err = commit_lock_file(&add->tables_list_lock);
+ err = flock_commit(&add->tables_list_lock);
if (err < 0) {
err = REFTABLE_IO_ERROR;
goto done;
@@ -857,9 +873,11 @@ int reftable_addition_add(struct reftable_addition *add,
struct reftable_buf tab_file_name = REFTABLE_BUF_INIT;
struct reftable_buf next_name = REFTABLE_BUF_INIT;
struct reftable_writer *wr = NULL;
- struct tempfile *tab_file = NULL;
+ struct reftable_tmpfile tab_file = REFTABLE_TMPFILE_INIT;
+ struct fd_writer writer = {
+ .opts = &add->stack->opts,
+ };
int err = 0;
- int tab_fd;
reftable_buf_reset(&next_name);
@@ -875,22 +893,20 @@ int reftable_addition_add(struct reftable_addition *add,
if (err < 0)
goto done;
- tab_file = mks_tempfile(temp_tab_file_name.buf);
- if (!tab_file) {
- err = REFTABLE_IO_ERROR;
+ err = tmpfile_from_pattern(&tab_file, temp_tab_file_name.buf);
+ if (err < 0)
goto done;
- }
if (add->stack->opts.default_permissions) {
- if (chmod(get_tempfile_path(tab_file),
+ if (chmod(tab_file.path,
add->stack->opts.default_permissions)) {
err = REFTABLE_IO_ERROR;
goto done;
}
}
- tab_fd = get_tempfile_fd(tab_file);
- err = reftable_writer_new(&wr, reftable_fd_write, reftable_fd_flush,
- &tab_fd, &add->stack->opts);
+ writer.fd = tab_file.fd;
+ err = reftable_writer_new(&wr, fd_writer_write, fd_writer_flush,
+ &writer, &add->stack->opts);
if (err < 0)
goto done;
@@ -906,11 +922,9 @@ int reftable_addition_add(struct reftable_addition *add,
if (err < 0)
goto done;
- err = close_tempfile_gently(tab_file);
- if (err < 0) {
- err = REFTABLE_IO_ERROR;
+ err = tmpfile_close(&tab_file);
+ if (err < 0)
goto done;
- }
if (wr->min_update_index < add->next_update_index) {
err = REFTABLE_API_ERROR;
@@ -933,14 +947,12 @@ int reftable_addition_add(struct reftable_addition *add,
On windows, this relies on rand() picking a unique destination name.
Maybe we should do retry loop as well?
*/
- err = rename_tempfile(&tab_file, tab_file_name.buf);
- if (err < 0) {
- err = REFTABLE_IO_ERROR;
+ err = tmpfile_rename(&tab_file, tab_file_name.buf);
+ if (err < 0)
goto done;
- }
- REFTABLE_ALLOC_GROW(add->new_tables, add->new_tables_len + 1,
- add->new_tables_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(add->new_tables, add->new_tables_len + 1,
+ add->new_tables_cap);
if (!add->new_tables) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
@@ -948,7 +960,7 @@ int reftable_addition_add(struct reftable_addition *add,
add->new_tables[add->new_tables_len++] = reftable_buf_detach(&next_name);
done:
- delete_tempfile(&tab_file);
+ tmpfile_delete(&tab_file);
reftable_buf_release(&temp_tab_file_name);
reftable_buf_release(&tab_file_name);
reftable_buf_release(&next_name);
@@ -968,13 +980,16 @@ uint64_t reftable_stack_next_update_index(struct reftable_stack *st)
static int stack_compact_locked(struct reftable_stack *st,
size_t first, size_t last,
struct reftable_log_expiry_config *config,
- struct tempfile **tab_file_out)
+ struct reftable_tmpfile *tab_file_out)
{
struct reftable_buf next_name = REFTABLE_BUF_INIT;
struct reftable_buf tab_file_path = REFTABLE_BUF_INIT;
struct reftable_writer *wr = NULL;
- struct tempfile *tab_file;
- int tab_fd, err = 0;
+ struct fd_writer writer = {
+ .opts = &st->opts,
+ };
+ struct reftable_tmpfile tab_file = REFTABLE_TMPFILE_INIT;
+ int err = 0;
err = format_name(&next_name, reftable_reader_min_update_index(st->readers[first]),
reftable_reader_max_update_index(st->readers[last]));
@@ -989,21 +1004,19 @@ static int stack_compact_locked(struct reftable_stack *st,
if (err < 0)
goto done;
- tab_file = mks_tempfile(tab_file_path.buf);
- if (!tab_file) {
- err = REFTABLE_IO_ERROR;
+ err = tmpfile_from_pattern(&tab_file, tab_file_path.buf);
+ if (err < 0)
goto done;
- }
- tab_fd = get_tempfile_fd(tab_file);
if (st->opts.default_permissions &&
- chmod(get_tempfile_path(tab_file), st->opts.default_permissions) < 0) {
+ chmod(tab_file.path, st->opts.default_permissions) < 0) {
err = REFTABLE_IO_ERROR;
goto done;
}
- err = reftable_writer_new(&wr, reftable_fd_write, reftable_fd_flush,
- &tab_fd, &st->opts);
+ writer.fd = tab_file.fd;
+ err = reftable_writer_new(&wr, fd_writer_write, fd_writer_flush,
+ &writer, &st->opts);
if (err < 0)
goto done;
@@ -1015,15 +1028,15 @@ static int stack_compact_locked(struct reftable_stack *st,
if (err < 0)
goto done;
- err = close_tempfile_gently(tab_file);
+ err = tmpfile_close(&tab_file);
if (err < 0)
goto done;
*tab_file_out = tab_file;
- tab_file = NULL;
+ tab_file = REFTABLE_TMPFILE_INIT;
done:
- delete_tempfile(&tab_file);
+ tmpfile_delete(&tab_file);
reftable_writer_free(wr);
reftable_buf_release(&next_name);
reftable_buf_release(&tab_file_path);
@@ -1154,9 +1167,9 @@ static int stack_compact_range(struct reftable_stack *st,
struct reftable_buf new_table_name = REFTABLE_BUF_INIT;
struct reftable_buf new_table_path = REFTABLE_BUF_INIT;
struct reftable_buf table_name = REFTABLE_BUF_INIT;
- struct lock_file tables_list_lock = LOCK_INIT;
- struct lock_file *table_locks = NULL;
- struct tempfile *new_table = NULL;
+ struct reftable_flock tables_list_lock = REFTABLE_FLOCK_INIT;
+ struct reftable_flock *table_locks = NULL;
+ struct reftable_tmpfile new_table = REFTABLE_TMPFILE_INIT;
int is_empty_table = 0, err = 0;
size_t first_to_replace, last_to_replace;
size_t i, nlocks = 0;
@@ -1173,10 +1186,7 @@ static int stack_compact_range(struct reftable_stack *st,
* Hold the lock so that we can read "tables.list" and lock all tables
* which are part of the user-specified range.
*/
- err = hold_lock_file_for_update_timeout(&tables_list_lock,
- st->list_file,
- LOCK_NO_DEREF,
- st->opts.lock_timeout_ms);
+ err = flock_acquire(&tables_list_lock, st->list_file, st->opts.lock_timeout_ms);
if (err < 0) {
if (errno == EEXIST)
err = REFTABLE_LOCK_ERROR;
@@ -1199,19 +1209,20 @@ static int stack_compact_range(struct reftable_stack *st,
* older process is still busy compacting tables which are preexisting
* from the point of view of the newer process.
*/
- REFTABLE_CALLOC_ARRAY(table_locks, last - first + 1);
+ REFTABLE_ALLOC_ARRAY(table_locks, last - first + 1);
if (!table_locks) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
}
+ for (i = 0; i < last - first + 1; i++)
+ table_locks[i] = REFTABLE_FLOCK_INIT;
for (i = last + 1; i > first; i--) {
err = stack_filename(&table_name, st, reader_name(st->readers[i - 1]));
if (err < 0)
goto done;
- err = hold_lock_file_for_update(&table_locks[nlocks],
- table_name.buf, LOCK_NO_DEREF);
+ err = flock_acquire(&table_locks[nlocks], table_name.buf, 0);
if (err < 0) {
/*
* When the table is locked already we may do a
@@ -1247,7 +1258,7 @@ static int stack_compact_range(struct reftable_stack *st,
* run into file descriptor exhaustion when we compress a lot
* of tables.
*/
- err = close_lock_file_gently(&table_locks[nlocks++]);
+ err = flock_close(&table_locks[nlocks++]);
if (err < 0) {
err = REFTABLE_IO_ERROR;
goto done;
@@ -1259,7 +1270,7 @@ static int stack_compact_range(struct reftable_stack *st,
* "tables.list" lock while compacting the locked tables. This allows
* concurrent updates to the stack to proceed.
*/
- err = rollback_lock_file(&tables_list_lock);
+ err = flock_release(&tables_list_lock);
if (err < 0) {
err = REFTABLE_IO_ERROR;
goto done;
@@ -1282,10 +1293,7 @@ static int stack_compact_range(struct reftable_stack *st,
* "tables.list". We'll then replace the compacted range of tables with
* the new table.
*/
- err = hold_lock_file_for_update_timeout(&tables_list_lock,
- st->list_file,
- LOCK_NO_DEREF,
- st->opts.lock_timeout_ms);
+ err = flock_acquire(&tables_list_lock, st->list_file, st->opts.lock_timeout_ms);
if (err < 0) {
if (errno == EEXIST)
err = REFTABLE_LOCK_ERROR;
@@ -1295,7 +1303,7 @@ static int stack_compact_range(struct reftable_stack *st,
}
if (st->opts.default_permissions) {
- if (chmod(get_lock_file_path(&tables_list_lock),
+ if (chmod(tables_list_lock.path,
st->opts.default_permissions) < 0) {
err = REFTABLE_IO_ERROR;
goto done;
@@ -1424,11 +1432,9 @@ static int stack_compact_range(struct reftable_stack *st,
if (err < 0)
goto done;
- err = rename_tempfile(&new_table, new_table_path.buf);
- if (err < 0) {
- err = REFTABLE_IO_ERROR;
+ err = tmpfile_rename(&new_table, new_table_path.buf);
+ if (err < 0)
goto done;
- }
}
/*
@@ -1452,7 +1458,7 @@ static int stack_compact_range(struct reftable_stack *st,
goto done;
}
- err = write_in_full(get_lock_file_fd(&tables_list_lock),
+ err = write_in_full(tables_list_lock.fd,
tables_list_buf.buf, tables_list_buf.len);
if (err < 0) {
err = REFTABLE_IO_ERROR;
@@ -1460,14 +1466,14 @@ static int stack_compact_range(struct reftable_stack *st,
goto done;
}
- err = fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&tables_list_lock));
+ err = stack_fsync(&st->opts, tables_list_lock.fd);
if (err < 0) {
err = REFTABLE_IO_ERROR;
unlink(new_table_path.buf);
goto done;
}
- err = commit_lock_file(&tables_list_lock);
+ err = flock_commit(&tables_list_lock);
if (err < 0) {
err = REFTABLE_IO_ERROR;
unlink(new_table_path.buf);
@@ -1488,19 +1494,24 @@ static int stack_compact_range(struct reftable_stack *st,
* readers, so it is expected that unlinking tables may fail.
*/
for (i = 0; i < nlocks; i++) {
- struct lock_file *table_lock = &table_locks[i];
- char *table_path = get_locked_file_path(table_lock);
- unlink(table_path);
- reftable_free(table_path);
+ struct reftable_flock *table_lock = &table_locks[i];
+
+ reftable_buf_reset(&table_name);
+ err = reftable_buf_add(&table_name, table_lock->path,
+ strlen(table_lock->path) - strlen(".lock"));
+ if (err)
+ continue;
+
+ unlink(table_name.buf);
}
done:
- rollback_lock_file(&tables_list_lock);
+ flock_release(&tables_list_lock);
for (i = 0; table_locks && i < nlocks; i++)
- rollback_lock_file(&table_locks[i]);
+ flock_release(&table_locks[i]);
reftable_free(table_locks);
- delete_tempfile(&new_table);
+ tmpfile_delete(&new_table);
reftable_buf_release(&new_table_name);
reftable_buf_release(&new_table_path);
reftable_buf_release(&tables_list_buf);
@@ -1603,7 +1614,7 @@ struct segment suggest_compaction_segment(uint64_t *sizes, size_t n,
static uint64_t *stack_table_sizes_for_compaction(struct reftable_stack *st)
{
- int version = (st->opts.hash_id == GIT_SHA1_FORMAT_ID) ? 1 : 2;
+ int version = (st->opts.hash_id == REFTABLE_HASH_SHA1) ? 1 : 2;
int overhead = header_size(version) - 1;
uint64_t *sizes;
@@ -1622,6 +1633,9 @@ int reftable_stack_auto_compact(struct reftable_stack *st)
struct segment seg;
uint64_t *sizes;
+ if (st->merged->readers_len < 2)
+ return 0;
+
sizes = stack_table_sizes_for_compaction(st);
if (!sizes)
return REFTABLE_OUT_OF_MEMORY_ERROR;
@@ -1753,14 +1767,12 @@ static int reftable_stack_clean_locked(struct reftable_stack *st)
}
while ((d = readdir(dir))) {
- int i = 0;
int found = 0;
if (!is_table_name(d->d_name))
continue;
- for (i = 0; !found && i < st->readers_len; i++) {
+ for (size_t i = 0; !found && i < st->readers_len; i++)
found = !strcmp(reader_name(st->readers[i]), d->d_name);
- }
if (found)
continue;
@@ -1790,3 +1802,8 @@ done:
reftable_addition_destroy(add);
return err;
}
+
+enum reftable_hash reftable_stack_hash_id(struct reftable_stack *st)
+{
+ return reftable_merged_table_hash_id(st->merged);
+}
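The new reftable_stack_hash_id() getter lets callers query the stack's hash function without reaching into st->merged. A minimal caller-side sketch (hypothetical, not part of this change; assumes reftable-stack.h and reftable-basics.h) mapping the result to an object ID length:

static uint32_t stack_id_len(struct reftable_stack *stack)
{
	/* Ask the stack which hash function its tables use. */
	switch (reftable_stack_hash_id(stack)) {
	case REFTABLE_HASH_SHA256:
		return REFTABLE_HASH_SIZE_SHA256;
	case REFTABLE_HASH_SHA1:
	default:
		return REFTABLE_HASH_SIZE_SHA1;
	}
}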
diff --git a/reftable/system.c b/reftable/system.c
new file mode 100644
index 0000000000..adf8e4d30b
--- /dev/null
+++ b/reftable/system.c
@@ -0,0 +1,126 @@
+#include "system.h"
+#include "basics.h"
+#include "reftable-error.h"
+#include "../lockfile.h"
+#include "../tempfile.h"
+
+int tmpfile_from_pattern(struct reftable_tmpfile *out, const char *pattern)
+{
+ struct tempfile *tempfile;
+
+ tempfile = mks_tempfile(pattern);
+ if (!tempfile)
+ return REFTABLE_IO_ERROR;
+
+ out->path = tempfile->filename.buf;
+ out->fd = tempfile->fd;
+ out->priv = tempfile;
+
+ return 0;
+}
+
+int tmpfile_close(struct reftable_tmpfile *t)
+{
+ struct tempfile *tempfile = t->priv;
+ int ret = close_tempfile_gently(tempfile);
+ t->fd = -1;
+ if (ret < 0)
+ return REFTABLE_IO_ERROR;
+ return 0;
+}
+
+int tmpfile_delete(struct reftable_tmpfile *t)
+{
+ struct tempfile *tempfile = t->priv;
+ int ret = delete_tempfile(&tempfile);
+ *t = REFTABLE_TMPFILE_INIT;
+ if (ret < 0)
+ return REFTABLE_IO_ERROR;
+ return 0;
+}
+
+int tmpfile_rename(struct reftable_tmpfile *t, const char *path)
+{
+ struct tempfile *tempfile = t->priv;
+ int ret = rename_tempfile(&tempfile, path);
+ *t = REFTABLE_TMPFILE_INIT;
+ if (ret < 0)
+ return REFTABLE_IO_ERROR;
+ return 0;
+}
+
+int flock_acquire(struct reftable_flock *l, const char *target_path,
+ long timeout_ms)
+{
+ struct lock_file *lockfile;
+ int err;
+
+ lockfile = reftable_malloc(sizeof(*lockfile));
+ if (!lockfile)
+ return REFTABLE_OUT_OF_MEMORY_ERROR;
+
+ err = hold_lock_file_for_update_timeout(lockfile, target_path, LOCK_NO_DEREF,
+ timeout_ms);
+ if (err < 0) {
+ reftable_free(lockfile);
+ if (errno == EEXIST)
+ return REFTABLE_LOCK_ERROR;
+ return -1;
+ }
+
+ l->fd = get_lock_file_fd(lockfile);
+ l->path = get_lock_file_path(lockfile);
+ l->priv = lockfile;
+
+ return 0;
+}
+
+int flock_close(struct reftable_flock *l)
+{
+ struct lock_file *lockfile = l->priv;
+ int ret;
+
+ if (!lockfile)
+ return REFTABLE_API_ERROR;
+
+ ret = close_lock_file_gently(lockfile);
+ l->fd = -1;
+ if (ret < 0)
+ return REFTABLE_IO_ERROR;
+
+ return 0;
+}
+
+int flock_release(struct reftable_flock *l)
+{
+ struct lock_file *lockfile = l->priv;
+ int ret;
+
+ if (!lockfile)
+ return 0;
+
+ ret = rollback_lock_file(lockfile);
+ reftable_free(lockfile);
+ *l = REFTABLE_FLOCK_INIT;
+ if (ret < 0)
+ return REFTABLE_IO_ERROR;
+
+ return 0;
+}
+
+int flock_commit(struct reftable_flock *l)
+{
+ struct lock_file *lockfile = l->priv;
+ int ret;
+
+ if (!lockfile)
+ return REFTABLE_API_ERROR;
+
+ ret = commit_lock_file(lockfile);
+ reftable_free(lockfile);
+ *l = REFTABLE_FLOCK_INIT;
+ if (ret < 0)
+ return REFTABLE_IO_ERROR;
+
+ return 0;
+}
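For illustration, a hedged sketch of the temporary-file lifecycle these wrappers provide: create from a pattern, write, close, then either rename into place or delete on error. The helper name is made up and the write handling is abbreviated; only the tmpfile_* calls defined above are assumed.

#include "system.h"
#include "reftable-error.h"

static int write_and_install(const char *pattern, const char *dest,
			     const void *data, size_t len)
{
	struct reftable_tmpfile t = REFTABLE_TMPFILE_INIT;
	int err;

	err = tmpfile_from_pattern(&t, pattern);
	if (err < 0)
		return err;

	/* Abbreviated: a real caller would loop over short writes. */
	if (write(t.fd, data, len) < 0 ||
	    tmpfile_close(&t) < 0 ||
	    tmpfile_rename(&t, dest) < 0) {
		tmpfile_delete(&t); /* no-op if already renamed or deleted */
		return REFTABLE_IO_ERROR;
	}

	return 0;
}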
diff --git a/reftable/system.h b/reftable/system.h
index 5ec8583343..7d5f803eeb 100644
--- a/reftable/system.h
+++ b/reftable/system.h
@@ -12,11 +12,90 @@ https://developers.google.com/open-source/licenses/bsd
/* This header glues the reftable library to the rest of Git */
#include "git-compat-util.h"
-#include "lockfile.h"
-#include "tempfile.h"
-#include "hash.h" /* hash ID, sizes.*/
-#include "dir.h" /* remove_dir_recursively, for tests.*/
-int hash_size(uint32_t id);
+/*
+ * An implementation-specific temporary file. By making this specific to the
+ * implementation it becomes possible to tie temporary files into any kind of
+ * signal or atexit handlers for cleanup on abnormal situations.
+ */
+struct reftable_tmpfile {
+ const char *path;
+ int fd;
+ void *priv;
+};
+#define REFTABLE_TMPFILE_INIT ((struct reftable_tmpfile) { .fd = -1, })
+
+/*
+ * Create a temporary file from a pattern similar to how mkstemp(3p) would.
+ * The `pattern` shall not be modified. On success, the structure at `out` has
+ * been initialized such that it is ready for use. Returns 0 on success, a
+ * reftable error code on error.
+ */
+int tmpfile_from_pattern(struct reftable_tmpfile *out, const char *pattern);
+
+/*
+ * Close the temporary file's file descriptor without removing the file itself.
+ * This is a no-op in case the file has already been closed beforehand. Returns
+ * 0 on success, a reftable error code on error.
+ */
+int tmpfile_close(struct reftable_tmpfile *t);
+
+/*
+ * Close the temporary file and delete it. This is a no-op in case the file has
+ * already been deleted or renamed beforehand. Returns 0 on success, a reftable
+ * error code on error.
+ */
+int tmpfile_delete(struct reftable_tmpfile *t);
+
+/*
+ * Rename the temporary file to the provided path. The temporary file must be
+ * active and is deactivated by this call. Returns 0 on success, a reftable
+ * error code on error.
+ */
+int tmpfile_rename(struct reftable_tmpfile *t, const char *path);
+
+/*
+ * An implementation-specific file lock. Same as with `reftable_tmpfile`,
+ * making this specific to the implementation makes it possible to tie this
+ * into signal or atexit handlers such that we know to clean up stale locks on
+ * abnormal exits.
+ */
+struct reftable_flock {
+ const char *path;
+ int fd;
+ void *priv;
+};
+#define REFTABLE_FLOCK_INIT ((struct reftable_flock){ .fd = -1, })
+
+/*
+ * Acquire the lock for the given target path by exclusively creating a file
+ * with ".lock" appended to it. If that lock exists, we wait up to `timeout_ms`
+ * to acquire the lock. If `timeout_ms` is 0 we don't wait; if it is negative
+ * we block indefinitely.
+ *
+ * Returns 0 on success, a reftable error code on error.
+ */
+int flock_acquire(struct reftable_flock *l, const char *target_path,
+ long timeout_ms);
+
+/*
+ * Close the lockfile's file descriptor without removing the lock itself. This
+ * is a no-op in case the lockfile has already been closed beforehand. Returns
+ * 0 on success, a reftable error code on error.
+ */
+int flock_close(struct reftable_flock *l);
+
+/*
+ * Release the lock by unlinking the lockfile. This is a no-op in case the
+ * lockfile has already been released or committed beforehand. Returns 0 on
+ * success, a reftable error code on error.
+ */
+int flock_release(struct reftable_flock *l);
+
+/*
+ * Commit the lock by renaming the lockfile into place. Returns 0 on success, a
+ * reftable error code on error.
+ */
+int flock_commit(struct reftable_flock *l);
#endif
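The lock lifecycle, sketched after the way stack compaction uses it (illustration only; the helper name is made up and write handling is abbreviated): acquire the lock on the target path, write through the lock's file descriptor, commit, and rely on flock_release() being a no-op once the lock has been committed.

#include "system.h"
#include "reftable-error.h"

static int rewrite_locked_file(const char *target, const void *buf, size_t len)
{
	struct reftable_flock lock = REFTABLE_FLOCK_INIT;
	int err;

	err = flock_acquire(&lock, target, 1000 /* wait up to 1s */);
	if (err < 0)
		return err;

	if (write(lock.fd, buf, len) < 0 || flock_commit(&lock) < 0)
		err = REFTABLE_IO_ERROR;

	/* Rolls back on error, no-op after a successful commit. */
	flock_release(&lock);
	return err;
}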
diff --git a/reftable/writer.c b/reftable/writer.c
index be0fae6cb2..91d6629486 100644
--- a/reftable/writer.c
+++ b/reftable/writer.c
@@ -79,7 +79,7 @@ static void options_set_defaults(struct reftable_write_options *opts)
}
if (opts->hash_id == 0) {
- opts->hash_id = GIT_SHA1_FORMAT_ID;
+ opts->hash_id = REFTABLE_HASH_SHA1;
}
if (opts->block_size == 0) {
opts->block_size = DEFAULT_BLOCK_SIZE;
@@ -88,7 +88,7 @@ static void options_set_defaults(struct reftable_write_options *opts)
static int writer_version(struct reftable_writer *w)
{
- return (w->opts.hash_id == 0 || w->opts.hash_id == GIT_SHA1_FORMAT_ID) ?
+ return (w->opts.hash_id == 0 || w->opts.hash_id == REFTABLE_HASH_SHA1) ?
1 :
2;
}
@@ -103,8 +103,22 @@ static int writer_write_header(struct reftable_writer *w, uint8_t *dest)
put_be64(dest + 8, w->min_update_index);
put_be64(dest + 16, w->max_update_index);
if (writer_version(w) == 2) {
- put_be32(dest + 24, w->opts.hash_id);
+ uint32_t hash_id;
+
+ switch (w->opts.hash_id) {
+ case REFTABLE_HASH_SHA1:
+ hash_id = REFTABLE_FORMAT_ID_SHA1;
+ break;
+ case REFTABLE_HASH_SHA256:
+ hash_id = REFTABLE_FORMAT_ID_SHA256;
+ break;
+ default:
+ return -1;
+ }
+
+ put_be32(dest + 24, hash_id);
}
+
return header_size(writer_version(w));
}
@@ -240,7 +254,8 @@ static int writer_index_hash(struct reftable_writer *w, struct reftable_buf *has
if (key->offset_len > 0 && key->offsets[key->offset_len - 1] == off)
return 0;
- REFTABLE_ALLOC_GROW(key->offsets, key->offset_len + 1, key->offset_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(key->offsets, key->offset_len + 1,
+ key->offset_cap);
if (!key->offsets)
return REFTABLE_OUT_OF_MEMORY_ERROR;
key->offsets[key->offset_len++] = off;
@@ -411,6 +426,18 @@ int reftable_writer_add_log(struct reftable_writer *w,
if (log->value_type == REFTABLE_LOG_DELETION)
return reftable_writer_add_log_verbatim(w, log);
+ /*
+ * Verify only the upper limit of the update_index. Each reflog entry
+ * is tied to a specific update_index. Entries in the reflog can be
+ * replaced by adding a new entry with the same update_index,
+ * effectively canceling the old one.
+ *
+ * Consequently, reflog updates may include update_index values lower
+ * than the writer's min_update_index.
+ */
+ if (log->update_index > w->max_update_index)
+ return REFTABLE_API_ERROR;
+
if (!log->refname)
return REFTABLE_API_ERROR;
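In other words, only values above the writer's max_update_index are rejected; values below min_update_index are accepted because they rewrite existing reflog entries. A hedged fragment, assuming `w` is an initialized writer whose limits were previously set to [5, 10] and `err` is an int:

	struct reftable_log_record log = {
		.refname = (char *) "refs/heads/main",
		.update_index = 3, /* below min_update_index: still allowed */
		.value_type = REFTABLE_LOG_UPDATE,
	};

	err = reftable_writer_add_log(w, &log); /* 3 <= 10, not rejected here */

	log.update_index = 11; /* above max_update_index */
	err = reftable_writer_add_log(w, &log); /* returns REFTABLE_API_ERROR */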
@@ -550,7 +577,7 @@ static int writer_finish_section(struct reftable_writer *w)
struct common_prefix_arg {
struct reftable_buf *last;
- int max;
+ size_t max;
};
static void update_common(void *void_arg, void *key)
@@ -558,10 +585,9 @@ static void update_common(void *void_arg, void *key)
struct common_prefix_arg *arg = void_arg;
struct obj_index_tree_node *entry = key;
if (arg->last) {
- int n = common_prefix_size(&entry->hash, arg->last);
- if (n > arg->max) {
+ size_t n = common_prefix_size(&entry->hash, arg->last);
+ if (n > arg->max)
arg->max = n;
- }
}
arg->last = &entry->hash;
}
@@ -794,7 +820,7 @@ static int writer_flush_nonempty_block(struct reftable_writer *w)
* Note that this also applies when flushing index blocks, in which
* case we will end up with a multi-level index.
*/
- REFTABLE_ALLOC_GROW(w->index, w->index_len + 1, w->index_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(w->index, w->index_len + 1, w->index_cap);
if (!w->index)
return REFTABLE_OUT_OF_MEMORY_ERROR;