Diffstat (limited to 'reftable/stack_test.c')
-rw-r--r--  reftable/stack_test.c  285
1 file changed, 232 insertions(+), 53 deletions(-)
diff --git a/reftable/stack_test.c b/reftable/stack_test.c
index e3c11e6a6e..89cb2be19f 100644
--- a/reftable/stack_test.c
+++ b/reftable/stack_test.c
@@ -10,6 +10,7 @@ https://developers.google.com/open-source/licenses/bsd
#include "system.h"
+#include "copy.h"
#include "reftable-reader.h"
#include "merged.h"
#include "basics.h"
@@ -109,6 +110,35 @@ static int write_test_ref(struct reftable_writer *wr, void *arg)
return reftable_writer_add_ref(wr, ref);
}
+static void write_n_ref_tables(struct reftable_stack *st,
+ size_t n)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int disable_auto_compact;
+ int err;
+
+ disable_auto_compact = st->opts.disable_auto_compact;
+ st->opts.disable_auto_compact = 1;
+
+ for (size_t i = 0; i < n; i++) {
+ struct reftable_ref_record ref = {
+ .update_index = reftable_stack_next_update_index(st),
+ .value_type = REFTABLE_REF_VAL1,
+ };
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "refs/heads/branch-%04u", (unsigned) i);
+ ref.refname = buf.buf;
+ set_test_hash(ref.value.val1, i);
+
+ err = reftable_stack_add(st, &write_test_ref, &ref);
+ EXPECT_ERR(err);
+ }
+
+ st->opts.disable_auto_compact = disable_auto_compact;
+ strbuf_release(&buf);
+}
+
struct write_log_arg {
struct reftable_log_record *log;
uint64_t update_index;
@@ -151,13 +181,6 @@ static void test_reftable_stack_add_one(void)
EXPECT(0 == strcmp("master", dest.value.symref));
EXPECT(st->readers_len > 0);
- printf("testing print functionality:\n");
- err = reftable_stack_print_directory(dir, GIT_SHA1_FORMAT_ID);
- EXPECT_ERR(err);
-
- err = reftable_stack_print_directory(dir, GIT_SHA256_FORMAT_ID);
- EXPECT(err == REFTABLE_FORMAT_ERROR);
-
#ifndef GIT_WINDOWS_NATIVE
strbuf_addstr(&scratch, dir);
strbuf_addstr(&scratch, "/tables.list");
@@ -319,9 +342,9 @@ static void test_reftable_stack_transaction_api_performs_auto_compaction(void)
* all tables in the stack.
*/
if (i != n)
- EXPECT(st->merged->stack_len == i + 1);
+ EXPECT(st->merged->readers_len == i + 1);
else
- EXPECT(st->merged->stack_len == 1);
+ EXPECT(st->merged->readers_len == 1);
}
reftable_stack_destroy(st);
@@ -347,7 +370,7 @@ static void test_reftable_stack_auto_compaction_fails_gracefully(void)
err = reftable_stack_add(st, write_test_ref, &ref);
EXPECT_ERR(err);
- EXPECT(st->merged->stack_len == 1);
+ EXPECT(st->merged->readers_len == 1);
EXPECT(st->stats.attempts == 0);
EXPECT(st->stats.failures == 0);
@@ -362,7 +385,7 @@ static void test_reftable_stack_auto_compaction_fails_gracefully(void)
ref.update_index = 2;
err = reftable_stack_add(st, write_test_ref, &ref);
EXPECT_ERR(err);
- EXPECT(st->merged->stack_len == 2);
+ EXPECT(st->merged->readers_len == 2);
EXPECT(st->stats.attempts == 1);
EXPECT(st->stats.failures == 1);
@@ -371,7 +394,7 @@ static void test_reftable_stack_auto_compaction_fails_gracefully(void)
clear_dir(dir);
}
-static int write_error(struct reftable_writer *wr, void *arg)
+static int write_error(struct reftable_writer *wr UNUSED, void *arg)
{
return *((int *)arg);
}
@@ -788,7 +811,7 @@ static void test_reflog_expire(void)
reftable_log_record_release(&log);
}
-static int write_nothing(struct reftable_writer *wr, void *arg)
+static int write_nothing(struct reftable_writer *wr, void *arg UNUSED)
{
reftable_writer_set_limits(wr, 1, 1);
return 0;
@@ -853,7 +876,7 @@ static void test_reftable_stack_auto_compaction(void)
err = reftable_stack_auto_compact(st);
EXPECT_ERR(err);
- EXPECT(i < 3 || st->merged->stack_len < 2 * fastlog2(i));
+ EXPECT(i < 3 || st->merged->readers_len < 2 * fastlog2(i));
}
EXPECT(reftable_stack_compaction_stats(st)->entries_written <
@@ -863,6 +886,47 @@ static void test_reftable_stack_auto_compaction(void)
clear_dir(dir);
}
+static void test_reftable_stack_auto_compaction_with_locked_tables(void)
+{
+ struct reftable_write_options opts = {
+ .disable_auto_compact = 1,
+ };
+ struct reftable_stack *st = NULL;
+ struct strbuf buf = STRBUF_INIT;
+ char *dir = get_tmp_dir(__LINE__);
+ int err;
+
+ err = reftable_new_stack(&st, dir, &opts);
+ EXPECT_ERR(err);
+
+ write_n_ref_tables(st, 5);
+ EXPECT(st->merged->readers_len == 5);
+
+ /*
+ * Given that all tables we have written should be roughly the same
+ * size, we expect that auto-compaction will want to compact all of the
+ * tables. Locking any of the tables will keep it from doing so.
+ */
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s/%s.lock", dir, st->readers[2]->name);
+ write_file_buf(buf.buf, "", 0);
+
+ /*
+ * When parts of the stack are locked, then auto-compaction does a best
+ * effort compaction of those tables which aren't locked. So while this
+ * would in theory compact all tables, due to the preexisting lock we
+ * only compact the newest two tables.
+ */
+ err = reftable_stack_auto_compact(st);
+ EXPECT_ERR(err);
+ EXPECT(st->stats.failures == 0);
+ EXPECT(st->merged->readers_len == 4);
+
+ reftable_stack_destroy(st);
+ strbuf_release(&buf);
+ clear_dir(dir);
+}
+
static void test_reftable_stack_add_performs_auto_compaction(void)
{
struct reftable_write_options opts = { 0 };
@@ -901,9 +965,9 @@ static void test_reftable_stack_add_performs_auto_compaction(void)
* all tables in the stack.
*/
if (i != n)
- EXPECT(st->merged->stack_len == i + 1);
+ EXPECT(st->merged->readers_len == i + 1);
else
- EXPECT(st->merged->stack_len == 1);
+ EXPECT(st->merged->readers_len == 1);
}
reftable_stack_destroy(st);
@@ -911,30 +975,51 @@ static void test_reftable_stack_add_performs_auto_compaction(void)
clear_dir(dir);
}
+static void test_reftable_stack_compaction_with_locked_tables(void)
+{
+ struct reftable_write_options opts = {
+ .disable_auto_compact = 1,
+ };
+ struct reftable_stack *st = NULL;
+ struct strbuf buf = STRBUF_INIT;
+ char *dir = get_tmp_dir(__LINE__);
+ int err;
+
+ err = reftable_new_stack(&st, dir, &opts);
+ EXPECT_ERR(err);
+
+ write_n_ref_tables(st, 3);
+ EXPECT(st->merged->readers_len == 3);
+
+ /* Lock one of the tables that we're about to compact. */
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s/%s.lock", dir, st->readers[1]->name);
+ write_file_buf(buf.buf, "", 0);
+
+ /*
+ * Compaction is expected to fail given that we were not able to
+ * compact all tables.
+ */
+ err = reftable_stack_compact_all(st, NULL);
+ EXPECT(err == REFTABLE_LOCK_ERROR);
+ EXPECT(st->stats.failures == 1);
+ EXPECT(st->merged->readers_len == 3);
+
+ reftable_stack_destroy(st);
+ strbuf_release(&buf);
+ clear_dir(dir);
+}
+
static void test_reftable_stack_compaction_concurrent(void)
{
struct reftable_write_options opts = { 0 };
struct reftable_stack *st1 = NULL, *st2 = NULL;
char *dir = get_tmp_dir(__LINE__);
- int err, i;
- int N = 3;
+ int err;
err = reftable_new_stack(&st1, dir, &opts);
EXPECT_ERR(err);
-
- for (i = 0; i < N; i++) {
- char name[100];
- struct reftable_ref_record ref = {
- .refname = name,
- .update_index = reftable_stack_next_update_index(st1),
- .value_type = REFTABLE_REF_SYMREF,
- .value.symref = (char *) "master",
- };
- snprintf(name, sizeof(name), "branch%04d", i);
-
- err = reftable_stack_add(st1, &write_test_ref, &ref);
- EXPECT_ERR(err);
- }
+ write_n_ref_tables(st1, 3);
err = reftable_new_stack(&st2, dir, &opts);
EXPECT_ERR(err);
@@ -952,10 +1037,8 @@ static void test_reftable_stack_compaction_concurrent(void)
static void unclean_stack_close(struct reftable_stack *st)
{
/* break abstraction boundary to simulate unclean shutdown. */
- int i = 0;
- for (; i < st->readers_len; i++) {
- reftable_reader_free(st->readers[i]);
- }
+ for (size_t i = 0; i < st->readers_len; i++)
+ reftable_reader_decref(st->readers[i]);
st->readers_len = 0;
FREE_AND_NULL(st->readers);
}
@@ -965,25 +1048,11 @@ static void test_reftable_stack_compaction_concurrent_clean(void)
struct reftable_write_options opts = { 0 };
struct reftable_stack *st1 = NULL, *st2 = NULL, *st3 = NULL;
char *dir = get_tmp_dir(__LINE__);
- int err, i;
- int N = 3;
+ int err;
err = reftable_new_stack(&st1, dir, &opts);
EXPECT_ERR(err);
-
- for (i = 0; i < N; i++) {
- char name[100];
- struct reftable_ref_record ref = {
- .refname = name,
- .update_index = reftable_stack_next_update_index(st1),
- .value_type = REFTABLE_REF_SYMREF,
- .value.symref = (char *) "master",
- };
- snprintf(name, sizeof(name), "branch%04d", i);
-
- err = reftable_stack_add(st1, &write_test_ref, &ref);
- EXPECT_ERR(err);
- }
+ write_n_ref_tables(st1, 3);
err = reftable_new_stack(&st2, dir, &opts);
EXPECT_ERR(err);
@@ -1008,7 +1077,113 @@ static void test_reftable_stack_compaction_concurrent_clean(void)
clear_dir(dir);
}
-int stack_test_main(int argc, const char *argv[])
+static void test_reftable_stack_read_across_reload(void)
+{
+ struct reftable_write_options opts = { 0 };
+ struct reftable_stack *st1 = NULL, *st2 = NULL;
+ struct reftable_ref_record rec = { 0 };
+ struct reftable_iterator it = { 0 };
+ char *dir = get_tmp_dir(__LINE__);
+ int err;
+
+ /* Create a first stack and set up an iterator for it. */
+ err = reftable_new_stack(&st1, dir, &opts);
+ EXPECT_ERR(err);
+ write_n_ref_tables(st1, 2);
+ EXPECT(st1->merged->readers_len == 2);
+ reftable_stack_init_ref_iterator(st1, &it);
+ err = reftable_iterator_seek_ref(&it, "");
+ EXPECT_ERR(err);
+
+ /* Set up a second stack for the same directory and compact it. */
+ err = reftable_new_stack(&st2, dir, &opts);
+ EXPECT_ERR(err);
+ EXPECT(st2->merged->readers_len == 2);
+ err = reftable_stack_compact_all(st2, NULL);
+ EXPECT_ERR(err);
+ EXPECT(st2->merged->readers_len == 1);
+
+ /*
+ * Verify that we can continue to use the old iterator even after we
+ * have reloaded its stack.
+ */
+ err = reftable_stack_reload(st1);
+ EXPECT_ERR(err);
+ EXPECT(st1->merged->readers_len == 1);
+ err = reftable_iterator_next_ref(&it, &rec);
+ EXPECT_ERR(err);
+ EXPECT(!strcmp(rec.refname, "refs/heads/branch-0000"));
+ err = reftable_iterator_next_ref(&it, &rec);
+ EXPECT_ERR(err);
+ EXPECT(!strcmp(rec.refname, "refs/heads/branch-0001"));
+ err = reftable_iterator_next_ref(&it, &rec);
+ EXPECT(err > 0);
+
+ reftable_ref_record_release(&rec);
+ reftable_iterator_destroy(&it);
+ reftable_stack_destroy(st1);
+ reftable_stack_destroy(st2);
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_reload_with_missing_table(void)
+{
+ struct reftable_write_options opts = { 0 };
+ struct reftable_stack *st = NULL;
+ struct reftable_ref_record rec = { 0 };
+ struct reftable_iterator it = { 0 };
+ struct strbuf table_path = STRBUF_INIT, content = STRBUF_INIT;
+ char *dir = get_tmp_dir(__LINE__);
+ int err;
+
+ /* Create a first stack and set up an iterator for it. */
+ err = reftable_new_stack(&st, dir, &opts);
+ EXPECT_ERR(err);
+ write_n_ref_tables(st, 2);
+ EXPECT(st->merged->readers_len == 2);
+ reftable_stack_init_ref_iterator(st, &it);
+ err = reftable_iterator_seek_ref(&it, "");
+ EXPECT_ERR(err);
+
+ /*
+ * Update the tables.list file with some garbage data, while reusing
+ * our old readers. This should trigger a partial reload of the stack,
+ * where we try to reuse our old readers.
+ */
+ strbuf_addf(&content, "%s\n", st->readers[0]->name);
+ strbuf_addf(&content, "%s\n", st->readers[1]->name);
+ strbuf_addstr(&content, "garbage\n");
+ strbuf_addf(&table_path, "%s.lock", st->list_file);
+ write_file_buf(table_path.buf, content.buf, content.len);
+ err = rename(table_path.buf, st->list_file);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_reload(st);
+ EXPECT(err == -4);
+ EXPECT(st->merged->readers_len == 2);
+
+ /*
+ * Even though the reload has failed, we should be able to continue
+ * using the iterator.
+ */
+ err = reftable_iterator_next_ref(&it, &rec);
+ EXPECT_ERR(err);
+ EXPECT(!strcmp(rec.refname, "refs/heads/branch-0000"));
+ err = reftable_iterator_next_ref(&it, &rec);
+ EXPECT_ERR(err);
+ EXPECT(!strcmp(rec.refname, "refs/heads/branch-0001"));
+ err = reftable_iterator_next_ref(&it, &rec);
+ EXPECT(err > 0);
+
+ reftable_ref_record_release(&rec);
+ reftable_iterator_destroy(&it);
+ reftable_stack_destroy(st);
+ strbuf_release(&table_path);
+ strbuf_release(&content);
+ clear_dir(dir);
+}
+
+int stack_test_main(int argc UNUSED, const char *argv[] UNUSED)
{
RUN_TEST(test_empty_add);
RUN_TEST(test_read_file);
@@ -1016,9 +1191,11 @@ int stack_test_main(int argc, const char *argv[])
RUN_TEST(test_reftable_stack_add);
RUN_TEST(test_reftable_stack_add_one);
RUN_TEST(test_reftable_stack_auto_compaction);
+ RUN_TEST(test_reftable_stack_auto_compaction_with_locked_tables);
RUN_TEST(test_reftable_stack_add_performs_auto_compaction);
RUN_TEST(test_reftable_stack_compaction_concurrent);
RUN_TEST(test_reftable_stack_compaction_concurrent_clean);
+ RUN_TEST(test_reftable_stack_compaction_with_locked_tables);
RUN_TEST(test_reftable_stack_hash_id);
RUN_TEST(test_reftable_stack_lock_failure);
RUN_TEST(test_reftable_stack_log_normalize);
@@ -1028,6 +1205,8 @@ int stack_test_main(int argc, const char *argv[])
RUN_TEST(test_reftable_stack_auto_compaction_fails_gracefully);
RUN_TEST(test_reftable_stack_update_index_check);
RUN_TEST(test_reftable_stack_uptodate);
+ RUN_TEST(test_reftable_stack_read_across_reload);
+ RUN_TEST(test_reftable_stack_reload_with_missing_table);
RUN_TEST(test_suggest_compaction_segment);
RUN_TEST(test_suggest_compaction_segment_nothing);
return 0;