Diffstat (limited to 'builtin')
-rw-r--r--  builtin/bisect.c              6
-rw-r--r--  builtin/cat-file.c            3
-rw-r--r--  builtin/commit-graph.c        2
-rw-r--r--  builtin/count-objects.c       3
-rw-r--r--  builtin/fast-export.c         7
-rw-r--r--  builtin/fast-import.c        47
-rw-r--r--  builtin/fsck.c               15
-rw-r--r--  builtin/gc.c                 16
-rw-r--r--  builtin/grep.c                2
-rw-r--r--  builtin/pack-objects.c       26
-rw-r--r--  builtin/pack-redundant.c     14
-rw-r--r--  builtin/repack.c           1360
-rw-r--r--  builtin/sparse-checkout.c   216
13 files changed, 338 insertions, 1379 deletions
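Note: most hunks below replace manual walks over the packed_git list returned by packfile_store_get_all_packs() with the repo_for_each_pack() iterator. A minimal sketch of the converted call-site pattern, assuming repo_for_each_pack() is a for-loop style macro declared in "packfile.h" (its exact definition is not part of this diff):

	/*
	 * Hedged sketch of the call-site idiom used throughout this series;
	 * repo_for_each_pack() and struct packed_git are assumed to come
	 * from "packfile.h" as in the hunks below.
	 */
	static unsigned long count_local_packs(struct repository *r)
	{
		struct packed_git *p;
		unsigned long nr = 0;

		/* was: for (p = packfile_store_get_all_packs(...); p; p = p->next) */
		repo_for_each_pack(r, p) {
			if (!p->pack_local)
				continue;
			nr++;
		}
		return nr;
	}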
diff --git a/builtin/bisect.c b/builtin/bisect.c
index 8b8d870cd1..993caf545d 100644
--- a/builtin/bisect.c
+++ b/builtin/bisect.c
@@ -1453,9 +1453,13 @@ int cmd_bisect(int argc,
if (!argc)
usage_msg_opt(_("need a command"), git_bisect_usage, options);
+ if (!strcmp(argv[0], "help"))
+ usage_with_options(git_bisect_usage, options);
+
set_terms(&terms, "bad", "good");
get_terms(&terms);
- if (check_and_set_terms(&terms, argv[0]))
+ if (check_and_set_terms(&terms, argv[0]) ||
+ !one_of(argv[0], terms.term_good, terms.term_bad, NULL))
usage_msg_optf(_("unknown command: '%s'"), git_bisect_usage,
options, argv[0]);
res = bisect_state(&terms, argc, argv);
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index 5ca2ca3852..983ecec837 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -852,10 +852,9 @@ static void batch_each_object(struct batch_options *opt,
if (bitmap && !for_each_bitmapped_object(bitmap, &opt->objects_filter,
batch_one_object_bitmapped, &payload)) {
- struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *pack;
- for (pack = packfile_store_get_all_packs(packs); pack; pack = pack->next) {
+ repo_for_each_pack(the_repository, pack) {
if (bitmap_index_contains_pack(bitmap, pack) ||
open_pack_index(pack))
continue;
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
index fe3ebaadad..d62005edc0 100644
--- a/builtin/commit-graph.c
+++ b/builtin/commit-graph.c
@@ -210,6 +210,8 @@ static int git_commit_graph_write_config(const char *var, const char *value,
{
if (!strcmp(var, "commitgraph.maxnewfilters"))
write_opts.max_new_filters = git_config_int(var, value, ctx->kvi);
+ else if (!strcmp(var, "commitgraph.changedpaths"))
+ opts.enable_changed_paths = git_config_bool(var, value) ? 1 : -1;
/*
* No need to fall-back to 'git_default_config', since this was already
* called in 'cmd_commit_graph()'.
diff --git a/builtin/count-objects.c b/builtin/count-objects.c
index f2f407c2a7..18f6e33b6f 100644
--- a/builtin/count-objects.c
+++ b/builtin/count-objects.c
@@ -122,7 +122,6 @@ int cmd_count_objects(int argc,
count_loose, count_cruft, NULL, NULL);
if (verbose) {
- struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
unsigned long num_pack = 0;
off_t size_pack = 0;
@@ -130,7 +129,7 @@ int cmd_count_objects(int argc,
struct strbuf pack_buf = STRBUF_INIT;
struct strbuf garbage_buf = STRBUF_INIT;
- for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
+ repo_for_each_pack(the_repository, p) {
if (!p->pack_local)
continue;
if (open_pack_index(p))
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index dc2486f9a8..7adbc55f0d 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -931,9 +931,8 @@ static void handle_tag(const char *name, struct tag *tag)
/* handle signed tags */
if (message) {
- const char *signature = strstr(message,
- "\n-----BEGIN PGP SIGNATURE-----\n");
- if (signature)
+ size_t sig_offset = parse_signed_buffer(message, message_size);
+ if (sig_offset < message_size)
switch (signed_tag_mode) {
case SIGN_ABORT:
die("encountered signed tag %s; use "
@@ -950,7 +949,7 @@ static void handle_tag(const char *name, struct tag *tag)
oid_to_hex(&tag->object.oid));
/* fallthru */
case SIGN_STRIP:
- message_size = signature + 1 - message;
+ message_size = sig_offset;
break;
}
}
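The hunk above swaps the hard-coded "-----BEGIN PGP SIGNATURE-----" search for parse_signed_buffer(), which returns the offset where an in-body signature starts, or the full buffer length when there is none, so other supported signature formats can be detected too. A minimal sketch of the detection idiom as used at these call sites (no new API assumed):

	/* Hedged sketch mirroring the fast-export call site above. */
	size_t sig_offset = parse_signed_buffer(message, message_size);
	if (sig_offset < message_size) {
		/* a signature begins at message + sig_offset */
		message_size = sig_offset;	/* SIGN_STRIP: keep only the payload */
	}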
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
index 606c6aea82..54d3e592c6 100644
--- a/builtin/fast-import.c
+++ b/builtin/fast-import.c
@@ -188,6 +188,7 @@ static int global_argc;
static const char **global_argv;
static const char *global_prefix;
+static enum sign_mode signed_tag_mode = SIGN_VERBATIM;
static enum sign_mode signed_commit_mode = SIGN_VERBATIM;
/* Memory pools */
@@ -978,7 +979,7 @@ static int store_object(
if (e->idx.offset) {
duplicate_count_by_type[type]++;
return 1;
- } else if (find_oid_pack(&oid, packfile_store_get_all_packs(packs))) {
+ } else if (find_oid_pack(&oid, packfile_store_get_packs(packs))) {
e->type = type;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
@@ -1179,7 +1180,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
duplicate_count_by_type[OBJ_BLOB]++;
truncate_pack(&checkpoint);
- } else if (find_oid_pack(&oid, packfile_store_get_all_packs(packs))) {
+ } else if (find_oid_pack(&oid, packfile_store_get_packs(packs))) {
e->type = OBJ_BLOB;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
@@ -2963,6 +2964,43 @@ static void parse_new_commit(const char *arg)
b->last_commit = object_count_by_type[OBJ_COMMIT];
}
+static void handle_tag_signature(struct strbuf *msg, const char *name)
+{
+ size_t sig_offset = parse_signed_buffer(msg->buf, msg->len);
+
+ /* If there is no signature, there is nothing to do. */
+ if (sig_offset >= msg->len)
+ return;
+
+ switch (signed_tag_mode) {
+
+ /* First, modes that don't change anything */
+ case SIGN_ABORT:
+ die(_("encountered signed tag; use "
+ "--signed-tags=<mode> to handle it"));
+ case SIGN_WARN_VERBATIM:
+ warning(_("importing a tag signature verbatim for tag '%s'"), name);
+ /* fallthru */
+ case SIGN_VERBATIM:
+ /* Nothing to do, the signature will be put into the imported tag. */
+ break;
+
+ /* Second, modes that remove the signature */
+ case SIGN_WARN_STRIP:
+ warning(_("stripping a tag signature for tag '%s'"), name);
+ /* fallthru */
+ case SIGN_STRIP:
+ /* Truncate the buffer to remove the signature */
+ strbuf_setlen(msg, sig_offset);
+ break;
+
+ /* Third, BUG */
+ default:
+ BUG("invalid signed_tag_mode value %d from tag '%s'",
+ signed_tag_mode, name);
+ }
+}
+
static void parse_new_tag(const char *arg)
{
static struct strbuf msg = STRBUF_INIT;
@@ -3026,6 +3064,8 @@ static void parse_new_tag(const char *arg)
/* tag payload/message */
parse_data(&msg, 0, NULL);
+ handle_tag_signature(&msg, t->name);
+
/* build the tag object */
strbuf_reset(&new_data);
@@ -3546,6 +3586,9 @@ static int parse_one_option(const char *option)
} else if (skip_prefix(option, "signed-commits=", &option)) {
if (parse_sign_mode(option, &signed_commit_mode))
usagef(_("unknown --signed-commits mode '%s'"), option);
+ } else if (skip_prefix(option, "signed-tags=", &option)) {
+ if (parse_sign_mode(option, &signed_tag_mode))
+ usagef(_("unknown --signed-tags mode '%s'"), option);
} else if (!strcmp(option, "quiet")) {
show_stats = 0;
quiet = 1;
diff --git a/builtin/fsck.c b/builtin/fsck.c
index 8ee95e0d67..b1a650c673 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -867,20 +867,20 @@ static int mark_packed_for_connectivity(const struct object_id *oid,
static int check_pack_rev_indexes(struct repository *r, int show_progress)
{
- struct packfile_store *packs = r->objects->packfiles;
struct progress *progress = NULL;
+ struct packed_git *p;
uint32_t pack_count = 0;
int res = 0;
if (show_progress) {
- for (struct packed_git *p = packfile_store_get_all_packs(packs); p; p = p->next)
+ repo_for_each_pack(r, p)
pack_count++;
progress = start_delayed_progress(the_repository,
"Verifying reverse pack-indexes", pack_count);
pack_count = 0;
}
- for (struct packed_git *p = packfile_store_get_all_packs(packs); p; p = p->next) {
+ repo_for_each_pack(r, p) {
int load_error = load_pack_revindex_from_disk(p);
if (load_error < 0) {
@@ -1000,8 +1000,6 @@ int cmd_fsck(int argc,
for_each_packed_object(the_repository,
mark_packed_for_connectivity, NULL, 0);
} else {
- struct packfile_store *packs = the_repository->objects->packfiles;
-
odb_prepare_alternates(the_repository->objects);
for (source = the_repository->objects->sources; source; source = source->next)
fsck_source(source);
@@ -1012,8 +1010,7 @@ int cmd_fsck(int argc,
struct progress *progress = NULL;
if (show_progress) {
- for (p = packfile_store_get_all_packs(packs); p;
- p = p->next) {
+ repo_for_each_pack(the_repository, p) {
if (open_pack_index(p))
continue;
total += p->num_objects;
@@ -1022,8 +1019,8 @@ int cmd_fsck(int argc,
progress = start_progress(the_repository,
_("Checking objects"), total);
}
- for (p = packfile_store_get_all_packs(packs); p;
- p = p->next) {
+
+ repo_for_each_pack(the_repository, p) {
/* verify gives error messages itself */
if (verify_pack(the_repository,
p, fsck_obj_buffer,
diff --git a/builtin/gc.c b/builtin/gc.c
index e19e13d978..541d7471f1 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -487,10 +487,9 @@ static int too_many_loose_objects(struct gc_config *cfg)
static struct packed_git *find_base_packs(struct string_list *packs,
unsigned long limit)
{
- struct packfile_store *packfiles = the_repository->objects->packfiles;
struct packed_git *p, *base = NULL;
- for (p = packfile_store_get_all_packs(packfiles); p; p = p->next) {
+ repo_for_each_pack(the_repository, p) {
if (!p->pack_local || p->is_cruft)
continue;
if (limit) {
@@ -509,14 +508,13 @@ static struct packed_git *find_base_packs(struct string_list *packs,
static int too_many_packs(struct gc_config *cfg)
{
- struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
- int cnt;
+ int cnt = 0;
if (cfg->gc_auto_pack_limit <= 0)
return 0;
- for (cnt = 0, p = packfile_store_get_all_packs(packs); p; p = p->next) {
+ repo_for_each_pack(the_repository, p) {
if (!p->pack_local)
continue;
if (p->pack_keep)
@@ -1425,9 +1423,9 @@ static int incremental_repack_auto_condition(struct gc_config *cfg UNUSED)
if (incremental_repack_auto_limit < 0)
return 1;
- for (p = packfile_store_get_packs(the_repository->objects->packfiles);
- count < incremental_repack_auto_limit && p;
- p = p->next) {
+ repo_for_each_pack(the_repository, p) {
+ if (count >= incremental_repack_auto_limit)
+ break;
if (!p->multi_pack_index)
count++;
}
@@ -1494,7 +1492,7 @@ static off_t get_auto_pack_size(void)
struct repository *r = the_repository;
odb_reprepare(r->objects);
- for (p = packfile_store_get_all_packs(r->objects->packfiles); p; p = p->next) {
+ repo_for_each_pack(r, p) {
if (p->pack_size > max_size) {
second_largest_size = max_size;
max_size = p->pack_size;
diff --git a/builtin/grep.c b/builtin/grep.c
index 13841fbf00..53cccf2d25 100644
--- a/builtin/grep.c
+++ b/builtin/grep.c
@@ -1214,7 +1214,7 @@ int cmd_grep(int argc,
if (recurse_submodules)
repo_read_gitmodules(the_repository, 1);
if (startup_info->have_repository)
- (void)packfile_store_get_packs(the_repository->objects->packfiles);
+ packfile_store_prepare(the_repository->objects->packfiles);
start_threads(&opt);
} else {
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 5bdc44fb2d..b5454e5df1 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -3831,12 +3831,10 @@ static int pack_mtime_cmp(const void *_a, const void *_b)
static void read_packs_list_from_stdin(struct rev_info *revs)
{
- struct packfile_store *packs = the_repository->objects->packfiles;
struct strbuf buf = STRBUF_INIT;
struct string_list include_packs = STRING_LIST_INIT_DUP;
struct string_list exclude_packs = STRING_LIST_INIT_DUP;
struct string_list_item *item = NULL;
-
struct packed_git *p;
while (strbuf_getline(&buf, stdin) != EOF) {
@@ -3856,7 +3854,7 @@ static void read_packs_list_from_stdin(struct rev_info *revs)
string_list_sort(&exclude_packs);
string_list_remove_duplicates(&exclude_packs, 0);
- for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
+ repo_for_each_pack(the_repository, p) {
const char *pack_name = pack_basename(p);
if ((item = string_list_lookup(&include_packs, pack_name)))
@@ -4077,7 +4075,6 @@ static void enumerate_cruft_objects(void)
static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs)
{
- struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
struct rev_info revs;
int ret;
@@ -4107,7 +4104,7 @@ static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs
* Re-mark only the fresh packs as kept so that objects in
* unknown packs do not halt the reachability traversal early.
*/
- for (p = packfile_store_get_all_packs(packs); p; p = p->next)
+ repo_for_each_pack(the_repository, p)
p->pack_keep_in_core = 0;
mark_pack_kept_in_core(fresh_packs, 1);
@@ -4124,7 +4121,6 @@ static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs
static void read_cruft_objects(void)
{
- struct packfile_store *packs = the_repository->objects->packfiles;
struct strbuf buf = STRBUF_INIT;
struct string_list discard_packs = STRING_LIST_INIT_DUP;
struct string_list fresh_packs = STRING_LIST_INIT_DUP;
@@ -4145,7 +4141,7 @@ static void read_cruft_objects(void)
string_list_sort(&discard_packs);
string_list_sort(&fresh_packs);
- for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
+ repo_for_each_pack(the_repository, p) {
const char *pack_name = pack_basename(p);
struct string_list_item *item;
@@ -4398,7 +4394,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
struct packed_git *p;
p = (last_found != (void *)1) ? last_found :
- packfile_store_get_all_packs(packs);
+ packfile_store_get_packs(packs);
while (p) {
if ((!p->pack_local || p->pack_keep ||
@@ -4408,7 +4404,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
return 1;
}
if (p == last_found)
- p = packfile_store_get_all_packs(packs);
+ p = packfile_store_get_packs(packs);
else
p = p->next;
if (p == last_found)
@@ -4440,13 +4436,12 @@ static int loosened_object_can_be_discarded(const struct object_id *oid,
static void loosen_unused_packed_objects(void)
{
- struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
uint32_t i;
uint32_t loosened_objects_nr = 0;
struct object_id oid;
- for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
+ repo_for_each_pack(the_repository, p) {
if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
@@ -4747,13 +4742,12 @@ static void get_object_list(struct rev_info *revs, struct strvec *argv)
static void add_extra_kept_packs(const struct string_list *names)
{
- struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
if (!names->nr)
return;
- for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
+ repo_for_each_pack(the_repository, p) {
const char *name = basename(p->pack_name);
int i;
@@ -5191,10 +5185,9 @@ int cmd_pack_objects(int argc,
add_extra_kept_packs(&keep_pack_list);
if (ignore_packed_keep_on_disk) {
- struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
- for (p = packfile_store_get_all_packs(packs); p; p = p->next)
+ repo_for_each_pack(the_repository, p)
if (p->pack_local && p->pack_keep)
break;
if (!p) /* no keep-able packs found */
@@ -5206,10 +5199,9 @@ int cmd_pack_objects(int argc,
* want to unset "local" based on looking at packs, as
* it also covers non-local objects
*/
- struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
- for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
+ repo_for_each_pack(the_repository, p) {
if (!p->pack_local) {
have_non_local_packs = 1;
break;
diff --git a/builtin/pack-redundant.c b/builtin/pack-redundant.c
index 80743d8806..e4ecf774ca 100644
--- a/builtin/pack-redundant.c
+++ b/builtin/pack-redundant.c
@@ -566,29 +566,23 @@ static struct pack_list * add_pack(struct packed_git *p)
static struct pack_list * add_pack_file(const char *filename)
{
- struct packfile_store *packs = the_repository->objects->packfiles;
- struct packed_git *p = packfile_store_get_all_packs(packs);
+ struct packed_git *p;
if (strlen(filename) < 40)
die("Bad pack filename: %s", filename);
- while (p) {
+ repo_for_each_pack(the_repository, p)
if (strstr(p->pack_name, filename))
return add_pack(p);
- p = p->next;
- }
die("Filename %s not found in packed_git", filename);
}
static void load_all(void)
{
- struct packfile_store *packs = the_repository->objects->packfiles;
- struct packed_git *p = packfile_store_get_all_packs(packs);
+ struct packed_git *p;
- while (p) {
+ repo_for_each_pack(the_repository, p)
add_pack(p);
- p = p->next;
- }
}
int cmd_pack_redundant(int argc, const char **argv, const char *prefix UNUSED, struct repository *repo UNUSED) {
diff --git a/builtin/repack.c b/builtin/repack.c
index e8730808c5..cfdb4c0920 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -3,27 +3,18 @@
#include "builtin.h"
#include "config.h"
-#include "dir.h"
#include "environment.h"
-#include "gettext.h"
-#include "hex.h"
#include "parse-options.h"
#include "path.h"
#include "run-command.h"
#include "server-info.h"
-#include "strbuf.h"
#include "string-list.h"
-#include "strvec.h"
#include "midx.h"
#include "packfile.h"
#include "prune-packed.h"
-#include "odb.h"
#include "promisor-remote.h"
+#include "repack.h"
#include "shallow.h"
-#include "pack.h"
-#include "pack-bitmap.h"
-#include "refs.h"
-#include "list-objects-filter-options.h"
#define ALL_INTO_ONE 1
#define LOOSEN_UNREACHABLE 2
@@ -33,8 +24,6 @@
#define RETAIN_PACK 2
static int pack_everything;
-static int delta_base_offset = 1;
-static int pack_kept_objects = -1;
static int write_bitmaps = -1;
static int use_delta_islands;
static int run_update_server_info = 1;
@@ -53,31 +42,23 @@ static const char incremental_bitmap_conflict_error[] = N_(
"--no-write-bitmap-index or disable the pack.writeBitmaps configuration."
);
-struct pack_objects_args {
- char *window;
- char *window_memory;
- char *depth;
- char *threads;
- unsigned long max_pack_size;
- int no_reuse_delta;
- int no_reuse_object;
- int quiet;
- int local;
- int name_hash_version;
- int path_walk;
- struct list_objects_filter_options filter_options;
+struct repack_config_ctx {
+ struct pack_objects_args *po_args;
+ struct pack_objects_args *cruft_po_args;
};
static int repack_config(const char *var, const char *value,
const struct config_context *ctx, void *cb)
{
- struct pack_objects_args *cruft_po_args = cb;
+ struct repack_config_ctx *repack_ctx = cb;
+ struct pack_objects_args *po_args = repack_ctx->po_args;
+ struct pack_objects_args *cruft_po_args = repack_ctx->cruft_po_args;
if (!strcmp(var, "repack.usedeltabaseoffset")) {
- delta_base_offset = git_config_bool(var, value);
+ po_args->delta_base_offset = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "repack.packkeptobjects")) {
- pack_kept_objects = git_config_bool(var, value);
+ po_args->pack_kept_objects = git_config_bool(var, value);
return 0;
}
if (!strcmp(var, "repack.writebitmaps") ||
@@ -116,1138 +97,10 @@ static int repack_config(const char *var, const char *value,
return git_default_config(var, value, ctx, cb);
}
-static void pack_objects_args_release(struct pack_objects_args *args)
-{
- free(args->window);
- free(args->window_memory);
- free(args->depth);
- free(args->threads);
- list_objects_filter_release(&args->filter_options);
-}
-
-struct existing_packs {
- struct string_list kept_packs;
- struct string_list non_kept_packs;
- struct string_list cruft_packs;
-};
-
-#define EXISTING_PACKS_INIT { \
- .kept_packs = STRING_LIST_INIT_DUP, \
- .non_kept_packs = STRING_LIST_INIT_DUP, \
- .cruft_packs = STRING_LIST_INIT_DUP, \
-}
-
-static int has_existing_non_kept_packs(const struct existing_packs *existing)
-{
- return existing->non_kept_packs.nr || existing->cruft_packs.nr;
-}
-
-static void pack_mark_for_deletion(struct string_list_item *item)
-{
- item->util = (void*)((uintptr_t)item->util | DELETE_PACK);
-}
-
-static void pack_unmark_for_deletion(struct string_list_item *item)
-{
- item->util = (void*)((uintptr_t)item->util & ~DELETE_PACK);
-}
-
-static int pack_is_marked_for_deletion(struct string_list_item *item)
-{
- return (uintptr_t)item->util & DELETE_PACK;
-}
-
-static void pack_mark_retained(struct string_list_item *item)
-{
- item->util = (void*)((uintptr_t)item->util | RETAIN_PACK);
-}
-
-static int pack_is_retained(struct string_list_item *item)
-{
- return (uintptr_t)item->util & RETAIN_PACK;
-}
-
-static void mark_packs_for_deletion_1(struct string_list *names,
- struct string_list *list)
-{
- struct string_list_item *item;
- const int hexsz = the_hash_algo->hexsz;
-
- for_each_string_list_item(item, list) {
- char *sha1;
- size_t len = strlen(item->string);
- if (len < hexsz)
- continue;
- sha1 = item->string + len - hexsz;
-
- if (pack_is_retained(item)) {
- pack_unmark_for_deletion(item);
- } else if (!string_list_has_string(names, sha1)) {
- /*
- * Mark this pack for deletion, which ensures
- * that this pack won't be included in a MIDX
- * (if `--write-midx` was given) and that we
- * will actually delete this pack (if `-d` was
- * given).
- */
- pack_mark_for_deletion(item);
- }
- }
-}
-
-static void retain_cruft_pack(struct existing_packs *existing,
- struct packed_git *cruft)
-{
- struct strbuf buf = STRBUF_INIT;
- struct string_list_item *item;
-
- strbuf_addstr(&buf, pack_basename(cruft));
- strbuf_strip_suffix(&buf, ".pack");
-
- item = string_list_lookup(&existing->cruft_packs, buf.buf);
- if (!item)
- BUG("could not find cruft pack '%s'", pack_basename(cruft));
-
- pack_mark_retained(item);
- strbuf_release(&buf);
-}
-
-static void mark_packs_for_deletion(struct existing_packs *existing,
- struct string_list *names)
-
-{
- mark_packs_for_deletion_1(names, &existing->non_kept_packs);
- mark_packs_for_deletion_1(names, &existing->cruft_packs);
-}
-
-static void remove_redundant_pack(const char *dir_name, const char *base_name)
-{
- struct strbuf buf = STRBUF_INIT;
- struct odb_source *source = the_repository->objects->sources;
- struct multi_pack_index *m = get_multi_pack_index(source);
- strbuf_addf(&buf, "%s.pack", base_name);
- if (m && source->local && midx_contains_pack(m, buf.buf))
- clear_midx_file(the_repository);
- strbuf_insertf(&buf, 0, "%s/", dir_name);
- unlink_pack_path(buf.buf, 1);
- strbuf_release(&buf);
-}
-
-static void remove_redundant_packs_1(struct string_list *packs)
-{
- struct string_list_item *item;
- for_each_string_list_item(item, packs) {
- if (!pack_is_marked_for_deletion(item))
- continue;
- remove_redundant_pack(packdir, item->string);
- }
-}
-
-static void remove_redundant_existing_packs(struct existing_packs *existing)
-{
- remove_redundant_packs_1(&existing->non_kept_packs);
- remove_redundant_packs_1(&existing->cruft_packs);
-}
-
-static void existing_packs_release(struct existing_packs *existing)
-{
- string_list_clear(&existing->kept_packs, 0);
- string_list_clear(&existing->non_kept_packs, 0);
- string_list_clear(&existing->cruft_packs, 0);
-}
-
-/*
- * Adds all packs hex strings (pack-$HASH) to either packs->non_kept
- * or packs->kept based on whether each pack has a corresponding
- * .keep file or not. Packs without a .keep file are not to be kept
- * if we are going to pack everything into one file.
- */
-static void collect_pack_filenames(struct existing_packs *existing,
- const struct string_list *extra_keep)
-{
- struct packfile_store *packs = the_repository->objects->packfiles;
- struct packed_git *p;
- struct strbuf buf = STRBUF_INIT;
-
- for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
- int i;
- const char *base;
-
- if (!p->pack_local)
- continue;
-
- base = pack_basename(p);
-
- for (i = 0; i < extra_keep->nr; i++)
- if (!fspathcmp(base, extra_keep->items[i].string))
- break;
-
- strbuf_reset(&buf);
- strbuf_addstr(&buf, base);
- strbuf_strip_suffix(&buf, ".pack");
-
- if ((extra_keep->nr > 0 && i < extra_keep->nr) || p->pack_keep)
- string_list_append(&existing->kept_packs, buf.buf);
- else if (p->is_cruft)
- string_list_append(&existing->cruft_packs, buf.buf);
- else
- string_list_append(&existing->non_kept_packs, buf.buf);
- }
-
- string_list_sort(&existing->kept_packs);
- string_list_sort(&existing->non_kept_packs);
- string_list_sort(&existing->cruft_packs);
- strbuf_release(&buf);
-}
-
-static void prepare_pack_objects(struct child_process *cmd,
- const struct pack_objects_args *args,
- const char *out)
-{
- strvec_push(&cmd->args, "pack-objects");
- if (args->window)
- strvec_pushf(&cmd->args, "--window=%s", args->window);
- if (args->window_memory)
- strvec_pushf(&cmd->args, "--window-memory=%s", args->window_memory);
- if (args->depth)
- strvec_pushf(&cmd->args, "--depth=%s", args->depth);
- if (args->threads)
- strvec_pushf(&cmd->args, "--threads=%s", args->threads);
- if (args->max_pack_size)
- strvec_pushf(&cmd->args, "--max-pack-size=%lu", args->max_pack_size);
- if (args->no_reuse_delta)
- strvec_pushf(&cmd->args, "--no-reuse-delta");
- if (args->no_reuse_object)
- strvec_pushf(&cmd->args, "--no-reuse-object");
- if (args->name_hash_version)
- strvec_pushf(&cmd->args, "--name-hash-version=%d", args->name_hash_version);
- if (args->path_walk)
- strvec_pushf(&cmd->args, "--path-walk");
- if (args->local)
- strvec_push(&cmd->args, "--local");
- if (args->quiet)
- strvec_push(&cmd->args, "--quiet");
- if (delta_base_offset)
- strvec_push(&cmd->args, "--delta-base-offset");
- strvec_push(&cmd->args, out);
- cmd->git_cmd = 1;
- cmd->out = -1;
-}
-
-/*
- * Write oid to the given struct child_process's stdin, starting it first if
- * necessary.
- */
-static int write_oid(const struct object_id *oid,
- struct packed_git *pack UNUSED,
- uint32_t pos UNUSED, void *data)
-{
- struct child_process *cmd = data;
-
- if (cmd->in == -1) {
- if (start_command(cmd))
- die(_("could not start pack-objects to repack promisor objects"));
- }
-
- if (write_in_full(cmd->in, oid_to_hex(oid), the_hash_algo->hexsz) < 0 ||
- write_in_full(cmd->in, "\n", 1) < 0)
- die(_("failed to feed promisor objects to pack-objects"));
- return 0;
-}
-
-static struct {
- const char *name;
- unsigned optional:1;
-} exts[] = {
- {".pack"},
- {".rev", 1},
- {".mtimes", 1},
- {".bitmap", 1},
- {".promisor", 1},
- {".idx"},
-};
-
-struct generated_pack_data {
- struct tempfile *tempfiles[ARRAY_SIZE(exts)];
-};
-
-static struct generated_pack_data *populate_pack_exts(const char *name)
-{
- struct stat statbuf;
- struct strbuf path = STRBUF_INIT;
- struct generated_pack_data *data = xcalloc(1, sizeof(*data));
- int i;
-
- for (i = 0; i < ARRAY_SIZE(exts); i++) {
- strbuf_reset(&path);
- strbuf_addf(&path, "%s-%s%s", packtmp, name, exts[i].name);
-
- if (stat(path.buf, &statbuf))
- continue;
-
- data->tempfiles[i] = register_tempfile(path.buf);
- }
-
- strbuf_release(&path);
- return data;
-}
-
-static int has_pack_ext(const struct generated_pack_data *data,
- const char *ext)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(exts); i++) {
- if (strcmp(exts[i].name, ext))
- continue;
- return !!data->tempfiles[i];
- }
- BUG("unknown pack extension: '%s'", ext);
-}
-
-static void repack_promisor_objects(const struct pack_objects_args *args,
- struct string_list *names)
-{
- struct child_process cmd = CHILD_PROCESS_INIT;
- FILE *out;
- struct strbuf line = STRBUF_INIT;
-
- prepare_pack_objects(&cmd, args, packtmp);
- cmd.in = -1;
-
- /*
- * NEEDSWORK: Giving pack-objects only the OIDs without any ordering
- * hints may result in suboptimal deltas in the resulting pack. See if
- * the OIDs can be sent with fake paths such that pack-objects can use a
- * {type -> existing pack order} ordering when computing deltas instead
- * of a {type -> size} ordering, which may produce better deltas.
- */
- for_each_packed_object(the_repository, write_oid, &cmd,
- FOR_EACH_OBJECT_PROMISOR_ONLY);
-
- if (cmd.in == -1) {
- /* No packed objects; cmd was never started */
- child_process_clear(&cmd);
- return;
- }
-
- close(cmd.in);
-
- out = xfdopen(cmd.out, "r");
- while (strbuf_getline_lf(&line, out) != EOF) {
- struct string_list_item *item;
- char *promisor_name;
-
- if (line.len != the_hash_algo->hexsz)
- die(_("repack: Expecting full hex object ID lines only from pack-objects."));
- item = string_list_append(names, line.buf);
-
- /*
- * pack-objects creates the .pack and .idx files, but not the
- * .promisor file. Create the .promisor file, which is empty.
- *
- * NEEDSWORK: fetch-pack sometimes generates non-empty
- * .promisor files containing the ref names and associated
- * hashes at the point of generation of the corresponding
- * packfile, but this would not preserve their contents. Maybe
- * concatenate the contents of all .promisor files instead of
- * just creating a new empty file.
- */
- promisor_name = mkpathdup("%s-%s.promisor", packtmp,
- line.buf);
- write_promisor_file(promisor_name, NULL, 0);
-
- item->util = populate_pack_exts(item->string);
-
- free(promisor_name);
- }
-
- fclose(out);
- if (finish_command(&cmd))
- die(_("could not finish pack-objects to repack promisor objects"));
- strbuf_release(&line);
-}
-
-struct pack_geometry {
- struct packed_git **pack;
- uint32_t pack_nr, pack_alloc;
- uint32_t split;
-
- int split_factor;
-};
-
-static uint32_t geometry_pack_weight(struct packed_git *p)
-{
- if (open_pack_index(p))
- die(_("cannot open index for %s"), p->pack_name);
- return p->num_objects;
-}
-
-static int geometry_cmp(const void *va, const void *vb)
-{
- uint32_t aw = geometry_pack_weight(*(struct packed_git **)va),
- bw = geometry_pack_weight(*(struct packed_git **)vb);
-
- if (aw < bw)
- return -1;
- if (aw > bw)
- return 1;
- return 0;
-}
-
-static void init_pack_geometry(struct pack_geometry *geometry,
- struct existing_packs *existing,
- const struct pack_objects_args *args)
-{
- struct packfile_store *packs = the_repository->objects->packfiles;
- struct packed_git *p;
- struct strbuf buf = STRBUF_INIT;
-
- for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
- if (args->local && !p->pack_local)
- /*
- * When asked to only repack local packfiles we skip
- * over any packfiles that are borrowed from alternate
- * object directories.
- */
- continue;
-
- if (!pack_kept_objects) {
- /*
- * Any pack that has its pack_keep bit set will
- * appear in existing->kept_packs below, but
- * this saves us from doing a more expensive
- * check.
- */
- if (p->pack_keep)
- continue;
-
- /*
- * The pack may be kept via the --keep-pack
- * option; check 'existing->kept_packs' to
- * determine whether to ignore it.
- */
- strbuf_reset(&buf);
- strbuf_addstr(&buf, pack_basename(p));
- strbuf_strip_suffix(&buf, ".pack");
-
- if (string_list_has_string(&existing->kept_packs, buf.buf))
- continue;
- }
- if (p->is_cruft)
- continue;
-
- ALLOC_GROW(geometry->pack,
- geometry->pack_nr + 1,
- geometry->pack_alloc);
-
- geometry->pack[geometry->pack_nr] = p;
- geometry->pack_nr++;
- }
-
- QSORT(geometry->pack, geometry->pack_nr, geometry_cmp);
- strbuf_release(&buf);
-}
-
-static void split_pack_geometry(struct pack_geometry *geometry)
-{
- uint32_t i;
- uint32_t split;
- off_t total_size = 0;
-
- if (!geometry->pack_nr) {
- geometry->split = geometry->pack_nr;
- return;
- }
-
- /*
- * First, count the number of packs (in descending order of size) which
- * already form a geometric progression.
- */
- for (i = geometry->pack_nr - 1; i > 0; i--) {
- struct packed_git *ours = geometry->pack[i];
- struct packed_git *prev = geometry->pack[i - 1];
-
- if (unsigned_mult_overflows(geometry->split_factor,
- geometry_pack_weight(prev)))
- die(_("pack %s too large to consider in geometric "
- "progression"),
- prev->pack_name);
-
- if (geometry_pack_weight(ours) <
- geometry->split_factor * geometry_pack_weight(prev))
- break;
- }
-
- split = i;
-
- if (split) {
- /*
- * Move the split one to the right, since the top element in the
- * last-compared pair can't be in the progression. Only do this
- * when we split in the middle of the array (otherwise if we got
- * to the end, then the split is in the right place).
- */
- split++;
- }
-
- /*
- * Then, anything to the left of 'split' must be in a new pack. But,
- * creating that new pack may cause packs in the heavy half to no longer
- * form a geometric progression.
- *
- * Compute an expected size of the new pack, and then determine how many
- * packs in the heavy half need to be joined into it (if any) to restore
- * the geometric progression.
- */
- for (i = 0; i < split; i++) {
- struct packed_git *p = geometry->pack[i];
-
- if (unsigned_add_overflows(total_size, geometry_pack_weight(p)))
- die(_("pack %s too large to roll up"), p->pack_name);
- total_size += geometry_pack_weight(p);
- }
- for (i = split; i < geometry->pack_nr; i++) {
- struct packed_git *ours = geometry->pack[i];
-
- if (unsigned_mult_overflows(geometry->split_factor,
- total_size))
- die(_("pack %s too large to roll up"), ours->pack_name);
-
- if (geometry_pack_weight(ours) <
- geometry->split_factor * total_size) {
- if (unsigned_add_overflows(total_size,
- geometry_pack_weight(ours)))
- die(_("pack %s too large to roll up"),
- ours->pack_name);
-
- split++;
- total_size += geometry_pack_weight(ours);
- } else
- break;
- }
-
- geometry->split = split;
-}
-
-static struct packed_git *get_preferred_pack(struct pack_geometry *geometry)
-{
- uint32_t i;
-
- if (!geometry) {
- /*
- * No geometry means either an all-into-one repack (in which
- * case there is only one pack left and it is the largest) or an
- * incremental one.
- *
- * If repacking incrementally, then we could check the size of
- * all packs to determine which should be preferred, but leave
- * this for later.
- */
- return NULL;
- }
- if (geometry->split == geometry->pack_nr)
- return NULL;
-
- /*
- * The preferred pack is the largest pack above the split line. In
- * other words, it is the largest pack that does not get rolled up in
- * the geometric repack.
- */
- for (i = geometry->pack_nr; i > geometry->split; i--)
- /*
- * A pack that is not local would never be included in a
- * multi-pack index. We thus skip over any non-local packs.
- */
- if (geometry->pack[i - 1]->pack_local)
- return geometry->pack[i - 1];
-
- return NULL;
-}
-
-static void geometry_remove_redundant_packs(struct pack_geometry *geometry,
- struct string_list *names,
- struct existing_packs *existing)
-{
- struct strbuf buf = STRBUF_INIT;
- uint32_t i;
-
- for (i = 0; i < geometry->split; i++) {
- struct packed_git *p = geometry->pack[i];
- if (string_list_has_string(names, hash_to_hex(p->hash)))
- continue;
-
- strbuf_reset(&buf);
- strbuf_addstr(&buf, pack_basename(p));
- strbuf_strip_suffix(&buf, ".pack");
-
- if ((p->pack_keep) ||
- (string_list_has_string(&existing->kept_packs, buf.buf)))
- continue;
-
- remove_redundant_pack(packdir, buf.buf);
- }
-
- strbuf_release(&buf);
-}
-
-static void free_pack_geometry(struct pack_geometry *geometry)
-{
- if (!geometry)
- return;
-
- free(geometry->pack);
-}
-
-static int midx_has_unknown_packs(char **midx_pack_names,
- size_t midx_pack_names_nr,
- struct string_list *include,
- struct pack_geometry *geometry,
- struct existing_packs *existing)
-{
- size_t i;
-
- string_list_sort(include);
-
- for (i = 0; i < midx_pack_names_nr; i++) {
- const char *pack_name = midx_pack_names[i];
-
- /*
- * Determine whether or not each MIDX'd pack from the existing
- * MIDX (if any) is represented in the new MIDX. For each pack
- * in the MIDX, it must either be:
- *
- * - In the "include" list of packs to be included in the new
- * MIDX. Note this function is called before the include
- * list is populated with any cruft pack(s).
- *
- * - Below the geometric split line (if using pack geometry),
- * indicating that the pack won't be included in the new
- * MIDX, but its contents were rolled up as part of the
- * geometric repack.
- *
- * - In the existing non-kept packs list (if not using pack
- * geometry), and marked as non-deleted.
- */
- if (string_list_has_string(include, pack_name)) {
- continue;
- } else if (geometry) {
- struct strbuf buf = STRBUF_INIT;
- uint32_t j;
-
- for (j = 0; j < geometry->split; j++) {
- strbuf_reset(&buf);
- strbuf_addstr(&buf, pack_basename(geometry->pack[j]));
- strbuf_strip_suffix(&buf, ".pack");
- strbuf_addstr(&buf, ".idx");
-
- if (!strcmp(pack_name, buf.buf)) {
- strbuf_release(&buf);
- break;
- }
- }
-
- strbuf_release(&buf);
-
- if (j < geometry->split)
- continue;
- } else {
- struct string_list_item *item;
-
- item = string_list_lookup(&existing->non_kept_packs,
- pack_name);
- if (item && !pack_is_marked_for_deletion(item))
- continue;
- }
-
- /*
- * If we got to this point, the MIDX includes some pack that we
- * don't know about.
- */
- return 1;
- }
-
- return 0;
-}
-
-struct midx_snapshot_ref_data {
- struct tempfile *f;
- struct oidset seen;
- int preferred;
-};
-
-static int midx_snapshot_ref_one(const char *refname UNUSED,
- const char *referent UNUSED,
- const struct object_id *oid,
- int flag UNUSED, void *_data)
-{
- struct midx_snapshot_ref_data *data = _data;
- struct object_id peeled;
-
- if (!peel_iterated_oid(the_repository, oid, &peeled))
- oid = &peeled;
-
- if (oidset_insert(&data->seen, oid))
- return 0; /* already seen */
-
- if (odb_read_object_info(the_repository->objects, oid, NULL) != OBJ_COMMIT)
- return 0;
-
- fprintf(data->f->fp, "%s%s\n", data->preferred ? "+" : "",
- oid_to_hex(oid));
-
- return 0;
-}
-
-static void midx_snapshot_refs(struct tempfile *f)
-{
- struct midx_snapshot_ref_data data;
- const struct string_list *preferred = bitmap_preferred_tips(the_repository);
-
- data.f = f;
- data.preferred = 0;
- oidset_init(&data.seen, 0);
-
- if (!fdopen_tempfile(f, "w"))
- die(_("could not open tempfile %s for writing"),
- get_tempfile_path(f));
-
- if (preferred) {
- struct string_list_item *item;
-
- data.preferred = 1;
- for_each_string_list_item(item, preferred)
- refs_for_each_ref_in(get_main_ref_store(the_repository),
- item->string,
- midx_snapshot_ref_one, &data);
- data.preferred = 0;
- }
-
- refs_for_each_ref(get_main_ref_store(the_repository),
- midx_snapshot_ref_one, &data);
-
- if (close_tempfile_gently(f)) {
- int save_errno = errno;
- delete_tempfile(&f);
- errno = save_errno;
- die_errno(_("could not close refs snapshot tempfile"));
- }
-
- oidset_clear(&data.seen);
-}
-
-static void midx_included_packs(struct string_list *include,
- struct existing_packs *existing,
- char **midx_pack_names,
- size_t midx_pack_names_nr,
- struct string_list *names,
- struct pack_geometry *geometry)
-{
- struct string_list_item *item;
- struct strbuf buf = STRBUF_INIT;
-
- for_each_string_list_item(item, &existing->kept_packs) {
- strbuf_reset(&buf);
- strbuf_addf(&buf, "%s.idx", item->string);
- string_list_insert(include, buf.buf);
- }
-
- for_each_string_list_item(item, names) {
- strbuf_reset(&buf);
- strbuf_addf(&buf, "pack-%s.idx", item->string);
- string_list_insert(include, buf.buf);
- }
-
- if (geometry->split_factor) {
- uint32_t i;
-
- for (i = geometry->split; i < geometry->pack_nr; i++) {
- struct packed_git *p = geometry->pack[i];
-
- /*
- * The multi-pack index never refers to packfiles part
- * of an alternate object database, so we skip these.
- * While git-multi-pack-index(1) would silently ignore
- * them anyway, this allows us to skip executing the
- * command completely when we have only non-local
- * packfiles.
- */
- if (!p->pack_local)
- continue;
-
- strbuf_reset(&buf);
- strbuf_addstr(&buf, pack_basename(p));
- strbuf_strip_suffix(&buf, ".pack");
- strbuf_addstr(&buf, ".idx");
-
- string_list_insert(include, buf.buf);
- }
- } else {
- for_each_string_list_item(item, &existing->non_kept_packs) {
- if (pack_is_marked_for_deletion(item))
- continue;
-
- strbuf_reset(&buf);
- strbuf_addf(&buf, "%s.idx", item->string);
- string_list_insert(include, buf.buf);
- }
- }
-
- if (midx_must_contain_cruft ||
- midx_has_unknown_packs(midx_pack_names, midx_pack_names_nr,
- include, geometry, existing)) {
- /*
- * If there are one or more unknown pack(s) present (see
- * midx_has_unknown_packs() for what makes a pack
- * "unknown") in the MIDX before the repack, keep them
- * as they may be required to form a reachability
- * closure if the MIDX is bitmapped.
- *
- * For example, a cruft pack can be required to form a
- * reachability closure if the MIDX is bitmapped and one
- * or more of the bitmap's selected commits reaches a
- * once-cruft object that was later made reachable.
- */
- for_each_string_list_item(item, &existing->cruft_packs) {
- /*
- * When doing a --geometric repack, there is no
- * need to check for deleted packs, since we're
- * by definition not doing an ALL_INTO_ONE
- * repack (hence no packs will be deleted).
- * Otherwise we must check for and exclude any
- * packs which are enqueued for deletion.
- *
- * So we could omit the conditional below in the
- * --geometric case, but doing so is unnecessary
- * since no packs are marked as pending
- * deletion (since we only call
- * `mark_packs_for_deletion()` when doing an
- * all-into-one repack).
- */
- if (pack_is_marked_for_deletion(item))
- continue;
-
- strbuf_reset(&buf);
- strbuf_addf(&buf, "%s.idx", item->string);
- string_list_insert(include, buf.buf);
- }
- } else {
- /*
- * Modern versions of Git (with the appropriate
- * configuration setting) will write new copies of
- * once-cruft objects when doing a --geometric repack.
- *
- * If the MIDX has no cruft pack, new packs written
- * during a --geometric repack will not rely on the
- * cruft pack to form a reachability closure, so we can
- * avoid including them in the MIDX in that case.
- */
- ;
- }
-
- strbuf_release(&buf);
-}
-
-static int write_midx_included_packs(struct string_list *include,
- struct pack_geometry *geometry,
- struct string_list *names,
- const char *refs_snapshot,
- int show_progress, int write_bitmaps)
-{
- struct child_process cmd = CHILD_PROCESS_INIT;
- struct string_list_item *item;
- struct packed_git *preferred = get_preferred_pack(geometry);
- FILE *in;
- int ret;
-
- if (!include->nr)
- return 0;
-
- cmd.in = -1;
- cmd.git_cmd = 1;
-
- strvec_push(&cmd.args, "multi-pack-index");
- strvec_pushl(&cmd.args, "write", "--stdin-packs", NULL);
-
- if (show_progress)
- strvec_push(&cmd.args, "--progress");
- else
- strvec_push(&cmd.args, "--no-progress");
-
- if (write_bitmaps)
- strvec_push(&cmd.args, "--bitmap");
-
- if (preferred)
- strvec_pushf(&cmd.args, "--preferred-pack=%s",
- pack_basename(preferred));
- else if (names->nr) {
- /* The largest pack was repacked, meaning that either
- * one or two packs exist depending on whether the
- * repository has a cruft pack or not.
- *
- * Select the non-cruft one as preferred to encourage
- * pack-reuse among packs containing reachable objects
- * over unreachable ones.
- *
- * (Note we could write multiple packs here if
- * `--max-pack-size` was given, but any one of them
- * will suffice, so pick the first one.)
- */
- for_each_string_list_item(item, names) {
- struct generated_pack_data *data = item->util;
- if (has_pack_ext(data, ".mtimes"))
- continue;
-
- strvec_pushf(&cmd.args, "--preferred-pack=pack-%s.pack",
- item->string);
- break;
- }
- } else {
- /*
- * No packs were kept, and no packs were written. The
- * only thing remaining are .keep packs (unless
- * --pack-kept-objects was given).
- *
- * Set the `--preferred-pack` arbitrarily here.
- */
- ;
- }
-
- if (refs_snapshot)
- strvec_pushf(&cmd.args, "--refs-snapshot=%s", refs_snapshot);
-
- ret = start_command(&cmd);
- if (ret)
- return ret;
-
- in = xfdopen(cmd.in, "w");
- for_each_string_list_item(item, include)
- fprintf(in, "%s\n", item->string);
- fclose(in);
-
- return finish_command(&cmd);
-}
-
-static void remove_redundant_bitmaps(struct string_list *include,
- const char *packdir)
-{
- struct strbuf path = STRBUF_INIT;
- struct string_list_item *item;
- size_t packdir_len;
-
- strbuf_addstr(&path, packdir);
- strbuf_addch(&path, '/');
- packdir_len = path.len;
-
- /*
- * Remove any pack bitmaps corresponding to packs which are now
- * included in the MIDX.
- */
- for_each_string_list_item(item, include) {
- strbuf_addstr(&path, item->string);
- strbuf_strip_suffix(&path, ".idx");
- strbuf_addstr(&path, ".bitmap");
-
- if (unlink(path.buf) && errno != ENOENT)
- warning_errno(_("could not remove stale bitmap: %s"),
- path.buf);
-
- strbuf_setlen(&path, packdir_len);
- }
- strbuf_release(&path);
-}
-
-static int finish_pack_objects_cmd(struct child_process *cmd,
- struct string_list *names,
- int local)
-{
- FILE *out;
- struct strbuf line = STRBUF_INIT;
-
- out = xfdopen(cmd->out, "r");
- while (strbuf_getline_lf(&line, out) != EOF) {
- struct string_list_item *item;
-
- if (line.len != the_hash_algo->hexsz)
- die(_("repack: Expecting full hex object ID lines only "
- "from pack-objects."));
- /*
- * Avoid putting packs written outside of the repository in the
- * list of names.
- */
- if (local) {
- item = string_list_append(names, line.buf);
- item->util = populate_pack_exts(line.buf);
- }
- }
- fclose(out);
-
- strbuf_release(&line);
-
- return finish_command(cmd);
-}
-
-static int write_filtered_pack(const struct pack_objects_args *args,
- const char *destination,
- const char *pack_prefix,
- struct existing_packs *existing,
- struct string_list *names)
-{
- struct child_process cmd = CHILD_PROCESS_INIT;
- struct string_list_item *item;
- FILE *in;
- int ret;
- const char *caret;
- const char *scratch;
- int local = skip_prefix(destination, packdir, &scratch);
-
- prepare_pack_objects(&cmd, args, destination);
-
- strvec_push(&cmd.args, "--stdin-packs");
-
- if (!pack_kept_objects)
- strvec_push(&cmd.args, "--honor-pack-keep");
- for_each_string_list_item(item, &existing->kept_packs)
- strvec_pushf(&cmd.args, "--keep-pack=%s", item->string);
-
- cmd.in = -1;
-
- ret = start_command(&cmd);
- if (ret)
- return ret;
-
- /*
- * Here 'names' contains only the pack(s) that were just
- * written, which is exactly the packs we want to keep. Also
- * 'existing_kept_packs' already contains the packs in
- * 'keep_pack_list'.
- */
- in = xfdopen(cmd.in, "w");
- for_each_string_list_item(item, names)
- fprintf(in, "^%s-%s.pack\n", pack_prefix, item->string);
- for_each_string_list_item(item, &existing->non_kept_packs)
- fprintf(in, "%s.pack\n", item->string);
- for_each_string_list_item(item, &existing->cruft_packs)
- fprintf(in, "%s.pack\n", item->string);
- caret = pack_kept_objects ? "" : "^";
- for_each_string_list_item(item, &existing->kept_packs)
- fprintf(in, "%s%s.pack\n", caret, item->string);
- fclose(in);
-
- return finish_pack_objects_cmd(&cmd, names, local);
-}
-
-static void combine_small_cruft_packs(FILE *in, size_t combine_cruft_below_size,
- struct existing_packs *existing)
-{
- struct packfile_store *packs = the_repository->objects->packfiles;
- struct packed_git *p;
- struct strbuf buf = STRBUF_INIT;
- size_t i;
-
- for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
- if (!(p->is_cruft && p->pack_local))
- continue;
-
- strbuf_reset(&buf);
- strbuf_addstr(&buf, pack_basename(p));
- strbuf_strip_suffix(&buf, ".pack");
-
- if (!string_list_has_string(&existing->cruft_packs, buf.buf))
- continue;
-
- if (p->pack_size < combine_cruft_below_size) {
- fprintf(in, "-%s\n", pack_basename(p));
- } else {
- retain_cruft_pack(existing, p);
- fprintf(in, "%s\n", pack_basename(p));
- }
- }
-
- for (i = 0; i < existing->non_kept_packs.nr; i++)
- fprintf(in, "-%s.pack\n",
- existing->non_kept_packs.items[i].string);
-
- strbuf_release(&buf);
-}
-
-static int write_cruft_pack(const struct pack_objects_args *args,
- const char *destination,
- const char *pack_prefix,
- const char *cruft_expiration,
- unsigned long combine_cruft_below_size,
- struct string_list *names,
- struct existing_packs *existing)
-{
- struct child_process cmd = CHILD_PROCESS_INIT;
- struct string_list_item *item;
- FILE *in;
- int ret;
- const char *scratch;
- int local = skip_prefix(destination, packdir, &scratch);
-
- prepare_pack_objects(&cmd, args, destination);
-
- strvec_push(&cmd.args, "--cruft");
- if (cruft_expiration)
- strvec_pushf(&cmd.args, "--cruft-expiration=%s",
- cruft_expiration);
-
- strvec_push(&cmd.args, "--honor-pack-keep");
- strvec_push(&cmd.args, "--non-empty");
-
- cmd.in = -1;
-
- ret = start_command(&cmd);
- if (ret)
- return ret;
-
- /*
- * names has a confusing double use: it both provides the list
- * of just-written new packs, and accepts the name of the cruft
- * pack we are writing.
- *
- * By the time it is read here, it contains only the pack(s)
- * that were just written, which is exactly the set of packs we
- * want to consider kept.
- *
- * If `--expire-to` is given, the double-use served by `names`
- * ensures that the pack written to `--expire-to` excludes any
- * objects contained in the cruft pack.
- */
- in = xfdopen(cmd.in, "w");
- for_each_string_list_item(item, names)
- fprintf(in, "%s-%s.pack\n", pack_prefix, item->string);
- if (combine_cruft_below_size && !cruft_expiration) {
- combine_small_cruft_packs(in, combine_cruft_below_size,
- existing);
- } else {
- for_each_string_list_item(item, &existing->non_kept_packs)
- fprintf(in, "-%s.pack\n", item->string);
- for_each_string_list_item(item, &existing->cruft_packs)
- fprintf(in, "-%s.pack\n", item->string);
- }
- for_each_string_list_item(item, &existing->kept_packs)
- fprintf(in, "%s.pack\n", item->string);
- fclose(in);
-
- return finish_pack_objects_cmd(&cmd, names, local);
-}
-
-static const char *find_pack_prefix(const char *packdir, const char *packtmp)
-{
- const char *pack_prefix;
- if (!skip_prefix(packtmp, packdir, &pack_prefix))
- die(_("pack prefix %s does not begin with objdir %s"),
- packtmp, packdir);
- if (*pack_prefix == '/')
- pack_prefix++;
- return pack_prefix;
-}
-
int cmd_repack(int argc,
const char **argv,
const char *prefix,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
struct child_process cmd = CHILD_PROCESS_INIT;
struct string_list_item *item;
@@ -1255,18 +108,17 @@ int cmd_repack(int argc,
struct existing_packs existing = EXISTING_PACKS_INIT;
struct pack_geometry geometry = { 0 };
struct tempfile *refs_snapshot = NULL;
- int i, ext, ret;
+ int i, ret;
int show_progress;
- char **midx_pack_names = NULL;
- size_t midx_pack_names_nr = 0;
/* variables to be filled by option parsing */
+ struct repack_config_ctx config_ctx;
int delete_redundant = 0;
const char *unpack_unreachable = NULL;
int keep_unreachable = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
- struct pack_objects_args po_args = { 0 };
- struct pack_objects_args cruft_po_args = { 0 };
+ struct pack_objects_args po_args = PACK_OBJECTS_ARGS_INIT;
+ struct pack_objects_args cruft_po_args = PACK_OBJECTS_ARGS_INIT;
int write_midx = 0;
const char *cruft_expiration = NULL;
const char *expire_to = NULL;
@@ -1327,7 +179,7 @@ int cmd_repack(int argc,
OPT_UNSIGNED(0, "max-pack-size", &po_args.max_pack_size,
N_("maximum size of each packfile")),
OPT_PARSE_LIST_OBJECTS_FILTER(&po_args.filter_options),
- OPT_BOOL(0, "pack-kept-objects", &pack_kept_objects,
+ OPT_BOOL(0, "pack-kept-objects", &po_args.pack_kept_objects,
N_("repack objects in packs marked with .keep")),
OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
N_("do not repack this pack")),
@@ -1344,7 +196,11 @@ int cmd_repack(int argc,
list_objects_filter_init(&po_args.filter_options);
- repo_config(the_repository, repack_config, &cruft_po_args);
+ memset(&config_ctx, 0, sizeof(config_ctx));
+ config_ctx.po_args = &po_args;
+ config_ctx.cruft_po_args = &cruft_po_args;
+
+ repo_config(repo, repack_config, &config_ctx);
argc = parse_options(argc, argv, prefix, builtin_repack_options,
git_repack_usage, 0);
@@ -1354,7 +210,7 @@ int cmd_repack(int argc,
po_args.depth = xstrdup_or_null(opt_depth);
po_args.threads = xstrdup_or_null(opt_threads);
- if (delete_redundant && the_repository->repository_format_precious_objects)
+ if (delete_redundant && repo->repository_format_precious_objects)
die(_("cannot delete packs in a precious-objects repo"));
die_for_incompatible_opt3(unpack_unreachable || (pack_everything & LOOSEN_UNREACHABLE), "-A",
@@ -1369,14 +225,14 @@ int cmd_repack(int argc,
(!(pack_everything & ALL_INTO_ONE) || !is_bare_repository()))
write_bitmaps = 0;
}
- if (pack_kept_objects < 0)
- pack_kept_objects = write_bitmaps > 0 && !write_midx;
+ if (po_args.pack_kept_objects < 0)
+ po_args.pack_kept_objects = write_bitmaps > 0 && !write_midx;
if (write_bitmaps && !(pack_everything & ALL_INTO_ONE) && !write_midx)
die(_(incremental_bitmap_conflict_error));
if (write_bitmaps && po_args.local &&
- odb_has_alternates(the_repository->objects)) {
+ odb_has_alternates(repo->objects)) {
/*
* When asked to do a local repack, but we have
* packfiles that are inherited from an alternate, then
@@ -1391,26 +247,28 @@ int cmd_repack(int argc,
if (write_midx && write_bitmaps) {
struct strbuf path = STRBUF_INIT;
- strbuf_addf(&path, "%s/%s_XXXXXX", repo_get_object_directory(the_repository),
+ strbuf_addf(&path, "%s/%s_XXXXXX",
+ repo_get_object_directory(repo),
"bitmap-ref-tips");
refs_snapshot = xmks_tempfile(path.buf);
- midx_snapshot_refs(refs_snapshot);
+ midx_snapshot_refs(repo, refs_snapshot);
strbuf_release(&path);
}
- packdir = mkpathdup("%s/pack", repo_get_object_directory(the_repository));
+ packdir = mkpathdup("%s/pack", repo_get_object_directory(repo));
packtmp_name = xstrfmt(".tmp-%d-pack", (int)getpid());
packtmp = mkpathdup("%s/%s", packdir, packtmp_name);
- collect_pack_filenames(&existing, &keep_pack_list);
+ existing.repo = repo;
+ existing_packs_collect(&existing, &keep_pack_list);
if (geometry.split_factor) {
if (pack_everything)
die(_("options '%s' and '%s' cannot be used together"), "--geometric", "-A/-a");
- init_pack_geometry(&geometry, &existing, &po_args);
- split_pack_geometry(&geometry);
+ pack_geometry_init(&geometry, &existing, &po_args);
+ pack_geometry_split(&geometry);
}
prepare_pack_objects(&cmd, &po_args, packtmp);
@@ -1418,8 +276,6 @@ int cmd_repack(int argc,
show_progress = !po_args.quiet && isatty(2);
strvec_push(&cmd.args, "--keep-true-parents");
- if (!pack_kept_objects)
- strvec_push(&cmd.args, "--honor-pack-keep");
for (i = 0; i < keep_pack_list.nr; i++)
strvec_pushf(&cmd.args, "--keep-pack=%s",
keep_pack_list.items[i].string);
@@ -1439,7 +295,7 @@ int cmd_repack(int argc,
strvec_push(&cmd.args, "--reflog");
strvec_push(&cmd.args, "--indexed-objects");
}
- if (repo_has_promisor_remote(the_repository))
+ if (repo_has_promisor_remote(repo))
strvec_push(&cmd.args, "--exclude-promisor-objects");
if (!write_midx) {
if (write_bitmaps > 0)
@@ -1451,9 +307,9 @@ int cmd_repack(int argc,
strvec_push(&cmd.args, "--delta-islands");
if (pack_everything & ALL_INTO_ONE) {
- repack_promisor_objects(&po_args, &names);
+ repack_promisor_objects(repo, &po_args, &names, packtmp);
- if (has_existing_non_kept_packs(&existing) &&
+ if (existing_packs_has_non_kept(&existing) &&
delete_redundant &&
!(pack_everything & PACK_CRUFT)) {
for_each_string_list_item(item, &names) {
@@ -1515,9 +371,17 @@ int cmd_repack(int argc,
fclose(in);
}
- ret = finish_pack_objects_cmd(&cmd, &names, 1);
- if (ret)
- goto cleanup;
+ {
+ struct write_pack_opts opts = {
+ .packdir = packdir,
+ .destination = packdir,
+ .packtmp = packtmp,
+ };
+ ret = finish_pack_objects_cmd(repo->hash_algo, &opts, &cmd,
+ &names);
+ if (ret)
+ goto cleanup;
+ }
if (!names.nr) {
if (!po_args.quiet)
@@ -1535,12 +399,17 @@ int cmd_repack(int argc,
* midx_has_unknown_packs() will make the decision for
* us.
*/
- if (!get_multi_pack_index(the_repository->objects->sources))
+ if (!get_multi_pack_index(repo->objects->sources))
midx_must_contain_cruft = 1;
}
if (pack_everything & PACK_CRUFT) {
- const char *pack_prefix = find_pack_prefix(packdir, packtmp);
+ struct write_pack_opts opts = {
+ .po_args = &cruft_po_args,
+ .destination = packtmp,
+ .packtmp = packtmp,
+ .packdir = packdir,
+ };
if (!cruft_po_args.window)
cruft_po_args.window = xstrdup_or_null(po_args.window);
@@ -1555,9 +424,10 @@ int cmd_repack(int argc,
cruft_po_args.local = po_args.local;
cruft_po_args.quiet = po_args.quiet;
+ cruft_po_args.delta_base_offset = po_args.delta_base_offset;
+ cruft_po_args.pack_kept_objects = 0;
- ret = write_cruft_pack(&cruft_po_args, packtmp, pack_prefix,
- cruft_expiration,
+ ret = write_cruft_pack(&opts, cruft_expiration,
combine_cruft_below_size, &names,
&existing);
if (ret)
@@ -1592,11 +462,8 @@ int cmd_repack(int argc,
* pack, but rather removing all cruft packs from the
* main repository regardless of size.
*/
- ret = write_cruft_pack(&cruft_po_args, expire_to,
- pack_prefix,
- NULL,
- 0ul,
- &names,
+ opts.destination = expire_to;
+ ret = write_cruft_pack(&opts, NULL, 0ul, &names,
&existing);
if (ret)
goto cleanup;
@@ -1604,99 +471,63 @@ int cmd_repack(int argc,
}
if (po_args.filter_options.choice) {
- if (!filter_to)
- filter_to = packtmp;
-
- ret = write_filtered_pack(&po_args,
- filter_to,
- find_pack_prefix(packdir, packtmp),
- &existing,
- &names);
+ struct write_pack_opts opts = {
+ .po_args = &po_args,
+ .destination = filter_to,
+ .packdir = packdir,
+ .packtmp = packtmp,
+ };
+
+ if (!opts.destination)
+ opts.destination = packtmp;
+
+ ret = write_filtered_pack(&opts, &existing, &names);
if (ret)
goto cleanup;
}
string_list_sort(&names);
- if (get_multi_pack_index(the_repository->objects->sources)) {
- struct multi_pack_index *m =
- get_multi_pack_index(the_repository->objects->sources);
-
- ALLOC_ARRAY(midx_pack_names,
- m->num_packs + m->num_packs_in_base);
-
- for (; m; m = m->base_midx)
- for (uint32_t i = 0; i < m->num_packs; i++)
- midx_pack_names[midx_pack_names_nr++] =
- xstrdup(m->pack_names[i]);
- }
-
- close_object_store(the_repository->objects);
+ close_object_store(repo->objects);
/*
* Ok we have prepared all new packfiles.
*/
- for_each_string_list_item(item, &names) {
- struct generated_pack_data *data = item->util;
-
- for (ext = 0; ext < ARRAY_SIZE(exts); ext++) {
- char *fname;
-
- fname = mkpathdup("%s/pack-%s%s",
- packdir, item->string, exts[ext].name);
-
- if (data->tempfiles[ext]) {
- const char *fname_old = get_tempfile_path(data->tempfiles[ext]);
- struct stat statbuffer;
-
- if (!stat(fname_old, &statbuffer)) {
- statbuffer.st_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
- chmod(fname_old, statbuffer.st_mode);
- }
-
- if (rename_tempfile(&data->tempfiles[ext], fname))
- die_errno(_("renaming pack to '%s' failed"), fname);
- } else if (!exts[ext].optional)
- die(_("pack-objects did not write a '%s' file for pack %s-%s"),
- exts[ext].name, packtmp, item->string);
- else if (unlink(fname) < 0 && errno != ENOENT)
- die_errno(_("could not unlink: %s"), fname);
-
- free(fname);
- }
- }
+ for_each_string_list_item(item, &names)
+ generated_pack_install(item->util, item->string, packdir,
+ packtmp);
/* End of pack replacement. */
if (delete_redundant && pack_everything & ALL_INTO_ONE)
- mark_packs_for_deletion(&existing, &names);
+ existing_packs_mark_for_deletion(&existing, &names);
if (write_midx) {
- struct string_list include = STRING_LIST_INIT_DUP;
- midx_included_packs(&include, &existing, midx_pack_names,
- midx_pack_names_nr, &names, &geometry);
-
- ret = write_midx_included_packs(&include, &geometry, &names,
- refs_snapshot ? get_tempfile_path(refs_snapshot) : NULL,
- show_progress, write_bitmaps > 0);
-
- if (!ret && write_bitmaps)
- remove_redundant_bitmaps(&include, packdir);
-
- string_list_clear(&include, 0);
+ struct repack_write_midx_opts opts = {
+ .existing = &existing,
+ .geometry = &geometry,
+ .names = &names,
+ .refs_snapshot = refs_snapshot ? get_tempfile_path(refs_snapshot) : NULL,
+ .packdir = packdir,
+ .show_progress = show_progress,
+ .write_bitmaps = write_bitmaps > 0,
+ .midx_must_contain_cruft = midx_must_contain_cruft
+ };
+
+ ret = write_midx_included_packs(&opts);
if (ret)
goto cleanup;
}
- odb_reprepare(the_repository->objects);
+ odb_reprepare(repo->objects);
if (delete_redundant) {
int opts = 0;
- remove_redundant_existing_packs(&existing);
+ existing_packs_remove_redundant(&existing, packdir);
if (geometry.split_factor)
- geometry_remove_redundant_packs(&geometry, &names,
- &existing);
+ pack_geometry_remove_redundant(&geometry, &names,
+ &existing, packdir);
if (show_progress)
opts |= PRUNE_PACKED_VERBOSE;
prune_packed_objects(opts);
@@ -1704,18 +535,18 @@ int cmd_repack(int argc,
if (!keep_unreachable &&
(!(pack_everything & LOOSEN_UNREACHABLE) ||
unpack_unreachable) &&
- is_repository_shallow(the_repository))
+ is_repository_shallow(repo))
prune_shallow(PRUNE_QUICK);
}
if (run_update_server_info)
- update_server_info(the_repository, 0);
+ update_server_info(repo, 0);
if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0)) {
unsigned flags = 0;
if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX_WRITE_INCREMENTAL, 0))
flags |= MIDX_WRITE_INCREMENTAL;
- write_midx_file(the_repository->objects->sources,
+ write_midx_file(repo->objects->sources,
NULL, NULL, flags);
}
@@ -1723,10 +554,7 @@ cleanup:
string_list_clear(&keep_pack_list, 0);
string_list_clear(&names, 1);
existing_packs_release(&existing);
- free_pack_geometry(&geometry);
- for (size_t i = 0; i < midx_pack_names_nr; i++)
- free(midx_pack_names[i]);
- free(midx_pack_names);
+ pack_geometry_release(&geometry);
pack_objects_args_release(&po_args);
pack_objects_args_release(&cruft_po_args);
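
[Editorial aside, not part of the patch: the cmd_repack() hunks above replace several long parameter lists with a single options struct, but its definition does not appear in this diff (it presumably lives in the code this series moves out of builtin/repack.c). The shape below is only inferred from the designated initializers used above and the real declaration may differ.]

/* Inferred, illustrative shape only. */
struct write_pack_opts {
	struct pack_objects_args *po_args;	/* pack-objects tuning (window, depth, ...) */
	const char *destination;		/* where the finished pack is written: the pack
						 * directory, the temporary prefix, or --expire-to */
	const char *packdir;			/* the repository's objects/pack directory */
	const char *packtmp;			/* temporary pack path prefix (.tmp-<pid>-pack) */
};

[Bundling these fields is what lets finish_pack_objects_cmd(), write_cruft_pack() and write_filtered_pack() share one options argument in the call sites above instead of each threading packdir, packtmp and a destination separately.]
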
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 8c333b3e2e..15d51e60a8 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -2,6 +2,7 @@
#define DISABLE_SIGN_COMPARE_WARNINGS
#include "builtin.h"
+#include "abspath.h"
#include "config.h"
#include "dir.h"
#include "environment.h"
@@ -23,7 +24,7 @@
static const char *empty_base = "";
static char const * const builtin_sparse_checkout_usage[] = {
- N_("git sparse-checkout (init | list | set | add | reapply | disable | check-rules) [<options>]"),
+ N_("git sparse-checkout (init | list | set | add | reapply | disable | check-rules | clean) [<options>]"),
NULL
};
@@ -204,12 +205,12 @@ static void clean_tracked_sparse_directories(struct repository *r)
ensure_full_index(r->index);
}
-static int update_working_directory(struct pattern_list *pl)
+static int update_working_directory(struct repository *r,
+ struct pattern_list *pl)
{
enum update_sparsity_result result;
struct unpack_trees_options o;
struct lock_file lock_file = LOCK_INIT;
- struct repository *r = the_repository;
struct pattern_list *old_pl;
/* If no branch has been checked out, there are no updates to make. */
@@ -327,7 +328,8 @@ static void write_cone_to_file(FILE *fp, struct pattern_list *pl)
string_list_clear(&sl, 0);
}
-static int write_patterns_and_update(struct pattern_list *pl)
+static int write_patterns_and_update(struct repository *repo,
+ struct pattern_list *pl)
{
char *sparse_filename;
FILE *fp;
@@ -336,15 +338,15 @@ static int write_patterns_and_update(struct pattern_list *pl)
sparse_filename = get_sparse_checkout_filename();
- if (safe_create_leading_directories(the_repository, sparse_filename))
+ if (safe_create_leading_directories(repo, sparse_filename))
die(_("failed to create directory for sparse-checkout file"));
hold_lock_file_for_update(&lk, sparse_filename, LOCK_DIE_ON_ERROR);
- result = update_working_directory(pl);
+ result = update_working_directory(repo, pl);
if (result) {
rollback_lock_file(&lk);
- update_working_directory(NULL);
+ update_working_directory(repo, NULL);
goto out;
}
@@ -372,25 +374,26 @@ enum sparse_checkout_mode {
MODE_CONE_PATTERNS = 2,
};
-static int set_config(enum sparse_checkout_mode mode)
+static int set_config(struct repository *repo,
+ enum sparse_checkout_mode mode)
{
/* Update to use worktree config, if not already. */
- if (init_worktree_config(the_repository)) {
+ if (init_worktree_config(repo)) {
error(_("failed to initialize worktree config"));
return 1;
}
- if (repo_config_set_worktree_gently(the_repository,
+ if (repo_config_set_worktree_gently(repo,
"core.sparseCheckout",
mode ? "true" : "false") ||
- repo_config_set_worktree_gently(the_repository,
+ repo_config_set_worktree_gently(repo,
"core.sparseCheckoutCone",
mode == MODE_CONE_PATTERNS ?
"true" : "false"))
return 1;
if (mode == MODE_NO_PATTERNS)
- return set_sparse_index_config(the_repository, 0);
+ return set_sparse_index_config(repo, 0);
return 0;
}
@@ -410,7 +413,7 @@ static enum sparse_checkout_mode update_cone_mode(int *cone_mode) {
return MODE_ALL_PATTERNS;
}
-static int update_modes(int *cone_mode, int *sparse_index)
+static int update_modes(struct repository *repo, int *cone_mode, int *sparse_index)
{
int mode, record_mode;
@@ -418,20 +421,20 @@ static int update_modes(int *cone_mode, int *sparse_index)
record_mode = (*cone_mode != -1) || !core_apply_sparse_checkout;
mode = update_cone_mode(cone_mode);
- if (record_mode && set_config(mode))
+ if (record_mode && set_config(repo, mode))
return 1;
/* Set sparse-index/non-sparse-index mode if specified */
if (*sparse_index >= 0) {
- if (set_sparse_index_config(the_repository, *sparse_index) < 0)
+ if (set_sparse_index_config(repo, *sparse_index) < 0)
die(_("failed to modify sparse-index config"));
/* force an index rewrite */
- repo_read_index(the_repository);
- the_repository->index->updated_workdir = 1;
+ repo_read_index(repo);
+ repo->index->updated_workdir = 1;
if (!*sparse_index)
- ensure_full_index(the_repository->index);
+ ensure_full_index(repo->index);
}
return 0;
@@ -448,7 +451,7 @@ static struct sparse_checkout_init_opts {
} init_opts;
static int sparse_checkout_init(int argc, const char **argv, const char *prefix,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
struct pattern_list pl;
char *sparse_filename;
@@ -464,7 +467,7 @@ static int sparse_checkout_init(int argc, const char **argv, const char *prefix,
};
setup_work_tree();
- repo_read_index(the_repository);
+ repo_read_index(repo);
init_opts.cone_mode = -1;
init_opts.sparse_index = -1;
@@ -473,7 +476,7 @@ static int sparse_checkout_init(int argc, const char **argv, const char *prefix,
builtin_sparse_checkout_init_options,
builtin_sparse_checkout_init_usage, 0);
- if (update_modes(&init_opts.cone_mode, &init_opts.sparse_index))
+ if (update_modes(repo, &init_opts.cone_mode, &init_opts.sparse_index))
return 1;
memset(&pl, 0, sizeof(pl));
@@ -485,14 +488,14 @@ static int sparse_checkout_init(int argc, const char **argv, const char *prefix,
if (res >= 0) {
free(sparse_filename);
clear_pattern_list(&pl);
- return update_working_directory(NULL);
+ return update_working_directory(repo, NULL);
}
- if (repo_get_oid(the_repository, "HEAD", &oid)) {
+ if (repo_get_oid(repo, "HEAD", &oid)) {
FILE *fp;
/* assume we are in a fresh repo, but update the sparse-checkout file */
- if (safe_create_leading_directories(the_repository, sparse_filename))
+ if (safe_create_leading_directories(repo, sparse_filename))
die(_("unable to create leading directories of %s"),
sparse_filename);
fp = xfopen(sparse_filename, "w");
@@ -511,7 +514,7 @@ static int sparse_checkout_init(int argc, const char **argv, const char *prefix,
add_pattern("!/*/", empty_base, 0, &pl, 0);
pl.use_cone_patterns = init_opts.cone_mode;
- return write_patterns_and_update(&pl);
+ return write_patterns_and_update(repo, &pl);
}
static void insert_recursive_pattern(struct pattern_list *pl, struct strbuf *path)
@@ -674,7 +677,8 @@ static void add_patterns_literal(int argc, const char **argv,
add_patterns_from_input(pl, argc, argv, use_stdin ? stdin : NULL);
}
-static int modify_pattern_list(struct strvec *args, int use_stdin,
+static int modify_pattern_list(struct repository *repo,
+ struct strvec *args, int use_stdin,
enum modify_type m)
{
int result;
@@ -696,22 +700,23 @@ static int modify_pattern_list(struct strvec *args, int use_stdin,
}
if (!core_apply_sparse_checkout) {
- set_config(MODE_ALL_PATTERNS);
+ set_config(repo, MODE_ALL_PATTERNS);
core_apply_sparse_checkout = 1;
changed_config = 1;
}
- result = write_patterns_and_update(pl);
+ result = write_patterns_and_update(repo, pl);
if (result && changed_config)
- set_config(MODE_NO_PATTERNS);
+ set_config(repo, MODE_NO_PATTERNS);
clear_pattern_list(pl);
free(pl);
return result;
}
-static void sanitize_paths(struct strvec *args,
+static void sanitize_paths(struct repository *repo,
+ struct strvec *args,
const char *prefix, int skip_checks)
{
int i;
@@ -752,7 +757,7 @@ static void sanitize_paths(struct strvec *args,
for (i = 0; i < args->nr; i++) {
struct cache_entry *ce;
- struct index_state *index = the_repository->index;
+ struct index_state *index = repo->index;
int pos = index_name_pos(index, args->v[i], strlen(args->v[i]));
if (pos < 0)
@@ -779,7 +784,7 @@ static struct sparse_checkout_add_opts {
} add_opts;
static int sparse_checkout_add(int argc, const char **argv, const char *prefix,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
static struct option builtin_sparse_checkout_add_options[] = {
OPT_BOOL_F(0, "skip-checks", &add_opts.skip_checks,
@@ -796,7 +801,7 @@ static int sparse_checkout_add(int argc, const char **argv, const char *prefix,
if (!core_apply_sparse_checkout)
die(_("no sparse-checkout to add to"));
- repo_read_index(the_repository);
+ repo_read_index(repo);
argc = parse_options(argc, argv, prefix,
builtin_sparse_checkout_add_options,
@@ -804,9 +809,9 @@ static int sparse_checkout_add(int argc, const char **argv, const char *prefix,
for (int i = 0; i < argc; i++)
strvec_push(&patterns, argv[i]);
- sanitize_paths(&patterns, prefix, add_opts.skip_checks);
+ sanitize_paths(repo, &patterns, prefix, add_opts.skip_checks);
- ret = modify_pattern_list(&patterns, add_opts.use_stdin, ADD);
+ ret = modify_pattern_list(repo, &patterns, add_opts.use_stdin, ADD);
strvec_clear(&patterns);
return ret;
@@ -825,7 +830,7 @@ static struct sparse_checkout_set_opts {
} set_opts;
static int sparse_checkout_set(int argc, const char **argv, const char *prefix,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
int default_patterns_nr = 2;
const char *default_patterns[] = {"/*", "!/*/", NULL};
@@ -847,7 +852,7 @@ static int sparse_checkout_set(int argc, const char **argv, const char *prefix,
int ret;
setup_work_tree();
- repo_read_index(the_repository);
+ repo_read_index(repo);
set_opts.cone_mode = -1;
set_opts.sparse_index = -1;
@@ -856,7 +861,7 @@ static int sparse_checkout_set(int argc, const char **argv, const char *prefix,
builtin_sparse_checkout_set_options,
builtin_sparse_checkout_set_usage, 0);
- if (update_modes(&set_opts.cone_mode, &set_opts.sparse_index))
+ if (update_modes(repo, &set_opts.cone_mode, &set_opts.sparse_index))
return 1;
/*
@@ -870,10 +875,10 @@ static int sparse_checkout_set(int argc, const char **argv, const char *prefix,
} else {
for (int i = 0; i < argc; i++)
strvec_push(&patterns, argv[i]);
- sanitize_paths(&patterns, prefix, set_opts.skip_checks);
+ sanitize_paths(repo, &patterns, prefix, set_opts.skip_checks);
}
- ret = modify_pattern_list(&patterns, set_opts.use_stdin, REPLACE);
+ ret = modify_pattern_list(repo, &patterns, set_opts.use_stdin, REPLACE);
strvec_clear(&patterns);
return ret;
@@ -891,7 +896,7 @@ static struct sparse_checkout_reapply_opts {
static int sparse_checkout_reapply(int argc, const char **argv,
const char *prefix,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
static struct option builtin_sparse_checkout_reapply_options[] = {
OPT_BOOL(0, "cone", &reapply_opts.cone_mode,
@@ -912,12 +917,107 @@ static int sparse_checkout_reapply(int argc, const char **argv,
builtin_sparse_checkout_reapply_options,
builtin_sparse_checkout_reapply_usage, 0);
- repo_read_index(the_repository);
+ repo_read_index(repo);
- if (update_modes(&reapply_opts.cone_mode, &reapply_opts.sparse_index))
+ if (update_modes(repo, &reapply_opts.cone_mode, &reapply_opts.sparse_index))
return 1;
- return update_working_directory(NULL);
+ return update_working_directory(repo, NULL);
+}
+
+static char const * const builtin_sparse_checkout_clean_usage[] = {
+ "git sparse-checkout clean [-n|--dry-run]",
+ NULL
+};
+
+static int list_file_iterator(const char *path, const void *data)
+{
+ const char *msg = data;
+
+ printf(msg, path);
+ return 0;
+}
+
+static void list_every_file_in_dir(const char *msg,
+ const char *directory)
+{
+ struct strbuf path = STRBUF_INIT;
+
+ strbuf_addstr(&path, directory);
+ for_each_file_in_dir(&path, list_file_iterator, msg);
+ strbuf_release(&path);
+}
+
+static const char *msg_remove = N_("Removing %s\n");
+static const char *msg_would_remove = N_("Would remove %s\n");
+
+static int sparse_checkout_clean(int argc, const char **argv,
+ const char *prefix,
+ struct repository *repo)
+{
+ struct strbuf full_path = STRBUF_INIT;
+ const char *msg = msg_remove;
+ size_t worktree_len;
+ int force = 0, dry_run = 0, verbose = 0;
+ int require_force = 1;
+
+ struct option builtin_sparse_checkout_clean_options[] = {
+ OPT__DRY_RUN(&dry_run, N_("dry run")),
+ OPT__FORCE(&force, N_("force"), PARSE_OPT_NOCOMPLETE),
+ OPT__VERBOSE(&verbose, N_("report each affected file, not just directories")),
+ OPT_END(),
+ };
+
+ setup_work_tree();
+ if (!core_apply_sparse_checkout)
+ die(_("must be in a sparse-checkout to clean directories"));
+ if (!core_sparse_checkout_cone)
+ die(_("must be in a cone-mode sparse-checkout to clean directories"));
+
+ argc = parse_options(argc, argv, prefix,
+ builtin_sparse_checkout_clean_options,
+ builtin_sparse_checkout_clean_usage, 0);
+
+ repo_config_get_bool(repo, "clean.requireforce", &require_force);
+ if (require_force && !force && !dry_run)
+ die(_("for safety, refusing to clean without one of --force or --dry-run"));
+
+ if (dry_run)
+ msg = msg_would_remove;
+
+ if (repo_read_index(repo) < 0)
+ die(_("failed to read index"));
+
+ if (convert_to_sparse(repo->index, SPARSE_INDEX_MEMORY_ONLY) ||
+ repo->index->sparse_index == INDEX_EXPANDED)
+ die(_("failed to convert index to a sparse index; resolve merge conflicts and try again"));
+
+ strbuf_addstr(&full_path, repo->worktree);
+ strbuf_addch(&full_path, '/');
+ worktree_len = full_path.len;
+
+ for (size_t i = 0; i < repo->index->cache_nr; i++) {
+ struct cache_entry *ce = repo->index->cache[i];
+ if (!S_ISSPARSEDIR(ce->ce_mode))
+ continue;
+ strbuf_setlen(&full_path, worktree_len);
+ strbuf_add(&full_path, ce->name, ce->ce_namelen);
+
+ if (!is_directory(full_path.buf))
+ continue;
+
+ if (verbose)
+ list_every_file_in_dir(msg, ce->name);
+ else
+ printf(msg, ce->name);
+
+ if (dry_run <= 0 &&
+ remove_dir_recursively(&full_path, 0))
+ warning_errno(_("failed to remove '%s'"), ce->name);
+ }
+
+ strbuf_release(&full_path);
+ return 0;
}
static char const * const builtin_sparse_checkout_disable_usage[] = {
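
[Editorial aside, not part of the patch: the new clean subcommand walks the sparse index, and every entry whose mode marks it as a sparse directory is a directory that cone mode says should not be populated; if such a directory still exists on disk it is removed, or merely listed under --dry-run. Below is a stand-alone sketch of the path-building loop in plain C instead of the strbuf/index machinery; the worktree path and entries are made up.]

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* stand-ins for repo->worktree and the S_ISSPARSEDIR() index entries */
	const char *worktree = "/path/to/worktree/";
	const char *sparse_dirs[] = { "vendor/", "docs/archive/" };
	char path[4096];
	size_t worktree_len = strlen(worktree);
	int dry_run = 1;

	memcpy(path, worktree, worktree_len + 1);
	for (size_t i = 0; i < sizeof(sparse_dirs) / sizeof(*sparse_dirs); i++) {
		/* like strbuf_setlen(&full_path, worktree_len) + strbuf_add() */
		strcpy(path + worktree_len, sparse_dirs[i]);
		printf(dry_run ? "Would remove %s\n" : "Removing %s\n",
		       sparse_dirs[i]);
		/* the real command calls remove_dir_recursively(&full_path, 0) here */
	}
	return 0;
}

[As with git clean, the command above refuses to do anything unless --force or --dry-run is given; that safety check can be relaxed by setting clean.requireForce to false.]
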
@@ -927,7 +1027,7 @@ static char const * const builtin_sparse_checkout_disable_usage[] = {
static int sparse_checkout_disable(int argc, const char **argv,
const char *prefix,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
static struct option builtin_sparse_checkout_disable_options[] = {
OPT_END(),
@@ -955,7 +1055,7 @@ static int sparse_checkout_disable(int argc, const char **argv,
* are expecting to do that when disabling sparse-checkout.
*/
give_advice_on_expansion = 0;
- repo_read_index(the_repository);
+ repo_read_index(repo);
memset(&pl, 0, sizeof(pl));
hashmap_init(&pl.recursive_hashmap, pl_hashmap_cmp, NULL, 0);
@@ -966,13 +1066,13 @@ static int sparse_checkout_disable(int argc, const char **argv,
add_pattern("/*", empty_base, 0, &pl, 0);
prepare_repo_settings(the_repository);
- the_repository->settings.sparse_index = 0;
+ repo->settings.sparse_index = 0;
- if (update_working_directory(&pl))
+ if (update_working_directory(repo, &pl))
die(_("error while refreshing working directory"));
clear_pattern_list(&pl);
- return set_config(MODE_NO_PATTERNS);
+ return set_config(repo, MODE_NO_PATTERNS);
}
static char const * const builtin_sparse_checkout_check_rules_usage[] = {
@@ -987,14 +1087,17 @@ static struct sparse_checkout_check_rules_opts {
char *rules_file;
} check_rules_opts;
-static int check_rules(struct pattern_list *pl, int null_terminated) {
+static int check_rules(struct repository *repo,
+ struct pattern_list *pl,
+ int null_terminated)
+{
struct strbuf line = STRBUF_INIT;
struct strbuf unquoted = STRBUF_INIT;
char *path;
int line_terminator = null_terminated ? 0 : '\n';
strbuf_getline_fn getline_fn = null_terminated ? strbuf_getline_nul
: strbuf_getline;
- the_repository->index->sparse_checkout_patterns = pl;
+ repo->index->sparse_checkout_patterns = pl;
while (!getline_fn(&line, stdin)) {
path = line.buf;
if (!null_terminated && line.buf[0] == '"') {
@@ -1006,7 +1109,7 @@ static int check_rules(struct pattern_list *pl, int null_terminated) {
path = unquoted.buf;
}
- if (path_in_sparse_checkout(path, the_repository->index))
+ if (path_in_sparse_checkout(path, repo->index))
write_name_quoted(path, stdout, line_terminator);
}
strbuf_release(&line);
@@ -1016,7 +1119,7 @@ static int check_rules(struct pattern_list *pl, int null_terminated) {
}
static int sparse_checkout_check_rules(int argc, const char **argv, const char *prefix,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
static struct option builtin_sparse_checkout_check_rules_options[] = {
OPT_BOOL('z', NULL, &check_rules_opts.null_termination,
@@ -1055,7 +1158,7 @@ static int sparse_checkout_check_rules(int argc, const char **argv, const char *
free(sparse_filename);
}
- ret = check_rules(&pl, check_rules_opts.null_termination);
+ ret = check_rules(repo, &pl, check_rules_opts.null_termination);
clear_pattern_list(&pl);
free(check_rules_opts.rules_file);
return ret;
@@ -1073,6 +1176,7 @@ int cmd_sparse_checkout(int argc,
OPT_SUBCOMMAND("set", &fn, sparse_checkout_set),
OPT_SUBCOMMAND("add", &fn, sparse_checkout_add),
OPT_SUBCOMMAND("reapply", &fn, sparse_checkout_reapply),
+ OPT_SUBCOMMAND("clean", &fn, sparse_checkout_clean),
OPT_SUBCOMMAND("disable", &fn, sparse_checkout_disable),
OPT_SUBCOMMAND("check-rules", &fn, sparse_checkout_check_rules),
OPT_END(),
@@ -1084,8 +1188,8 @@ int cmd_sparse_checkout(int argc,
repo_config(the_repository, git_default_config, NULL);
- prepare_repo_settings(the_repository);
- the_repository->settings.command_requires_full_index = 0;
+ prepare_repo_settings(repo);
+ repo->settings.command_requires_full_index = 0;
return fn(argc, argv, prefix, repo);
}