Diffstat (limited to 'builtin/pack-objects.c')
-rw-r--r--	builtin/pack-objects.c	143
1 file changed, 93 insertions(+), 50 deletions(-)
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index e9d3cfb9e3..4ce6a93281 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -1,5 +1,6 @@
#include "builtin.h"
#include "cache.h"
+#include "repository.h"
#include "config.h"
#include "attr.h"
#include "object.h"
@@ -28,6 +29,8 @@
#include "argv-array.h"
#include "list.h"
#include "packfile.h"
+#include "object-store.h"
+#include "dir.h"
static const char *pack_usage[] = {
N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
@@ -43,7 +46,7 @@ static const char *pack_usage[] = {
static struct packing_data to_pack;
static struct pack_idx_entry **written_list;
-static uint32_t nr_result, nr_written;
+static uint32_t nr_result, nr_written, nr_seen;
static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
@@ -53,7 +56,8 @@ static int pack_loose_unreachable;
static int local;
static int have_non_local_packs;
static int incremental;
-static int ignore_packed_keep;
+static int ignore_packed_keep_on_disk;
+static int ignore_packed_keep_in_core;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
@@ -78,7 +82,7 @@ static uint16_t write_bitmap_options;
static int exclude_promisor_objects;
static unsigned long delta_cache_size = 0;
-static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
+static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
static unsigned long cache_max_small_delta_size = 1000;
static unsigned long window_memory_limit = 0;
@@ -122,11 +126,10 @@ static void *get_delta(struct object_entry *entry)
void *buf, *base_buf, *delta_buf;
enum object_type type;
- buf = read_sha1_file(entry->idx.oid.hash, &type, &size);
+ buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
die("unable to read %s", oid_to_hex(&entry->idx.oid));
- base_buf = read_sha1_file(entry->delta->idx.oid.hash, &type,
- &base_size);
+ base_buf = read_object_file(&entry->delta->idx.oid, &type, &base_size);
if (!base_buf)
die("unable to read %s",
oid_to_hex(&entry->delta->idx.oid));
@@ -267,11 +270,10 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
if (!usable_delta) {
if (entry->type == OBJ_BLOB &&
entry->size > big_file_threshold &&
- (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL)
+ (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
buf = NULL;
else {
- buf = read_sha1_file(entry->idx.oid.hash, &type,
- &size);
+ buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
die(_("unable to read %s"),
oid_to_hex(&entry->idx.oid));
@@ -837,11 +839,11 @@ static void write_pack_file(void)
* If so, rewrite it like in fast-import
*/
if (pack_to_stdout) {
- hashclose(f, oid.hash, CSUM_CLOSE);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- hashclose(f, oid.hash, CSUM_FSYNC);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = hashclose(f, oid.hash, 0);
+ int fd = finalize_hashfile(f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
nr_written, oid.hash, offset);
close(fd);
@@ -982,13 +984,16 @@ static int want_found_object(int exclude, struct packed_git *p)
* Otherwise, we signal "-1" at the end to tell the caller that we do
* not know either way, and it needs to check more packs.
*/
- if (!ignore_packed_keep &&
+ if (!ignore_packed_keep_on_disk &&
+ !ignore_packed_keep_in_core &&
(!local || !have_non_local_packs))
return 1;
if (local && !p->pack_local)
return 0;
- if (ignore_packed_keep && p->pack_local && p->pack_keep)
+ if (p->pack_local &&
+ ((ignore_packed_keep_on_disk && p->pack_keep) ||
+ (ignore_packed_keep_in_core && p->pack_keep_in_core)))
return 0;
/* we don't know yet; keep looking for more packs */
@@ -1025,8 +1030,7 @@ static int want_object_in_pack(const struct object_id *oid,
if (want != -1)
return want;
}
-
- list_for_each(pos, &packed_git_mru) {
+ list_for_each(pos, get_packed_git_mru(the_repository)) {
struct packed_git *p = list_entry(pos, struct packed_git, mru);
off_t offset;
@@ -1044,7 +1048,8 @@ static int want_object_in_pack(const struct object_id *oid,
}
want = want_found_object(exclude, p);
if (!exclude && want > 0)
- list_move(&p->mru, &packed_git_mru);
+ list_move(&p->mru,
+ get_packed_git_mru(the_repository));
if (want != -1)
return want;
}
@@ -1091,6 +1096,8 @@ static int add_object_entry(const struct object_id *oid, enum object_type type,
off_t found_offset = 0;
uint32_t index_pos;
+ display_progress(progress_state, ++nr_seen);
+
if (have_duplicate_entry(oid, exclude, &index_pos))
return 0;
@@ -1106,8 +1113,6 @@ static int add_object_entry(const struct object_id *oid, enum object_type type,
create_object_entry(oid, type, pack_name_hash(name),
exclude, name && no_try_delta(name),
index_pos, found_pack, found_offset);
-
- display_progress(progress_state, nr_result);
return 1;
}
@@ -1118,6 +1123,8 @@ static int add_object_entry_from_bitmap(const struct object_id *oid,
{
uint32_t index_pos;
+ display_progress(progress_state, ++nr_seen);
+
if (have_duplicate_entry(oid, 0, &index_pos))
return 0;
@@ -1125,8 +1132,6 @@ static int add_object_entry_from_bitmap(const struct object_id *oid,
return 0;
create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);
-
- display_progress(progress_state, nr_result);
return 1;
}
@@ -1190,7 +1195,7 @@ static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
/* Did not find one. Either we got a bogus request or
* we need to read and perhaps cache.
*/
- data = read_sha1_file(oid->hash, &type, &size);
+ data = read_object_file(oid, &type, &size);
if (!data)
return NULL;
if (type != OBJ_TREE) {
@@ -1351,7 +1356,7 @@ static void add_preferred_base(struct object_id *oid)
if (window <= num_preferred_base++)
return;
- data = read_object_with_reference(oid->hash, tree_type, &size, tree_oid.hash);
+ data = read_object_with_reference(oid, tree_type, &size, &tree_oid);
if (!data)
return;
@@ -1516,7 +1521,8 @@ static void check_object(struct object_entry *entry)
unuse_pack(&w_curs);
}
- entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size);
+ entry->type = oid_object_info(the_repository, &entry->idx.oid,
+ &entry->size);
/*
* The error condition is checked in prepare_pack(). This is
* to permit a missing preferred base object to be ignored
@@ -1571,15 +1577,16 @@ static void drop_reused_delta(struct object_entry *entry)
oi.sizep = &entry->size;
oi.typep = &entry->type;
- if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
+ if (packed_object_info(the_repository, entry->in_pack,
+ entry->in_pack_offset, &oi) < 0) {
/*
* We failed to get the info from this pack for some reason;
* fall back to sha1_object_info, which may find another copy.
* And if that fails, the error will be recorded in entry->type
* and dealt with in prepare_pack().
*/
- entry->type = sha1_object_info(entry->idx.oid.hash,
- &entry->size);
+ entry->type = oid_object_info(the_repository, &entry->idx.oid,
+ &entry->size);
}
}
@@ -1712,6 +1719,10 @@ static void get_object_details(void)
uint32_t i;
struct object_entry **sorted_by_offset;
+ if (progress)
+ progress_state = start_progress(_("Counting objects"),
+ to_pack.nr_objects);
+
sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
for (i = 0; i < to_pack.nr_objects; i++)
sorted_by_offset[i] = to_pack.objects + i;
@@ -1722,7 +1733,9 @@ static void get_object_details(void)
check_object(entry);
if (big_file_threshold < entry->size)
entry->no_try_delta = 1;
+ display_progress(progress_state, i + 1);
}
+ stop_progress(&progress_state);
/*
* This must happen in a second pass, since we rely on the delta
@@ -1871,8 +1884,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
/* Load data if not already done */
if (!trg->data) {
read_lock();
- trg->data = read_sha1_file(trg_entry->idx.oid.hash, &type,
- &sz);
+ trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
read_unlock();
if (!trg->data)
die("object %s cannot be read",
@@ -1885,8 +1897,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
}
if (!src->data) {
read_lock();
- src->data = read_sha1_file(src_entry->idx.oid.hash, &type,
- &sz);
+ src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
read_unlock();
if (!src->data) {
if (src_entry->preferred_base) {
@@ -2674,11 +2685,11 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs)
memset(&in_pack, 0, sizeof(in_pack));
- for (p = packed_git; p; p = p->next) {
+ for (p = get_packed_git(the_repository); p; p = p->next) {
struct object_id oid;
struct object *o;
- if (!p->pack_local || p->pack_keep)
+ if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
die("cannot open pack index");
@@ -2709,7 +2720,7 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs)
static int add_loose_object(const struct object_id *oid, const char *path,
void *data)
{
- enum object_type type = sha1_object_info(oid->hash, NULL);
+ enum object_type type = oid_object_info(the_repository, oid, NULL);
if (type < 0) {
warning("loose object at %s could not be examined", path);
@@ -2737,16 +2748,18 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
static struct packed_git *last_found = (void *)1;
struct packed_git *p;
- p = (last_found != (void *)1) ? last_found : packed_git;
+ p = (last_found != (void *)1) ? last_found :
+ get_packed_git(the_repository);
while (p) {
- if ((!p->pack_local || p->pack_keep) &&
+ if ((!p->pack_local || p->pack_keep ||
+ p->pack_keep_in_core) &&
find_pack_entry_one(oid->hash, p)) {
last_found = p;
return 1;
}
if (p == last_found)
- p = packed_git;
+ p = get_packed_git(the_repository);
else
p = p->next;
if (p == last_found)
@@ -2782,8 +2795,8 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
uint32_t i;
struct object_id oid;
- for (p = packed_git; p; p = p->next) {
- if (!p->pack_local || p->pack_keep)
+ for (p = get_packed_git(the_repository); p; p = p->next) {
+ if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
@@ -2809,7 +2822,8 @@ static int pack_options_allow_reuse(void)
{
return pack_to_stdout &&
allow_ofs_delta &&
- !ignore_packed_keep &&
+ !ignore_packed_keep_on_disk &&
+ !ignore_packed_keep_in_core &&
(!local || !have_non_local_packs) &&
!incremental;
}
@@ -2918,6 +2932,32 @@ static void get_object_list(int ac, const char **av)
oid_array_clear(&recent_objects);
}
+static void add_extra_kept_packs(const struct string_list *names)
+{
+ struct packed_git *p;
+
+ if (!names->nr)
+ return;
+
+ for (p = get_packed_git(the_repository); p; p = p->next) {
+ const char *name = basename(p->pack_name);
+ int i;
+
+ if (!p->pack_local)
+ continue;
+
+ for (i = 0; i < names->nr; i++)
+ if (!fspathcmp(name, names->items[i].string))
+ break;
+
+ if (i < names->nr) {
+ p->pack_keep_in_core = 1;
+ ignore_packed_keep_in_core = 1;
+ continue;
+ }
+ }
+}
+
static int option_parse_index_version(const struct option *opt,
const char *arg, int unset)
{
@@ -2957,6 +2997,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
struct argv_array rp = ARGV_ARRAY_INIT;
int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
int rev_list_index = 0;
+ struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
struct option pack_objects_options[] = {
OPT_SET_INT('q', "quiet", &progress,
N_("do not show progress meter"), 0),
@@ -3021,8 +3062,10 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
N_("create thin packs")),
OPT_BOOL(0, "shallow", &shallow,
N_("create packs suitable for shallow fetches")),
- OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
+ OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
N_("ignore packs that have companion .keep file")),
+ OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
+ N_("ignore this pack")),
OPT_INTEGER(0, "compression", &pack_compression_level,
N_("pack compression level")),
OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
@@ -3150,23 +3193,23 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (progress && all_progress_implied)
progress = 2;
- prepare_packed_git();
- if (ignore_packed_keep) {
+ add_extra_kept_packs(&keep_pack_list);
+ if (ignore_packed_keep_on_disk) {
struct packed_git *p;
- for (p = packed_git; p; p = p->next)
+ for (p = get_packed_git(the_repository); p; p = p->next)
if (p->pack_local && p->pack_keep)
break;
if (!p) /* no keep-able packs found */
- ignore_packed_keep = 0;
+ ignore_packed_keep_on_disk = 0;
}
if (local) {
/*
- * unlike ignore_packed_keep above, we do not want to
- * unset "local" based on looking at packs, as it
- * also covers non-local objects
+ * unlike ignore_packed_keep_on_disk above, we do not
+ * want to unset "local" based on looking at packs, as
+ * it also covers non-local objects
*/
struct packed_git *p;
- for (p = packed_git; p; p = p->next) {
+ for (p = get_packed_git(the_repository); p; p = p->next) {
if (!p->pack_local) {
have_non_local_packs = 1;
break;
@@ -3175,7 +3218,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
}
if (progress)
- progress_state = start_progress(_("Counting objects"), 0);
+ progress_state = start_progress(_("Enumerating objects"), 0);
if (!use_internal_rev_list)
read_object_list_from_stdin();
else {