Diffstat (limited to 'cache-tree.c')
| -rw-r--r-- | cache-tree.c | 201 |
1 file changed, 162 insertions, 39 deletions
diff --git a/cache-tree.c b/cache-tree.c
index a537a806c1..65ca993361 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -6,6 +6,7 @@
 #include "object-store.h"
 #include "replace-object.h"
 #include "promisor-remote.h"
+#include "sparse-index.h"

 #ifndef DEBUG_CACHE_TREE
 #define DEBUG_CACHE_TREE 0
@@ -45,7 +46,7 @@ static int subtree_name_cmp(const char *one, int onelen,
 	return memcmp(one, two, onelen);
 }

-static int subtree_pos(struct cache_tree *it, const char *path, int pathlen)
+int cache_tree_subtree_pos(struct cache_tree *it, const char *path, int pathlen)
 {
 	struct cache_tree_sub **down = it->down;
 	int lo, hi;
@@ -72,7 +73,7 @@ static struct cache_tree_sub *find_subtree(struct cache_tree *it,
 					   int create)
 {
 	struct cache_tree_sub *down;
-	int pos = subtree_pos(it, path, pathlen);
+	int pos = cache_tree_subtree_pos(it, path, pathlen);
 	if (0 <= pos)
 		return it->down[pos];
 	if (!create)
@@ -123,7 +124,7 @@ static int do_invalidate_path(struct cache_tree *it, const char *path)
 		it->entry_count = -1;
 		if (!*slash) {
 			int pos;
-			pos = subtree_pos(it, path, namelen);
+			pos = cache_tree_subtree_pos(it, path, namelen);
 			if (0 <= pos) {
 				cache_tree_free(&it->down[pos]->cache_tree);
 				free(it->down[pos]);
@@ -151,16 +152,15 @@ void cache_tree_invalidate_path(struct index_state *istate, const char *path)
 	istate->cache_changed |= CACHE_TREE_CHANGED;
 }

-static int verify_cache(struct cache_entry **cache,
-			int entries, int flags)
+static int verify_cache(struct index_state *istate, int flags)
 {
-	int i, funny;
+	unsigned i, funny;
 	int silent = flags & WRITE_TREE_SILENT;

 	/* Verify that the tree is merged */
 	funny = 0;
-	for (i = 0; i < entries; i++) {
-		const struct cache_entry *ce = cache[i];
+	for (i = 0; i < istate->cache_nr; i++) {
+		const struct cache_entry *ce = istate->cache[i];
 		if (ce_stage(ce)) {
 			if (silent)
 				return -1;
@@ -180,17 +180,19 @@
 	 * stage 0 entries.
 	 */
 	funny = 0;
-	for (i = 0; i < entries - 1; i++) {
+	for (i = 0; i + 1 < istate->cache_nr; i++) {
 		/* path/file always comes after path because of the way
 		 * the cache is sorted. Also path can appear only once,
 		 * which means conflicting one would immediately follow.
 		 */
-		const char *this_name = cache[i]->name;
-		const char *next_name = cache[i+1]->name;
-		int this_len = strlen(this_name);
-		if (this_len < strlen(next_name) &&
-		    strncmp(this_name, next_name, this_len) == 0 &&
-		    next_name[this_len] == '/') {
+		const struct cache_entry *this_ce = istate->cache[i];
+		const struct cache_entry *next_ce = istate->cache[i + 1];
+		const char *this_name = this_ce->name;
+		const char *next_name = next_ce->name;
+		int this_len = ce_namelen(this_ce);
+		if (this_len < ce_namelen(next_ce) &&
+		    next_name[this_len] == '/' &&
+		    strncmp(this_name, next_name, this_len) == 0) {
 			if (10 < ++funny) {
 				fprintf(stderr, "...\n");
 				break;
@@ -235,6 +237,11 @@ int cache_tree_fully_valid(struct cache_tree *it)
 	return 1;
 }

+static int must_check_existence(const struct cache_entry *ce)
+{
+	return !(has_promisor_remote() && ce_skip_worktree(ce));
+}
+
 static int update_one(struct cache_tree *it,
 		      struct cache_entry **cache,
 		      int entries,
@@ -254,6 +261,24 @@ static int update_one(struct cache_tree *it,

 	*skip_count = 0;

+	/*
+	 * If the first entry of this region is a sparse directory
+	 * entry corresponding exactly to 'base', then this cache_tree
+	 * struct is a "leaf" in the data structure, pointing to the
+	 * tree OID specified in the entry.
+	 */
+	if (entries > 0) {
+		const struct cache_entry *ce = cache[0];
+
+		if (S_ISSPARSEDIR(ce->ce_mode) &&
+		    ce->ce_namelen == baselen &&
+		    !strncmp(ce->name, base, baselen)) {
+			it->entry_count = 1;
+			oidcpy(&it->oid, &ce->oid);
+			return 1;
+		}
+	}
+
 	if (0 <= it->entry_count && has_object_file(&it->oid))
 		return it->entry_count;

@@ -358,8 +383,7 @@ static int update_one(struct cache_tree *it,
 		}

 		ce_missing_ok = mode == S_IFGITLINK || missing_ok ||
-			(has_promisor_remote() &&
-			 ce_skip_worktree(ce));
+			!must_check_existence(ce);
 		if (is_null_oid(oid) ||
 		    (!ce_missing_ok && !has_object_file(oid))) {
 			strbuf_release(&buffer);
@@ -416,8 +440,9 @@ static int update_one(struct cache_tree *it,
 	} else if (dryrun) {
 		hash_object_file(the_hash_algo, buffer.buf, buffer.len,
 				 tree_type, &it->oid);
-	} else if (write_object_file(buffer.buf, buffer.len, tree_type,
-				     &it->oid)) {
+	} else if (write_object_file_flags(buffer.buf, buffer.len, tree_type,
+					   &it->oid, flags & WRITE_TREE_SILENT
+					   ? HASH_SILENT : 0)) {
 		strbuf_release(&buffer);
 		return -1;
 	}
@@ -434,15 +459,24 @@ int cache_tree_update(struct index_state *istate, int flags)
 {
-	struct cache_tree *it = istate->cache_tree;
-	struct cache_entry **cache = istate->cache;
-	int entries = istate->cache_nr;
-	int skip, i = verify_cache(cache, entries, flags);
+	int skip, i;
+
+	i = verify_cache(istate, flags);

 	if (i)
 		return i;
+
+	if (!istate->cache_tree)
+		istate->cache_tree = cache_tree();
+
+	if (!(flags & WRITE_TREE_MISSING_OK) && has_promisor_remote())
+		prefetch_cache_entries(istate, must_check_existence);
+
 	trace_performance_enter();
-	i = update_one(it, cache, entries, "", 0, &skip, flags);
+	trace2_region_enter("cache_tree", "update", the_repository);
+	i = update_one(istate->cache_tree, istate->cache, istate->cache_nr,
+		       "", 0, &skip, flags);
+	trace2_region_leave("cache_tree", "update", the_repository);
 	trace_performance_leave("cache_tree_update");
 	if (i < 0)
 		return i;
@@ -492,7 +526,9 @@ static void write_one(struct strbuf *buffer, struct cache_tree *it,

 void cache_tree_write(struct strbuf *sb, struct cache_tree *root)
 {
+	trace2_region_enter("cache_tree", "write", the_repository);
 	write_one(sb, root, "", 0);
+	trace2_region_leave("cache_tree", "write", the_repository);
 }

 static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
@@ -555,7 +591,7 @@ static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
 	 * hence +2.
 	 */
 	it->subtree_alloc = subtree_nr + 2;
-	it->down = xcalloc(it->subtree_alloc, sizeof(struct cache_tree_sub *));
+	CALLOC_ARRAY(it->down, it->subtree_alloc);
 	for (i = 0; i < subtree_nr; i++) {
 		/* read each subtree */
 		struct cache_tree *sub;
@@ -581,9 +617,16 @@ static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)

 struct cache_tree *cache_tree_read(const char *buffer, unsigned long size)
 {
+	struct cache_tree *result;
+
 	if (buffer[0])
 		return NULL; /* not the whole tree */
-	return read_one(&buffer, &size);
+
+	trace2_region_enter("cache_tree", "read", the_repository);
+	result = read_one(&buffer, &size);
+	trace2_region_leave("cache_tree", "read", the_repository);
+
+	return result;
 }

 static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
@@ -622,9 +665,6 @@ static int write_index_as_tree_internal(struct object_id *oid,
 		cache_tree_valid = 0;
 	}

-	if (!index_state->cache_tree)
-		index_state->cache_tree = cache_tree();
-
 	if (!cache_tree_valid && cache_tree_update(index_state, flags) < 0)
 		return WRITE_TREE_UNMERGED_INDEX;

@@ -701,15 +741,26 @@ out:
 	return ret;
 }

+static void prime_cache_tree_sparse_dir(struct cache_tree *it,
+					struct tree *tree)
+{
+
+	oidcpy(&it->oid, &tree->object.oid);
+	it->entry_count = 1;
+}
+
 static void prime_cache_tree_rec(struct repository *r,
 				 struct cache_tree *it,
-				 struct tree *tree)
+				 struct tree *tree,
+				 struct strbuf *tree_path)
 {
 	struct tree_desc desc;
 	struct name_entry entry;
 	int cnt;
+	int base_path_len = tree_path->len;

 	oidcpy(&it->oid, &tree->object.oid);
+
 	init_tree_desc(&desc, tree->buffer, tree->size);
 	cnt = 0;
 	while (tree_entry(&desc, &entry)) {
@@ -718,14 +769,40 @@ static void prime_cache_tree_rec(struct repository *r,
 		else {
 			struct cache_tree_sub *sub;
 			struct tree *subtree = lookup_tree(r, &entry.oid);
+
 			if (!subtree->object.parsed)
 				parse_tree(subtree);
 			sub = cache_tree_sub(it, entry.path);
 			sub->cache_tree = cache_tree();
-			prime_cache_tree_rec(r, sub->cache_tree, subtree);
+
+			/*
+			 * Recursively-constructed subtree path is only needed when working
+			 * in a sparse index (where it's used to determine whether the
+			 * subtree is a sparse directory in the index).
+			 */
+			if (r->index->sparse_index) {
+				strbuf_setlen(tree_path, base_path_len);
+				strbuf_grow(tree_path, base_path_len + entry.pathlen + 1);
+				strbuf_add(tree_path, entry.path, entry.pathlen);
+				strbuf_addch(tree_path, '/');
+			}
+
+			/*
+			 * If a sparse index is in use, the directory being processed may be
+			 * sparse. To confirm that, we can check whether an entry with that
+			 * exact name exists in the index. If it does, the created subtree
+			 * should be sparse. Otherwise, cache tree expansion should continue
+			 * as normal.
+			 */
+			if (r->index->sparse_index &&
+			    index_entry_exists(r->index, tree_path->buf, tree_path->len))
+				prime_cache_tree_sparse_dir(sub->cache_tree, subtree);
+			else
+				prime_cache_tree_rec(r, sub->cache_tree, subtree, tree_path);
 			cnt += sub->cache_tree->entry_count;
 		}
 	}
+
 	it->entry_count = cnt;
 }

@@ -733,10 +810,16 @@ void prime_cache_tree(struct repository *r,
 		      struct index_state *istate,
 		      struct tree *tree)
 {
+	struct strbuf tree_path = STRBUF_INIT;
+
+	trace2_region_enter("cache-tree", "prime_cache_tree", the_repository);
 	cache_tree_free(&istate->cache_tree);
 	istate->cache_tree = cache_tree();
-	prime_cache_tree_rec(r, istate->cache_tree, tree);
+
+	prime_cache_tree_rec(r, istate->cache_tree, tree, &tree_path);
+	strbuf_release(&tree_path);
 	istate->cache_changed |= CACHE_TREE_CHANGED;
+	trace2_region_leave("cache-tree", "prime_cache_tree", the_repository);
 }

 /*
@@ -771,10 +854,30 @@ int cache_tree_matches_traversal(struct cache_tree *root,
 	return 0;
 }

-static void verify_one(struct repository *r,
-		       struct index_state *istate,
-		       struct cache_tree *it,
-		       struct strbuf *path)
+static void verify_one_sparse(struct repository *r,
+			      struct index_state *istate,
+			      struct cache_tree *it,
+			      struct strbuf *path,
+			      int pos)
+{
+	struct cache_entry *ce = istate->cache[pos];
+
+	if (!S_ISSPARSEDIR(ce->ce_mode))
+		BUG("directory '%s' is present in index, but not sparse",
+		    path->buf);
+}
+
+/*
+ * Returns:
+ *  0 - Verification completed.
+ *  1 - Restart verification - a call to ensure_full_index() freed the cache
+ *      tree that is being verified and verification needs to be restarted from
+ *      the new toplevel cache tree.
+ */
+static int verify_one(struct repository *r,
+		      struct index_state *istate,
+		      struct cache_tree *it,
+		      struct strbuf *path)
 {
 	int i, pos, len = path->len;
 	struct strbuf tree_buf = STRBUF_INIT;
@@ -782,17 +885,32 @@ static void verify_one(struct repository *r,

 	for (i = 0; i < it->subtree_nr; i++) {
 		strbuf_addf(path, "%s/", it->down[i]->name);
-		verify_one(r, istate, it->down[i]->cache_tree, path);
+		if (verify_one(r, istate, it->down[i]->cache_tree, path))
+			return 1;
 		strbuf_setlen(path, len);
 	}

 	if (it->entry_count < 0 ||
 	    /* no verification on tests (t7003) that replace trees */
 	    lookup_replace_object(r, &it->oid) != &it->oid)
-		return;
+		return 0;

 	if (path->len) {
+		/*
+		 * If the index is sparse and the cache tree is not
+		 * index_name_pos() may trigger ensure_full_index() which will
+		 * free the tree that is being verified.
+		 */
+		int is_sparse = istate->sparse_index;
 		pos = index_name_pos(istate, path->buf, path->len);
+		if (is_sparse && !istate->sparse_index)
+			return 1;
+
+		if (pos >= 0) {
+			verify_one_sparse(r, istate, it, path, pos);
+			return 0;
+		}
+
 		pos = -pos - 1;
 	} else {
 		pos = 0;
@@ -838,6 +956,7 @@ static void verify_one(struct repository *r,
 			oid_to_hex(&new_oid), oid_to_hex(&it->oid));
 	strbuf_setlen(path, len);
 	strbuf_release(&tree_buf);
+	return 0;
 }

 void cache_tree_verify(struct repository *r, struct index_state *istate)
@@ -846,6 +965,10 @@ void cache_tree_verify(struct repository *r, struct index_state *istate)

 	if (!istate->cache_tree)
 		return;
-	verify_one(r, istate, istate->cache_tree, &path);
+	if (verify_one(r, istate, istate->cache_tree, &path)) {
+		strbuf_reset(&path);
+		if (verify_one(r, istate, istate->cache_tree, &path))
+			BUG("ensure_full_index() called twice while verifying cache tree");
+	}
 	strbuf_release(&path);
 }
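
Editor's note, a minimal sketch for orientation (not part of the patch): with this change, cache_tree_update() allocates istate->cache_tree itself and turns a sparse directory entry into a single-entry "leaf" node carrying the tree OID from the index, so a caller no longer needs to create the cache tree up front. The sketch below assumes git's existing internal APIs (repo_read_index(), cache_tree_update(), WRITE_TREE_SILENT, oid_to_hex()); the helper name show_cache_tree_root() is made up purely for illustration.

#include "cache.h"
#include "cache-tree.h"

/* Hypothetical helper, not part of this change. */
static int show_cache_tree_root(void)
{
	struct index_state *istate = the_repository->index;

	if (repo_read_index(the_repository) < 0)
		die("unable to read index");

	/*
	 * No need to allocate istate->cache_tree before calling
	 * cache_tree_update(); it now does so itself.  With a sparse
	 * index, each sparse directory entry contributes a one-entry
	 * leaf whose OID comes straight from the index entry.
	 */
	if (cache_tree_update(istate, WRITE_TREE_SILENT))
		return error("failed to update cache tree");

	printf("%s\n", oid_to_hex(&istate->cache_tree->oid));
	return 0;
}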
