327 files changed, 9810 insertions, 4157 deletions
diff --git a/.clang-format b/.clang-format index 611ab4750b..12a89f95f9 100644 --- a/.clang-format +++ b/.clang-format @@ -163,7 +163,7 @@ PenaltyBreakComment: 10 PenaltyBreakFirstLessLess: 0 PenaltyBreakString: 10 PenaltyExcessCharacter: 100 -PenaltyReturnTypeOnItsOwnLine: 5 +PenaltyReturnTypeOnItsOwnLine: 60 # Don't sort #include's SortIncludes: false diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines index c4cb5ff0d4..48aa4edfbd 100644 --- a/Documentation/CodingGuidelines +++ b/Documentation/CodingGuidelines @@ -386,6 +386,11 @@ For C programs: - Use Git's gettext wrappers to make the user interface translatable. See "Marking strings for translation" in po/README. + - Variables and functions local to a given source file should be marked + with "static". Variables that are visible to other source files + must be declared with "extern" in header files. However, function + declarations should not use "extern", as that is already the default. + For Perl programs: - Most of the C guidelines above apply. diff --git a/Documentation/Makefile b/Documentation/Makefile index 4ae9ba5c86..6232143cb9 100644 --- a/Documentation/Makefile +++ b/Documentation/Makefile @@ -72,6 +72,7 @@ TECH_DOCS += SubmittingPatches TECH_DOCS += technical/hash-function-transition TECH_DOCS += technical/http-protocol TECH_DOCS += technical/index-format +TECH_DOCS += technical/long-running-process-protocol TECH_DOCS += technical/pack-format TECH_DOCS += technical/pack-heuristics TECH_DOCS += technical/pack-protocol diff --git a/Documentation/RelNotes/2.17.0.txt b/Documentation/RelNotes/2.17.0.txt new file mode 100644 index 0000000000..d6db0e19cf --- /dev/null +++ b/Documentation/RelNotes/2.17.0.txt @@ -0,0 +1,398 @@ +Git 2.17 Release Notes +====================== + +Updates since v2.16 +------------------- + +UI, Workflows & Features + + * "diff" family of commands learned "--find-object=<object-id>" option + to limit the findings to changes that involve the named object. + + * "git format-patch" learned to give 72-cols to diffstat, which is + consistent with other line length limits the subcommand uses for + its output meant for e-mails. + + * The log from "git daemon" can be redirected with a new option; one + relevant use case is to send the log to standard error (instead of + syslog) when running it from inetd. + + * "git rebase" learned to take "--allow-empty-message" option. + + * "git am" has learned the "--quit" option, in addition to the + existing "--abort" option; having the pair mirrors a few other + commands like "rebase" and "cherry-pick". + + * "git worktree add" learned to run the post-checkout hook, just like + "git clone" runs it upon the initial checkout. + + * "git tag" learned an explicit "--edit" option that allows the + message given via "-m" and "-F" to be further edited. + + * "git fetch --prune-tags" may be used as a handy short-hand for + getting rid of stale tags that are locally held. + + * The new "--show-current-patch" option gives an end-user facing way + to get the diff being applied when "git rebase" (and "git am") + stops with a conflict. + + * "git add -p" used to offer "/" (look for a matching hunk) as a + choice, even there was only one hunk, which has been corrected. + Also the single-key help is now given only for keys that are + enabled (e.g. help for '/' won't be shown when there is only one + hunk). + + * Since Git 1.7.9, "git merge" defaulted to --no-ff (i.e. 
even when + the side branch being merged is a descendant of the current commit, + create a merge commit instead of fast-forwarding) when merging a + tag object. This was appropriate default for integrators who pull + signed tags from their downstream contributors, but caused an + unnecessary merges when used by downstream contributors who + habitually "catch up" their topic branches with tagged releases + from the upstream. Update "git merge" to default to --no-ff only + when merging a tag object that does *not* sit at its usual place in + refs/tags/ hierarchy, and allow fast-forwarding otherwise, to + mitigate the problem. + + * "git status" can spend a lot of cycles to compute the relation + between the current branch and its upstream, which can now be + disabled with "--no-ahead-behind" option. + + * "git diff" and friends learned funcname patterns for Go language + source files. + + * "git send-email" learned "--reply-to=<address>" option. + + * Funcname pattern used for C# now recognizes "async" keyword. + + * In a way similar to how "git tag" learned to honor the pager + setting only in the list mode, "git config" learned to ignore the + pager setting when it is used for setting values (i.e. when the + purpose of the operation is not to "show"). + + +Performance, Internal Implementation, Development Support etc. + + * More perf tests for threaded grep + + * "perf" test output can be sent to codespeed server. + + * The build procedure for perl/ part has been greatly simplified by + weaning ourselves off of MakeMaker. + + * Perl 5.8 or greater has been required since Git 1.7.4 released in + 2010, but we continued to assume some core modules may not exist and + used a conditional "eval { require <<module>> }"; we no longer do + this. Some platforms (Fedora/RedHat/CentOS, for example) ship Perl + without all core modules by default (e.g. Digest::MD5, File::Temp, + File::Spec, Net::Domain, Net::SMTP). Users on such platforms may + need to install these additional modules. + + * As a convenience, we install copies of Perl modules we require which + are not part of the core Perl distribution (e.g. Error and + Mail::Address). Users and packagers whose operating system provides + these modules can set NO_PERL_CPAN_FALLBACKS to avoid installing the + bundled modules. + + * In preparation for implementing narrow/partial clone, the machinery + for checking object connectivity used by gc and fsck has been + taught that a missing object is OK when it is referenced by a + packfile specially marked as coming from trusted repository that + promises to make them available on-demand and lazily. + + * The machinery to clone & fetch, which in turn involves packing and + unpacking objects, has been told how to omit certain objects using + the filtering mechanism introduced by another topic. It now knows + to mark the resulting pack as a promisor pack to tolerate missing + objects, laying foundation for "narrow" clones. + + * The first step to getting rid of mru API and using the + doubly-linked list API directly instead. + + * Retire mru API as it does not give enough abstraction over + underlying list API to be worth it. + + * Rewrite two more "git submodule" subcommands in C. + + * The tracing machinery learned to report tweaking of environment + variables as well. + + * Update Coccinelle rules to catch and optimize strbuf_addf(&buf, "%s", str) + + * Prevent "clang-format" from breaking line after function return type. 
+ + * The sequencer infrastructure is shared across "git cherry-pick", + "git rebase -i", etc., and has always spawned "git commit" when it + needs to create a commit. It has been taught to do so internally, + when able, by reusing the codepath "git commit" itself uses, which + gives performance boost for a few tens of percents in some sample + scenarios. + + * Push the submodule version of collision-detecting SHA-1 hash + implementation a bit harder on builders. + + * Avoid mmapping small files while using packed refs (especially ones + with zero size, which would cause later munmap() to fail). + + * Conversion from uchar[20] to struct object_id continues. + + * More tests for wildmatch functions. + + * The code to binary search starting from a fan-out table (which is + how the packfile is indexed with object names) has been refactored + into a reusable helper. + + * We now avoid using identifiers that clash with C++ keywords. Even + though it is not a goal to compile Git with C++ compilers, changes + like this help use of code analysis tools that targets C++ on our + codebase. + + * The executable is now built in 'script' phase in Travis CI integration, + to follow the established practice, rather than during 'before_script' + phase. This allows the CI categorize the failures better ('failed' + is project's fault, 'errored' is build environment's). + (merge 3c93b82920 sg/travis-build-during-script-phase later to maint). + + * Writing out the index file when the only thing that changed in it + is the untracked cache information is often wasteful, and this has + been optimized out. + + * Various pieces of Perl code we have have been cleaned up. + + * Internal API clean-up to allow write_locked_index() optionally skip + writing the in-core index when it is not modified. + + +Also contains various documentation updates and code clean-ups. + + +Fixes since v2.16 +----------------- + + * An old regression in "git describe --all $annotated_tag^0" has been + fixed. + + * "git status" after moving a path in the working tree (hence making + it appear "removed") and then adding with the -N option (hence + making that appear "added") detected it as a rename, but did not + report the old and new pathnames correctly. + + * "git svn dcommit" did not take into account the fact that a + svn+ssh:// URL with a username@ (typically used for pushing) refers + to the same SVN repository without the username@ and failed when + svn.pushmergeinfo option is set. + + * API clean-up around revision traversal. + + * "git merge -Xours/-Xtheirs" learned to use our/their version when + resolving a conflicting updates to a symbolic link. + + * "git clone $there $here" is allowed even when here directory exists + as long as it is an empty directory, but the command incorrectly + removed it upon a failure of the operation. + + * "git commit --fixup" did not allow "-m<message>" option to be used + at the same time; allow it to annotate resulting commit with more + text. + + * When resetting the working tree files recursively, the working tree + of submodules are now also reset to match. + + * "git stash -- <pathspec>" incorrectly blew away untracked files in + the directory that matched the pathspec, which has been corrected. + + * Instead of maintaining home-grown email address parsing code, ship + a copy of reasonably recent Mail::Address to be used as a fallback + in 'git send-email' when the platform lacks it. + (merge d60be8acab mm/send-email-fallback-to-local-mail-address later to maint). 
+ + * "git add -p" was taught to ignore local changes to submodules as + they do not interfere with the partial addition of regular changes + anyway. + + * Avoid showing a warning message in the middle of a line of "git + diff" output. + (merge 4e056c989f nd/diff-flush-before-warning later to maint). + + * The http tracing code, often used to debug connection issues, + learned to redact potentially sensitive information from its output + so that it can be more safely sharable. + (merge 8ba18e6fa4 jt/http-redact-cookies later to maint). + + * Crash fix for a corner case where an error codepath tried to unlock + what it did not acquire lock on. + (merge 81fcb698e0 mr/packed-ref-store-fix later to maint). + + * The split-index mode had a few corner case bugs fixed. + (merge ae59a4e44f tg/split-index-fixes later to maint). + + * Assorted fixes to "git daemon". + (merge ed15e58efe jk/daemon-fixes later to maint). + + * Completion of "git merge -s<strategy>" (in contrib/) did not work + well in non-C locale. + (merge 7cc763aaa3 nd/list-merge-strategy later to maint). + + * Workaround for segfault with more recent versions of SVN. + (merge 7f6f75e97a ew/svn-branch-segfault-fix later to maint). + + * Plug recently introduced leaks in fsck. + (merge ba3a08ca0e jt/fsck-code-cleanup later to maint). + + * "git pull --rebase" did not pass verbosity setting down when + recursing into a submodule. + (merge a56771a668 sb/pull-rebase-submodule later to maint). + + * The way "git reset --hard" reports the commit the updated HEAD + points at is made consistent with the way how the commit title is + generated by the other parts of the system. This matters when the + title is spread across physically multiple lines. + (merge 1cf823fb68 tg/reset-hard-show-head-with-pretty later to maint). + + * Test fixes. + (merge 63b1a175ee sg/test-i18ngrep later to maint). + + * Some bugs around "untracked cache" feature have been fixed. This + will notice corrupt data in the untracked cache left by old and + buggy code and issue a warning---the index can be fixed by clearing + the untracked cache from it. + (merge 0cacebf099 nd/fix-untracked-cache-invalidation later to maint). + (merge 7bf0be7501 ab/untracked-cache-invalidation-docs later to maint). + + * "git blame HEAD COPYING" in a bare repository failed to run, while + "git blame HEAD -- COPYING" run just fine. This has been corrected. + + * "git add" files in the same directory, but spelling the directory + path in different cases on case insensitive filesystem, corrupted + the name hash data structure and led to unexpected results. This + has been corrected. + (merge c95525e90d bp/name-hash-dirname-fix later to maint). + + * "git rebase -p" mangled log messages of a merge commit, which is + now fixed. + (merge ed5144d7eb js/fix-merge-arg-quoting-in-rebase-p later to maint). + + * Some low level protocol codepath could crash when they get an + unexpected flush packet, which is now fixed. + (merge bb1356dc64 js/packet-read-line-check-null later to maint). + + * "git check-ignore" with multiple paths got confused when one is a + file and the other is a directory, which has been fixed. + (merge d60771e930 rs/check-ignore-multi later to maint). + + * "git describe $garbage" stopped giving any errors when the garbage + happens to be a string with 40 hexadecimal letters. + (merge a8e7a2bf0f sb/describe-blob later to maint). + + * Code to unquote single-quoted string (used in the parser for + configuration files, etc.) 
did not diagnose bogus input correctly + and produced bogus results instead. + (merge ddbbf8eb25 jk/sq-dequote-on-bogus-input later to maint). + + * Many places in "git apply" knew that "/dev/null" that signals + "there is no such file on this side of the diff" can be followed by + whitespace and garbage when parsing a patch, except for one, which + made an otherwise valid patch (e.g. ones from subversion) rejected. + (merge e454ad4bec tk/apply-dev-null-verify-name-fix later to maint). + + * We no longer create any *.spec file, so "make clean" should not + remove it. + (merge 4321bdcabb tz/do-not-clean-spec-file later to maint). + + * "git push" over http transport did not unquote the push-options + correctly. + (merge 90dce21eb0 jk/push-options-via-transport-fix later to maint). + + * "git send-email" learned to complain when the batch-size option is + not defined when the relogin-delay option is, since these two are + mutually required. + (merge 9caa70697b xz/send-email-batch-size later to maint). + + * Y2k20 fix ;-) for our perl scripts. + (merge a40e06ee33 bw/perl-timegm-timelocal-fix later to maint). + + * Threaded "git grep" has been optimized to avoid allocation in code + section that is covered under a mutex. + (merge 38ef24dccf rv/grep-cleanup later to maint). + + * "git subtree" script (in contrib/) scripted around "git log", whose + output got affected by end-user configuration like log.showsignature + (merge 8841b5222c sg/subtree-signed-commits later to maint). + + * While finding unique object name abbreviation, the code may + accidentally have read beyond the end of the array of object names + in a pack. + (merge 21abed500c ds/find-unique-abbrev-optim later to maint). + + * Micro optimization in revision traversal code. + (merge ebbed3ba04 ds/mark-parents-uninteresting-optim later to maint). + + * "git commit" used to run "gc --auto" near the end, which was lost + when the command was reimplemented in C by mistake. + (merge 095c741edd ab/gc-auto-in-commit later to maint). + + * Allow running a couple of tests with "sh -x". + (merge c20bf94abc sg/cvs-tests-with-x later to maint). + + * The codepath to replace an existing entry in the index had a bug in + updating the name hash structure, which has been fixed. + (merge 0e267b7a24 bp/refresh-cache-ent-rehash-fix later to maint). + + * The transfer.fsckobjects configuration tells "git fetch" to + validate the data and connected-ness of objects in the received + pack; the code to perform this check has been taught about the + narrow clone's convention that missing objects that are reachable + from objects in a pack that came from a promissor remote is OK. + + * There was an unused file-scope static variable left in http.c when + building for versions of libCURL that is older than 7.19.4, which + has been fixed. + (merge b8fd6008ec rj/http-code-cleanup later to maint). + + * Shell script portability fix. + (merge 206a6ae013 ml/filter-branch-portability-fix later to maint). + + * Other minor doc, test and build updates and code cleanups. + (merge e2a5a028c7 bw/oidmap-autoinit later to maint). + (merge ec3b4b06f8 cl/t9001-cleanup later to maint). + (merge e1b3f3dd38 ks/submodule-doc-updates later to maint). + (merge fbac558a9b rs/describe-unique-abbrev later to maint). + (merge 8462ff43e4 tb/crlf-conv-flags later to maint). + (merge 7d68bb0766 rb/hashmap-h-compilation-fix later to maint). + (merge 3449847168 cc/sha1-file-name later to maint). + (merge ad622a256f ds/use-get-be64 later to maint). 
+ (merge f919ffebed sg/cocci-move-array later to maint). + (merge 4e801463c7 jc/mailinfo-cleanup-fix later to maint). + (merge ef5b3a6c5e nd/shared-index-fix later to maint). + (merge 9f5258cbb8 tz/doc-show-defaults-to-head later to maint). + (merge b780e4407d jc/worktree-add-short-help later to maint). + (merge ae239fc8e5 rs/cocci-strbuf-addf-to-addstr later to maint). + (merge 2e22a85e5c nd/ignore-glob-doc-update later to maint). + (merge 3738031581 jk/gettext-poison later to maint). + (merge 54360a1956 rj/sparse-updates later to maint). + (merge 12e31a6b12 sg/doc-test-must-fail-args later to maint). + (merge 760f1ad101 bc/doc-interpret-trailers-grammofix later to maint). + (merge 4ccf461f56 bp/fsmonitor later to maint). + (merge a6119f82b1 jk/test-hashmap-updates later to maint). + (merge 5aea9fe6cc rd/typofix later to maint). + (merge e4e5da2796 sb/status-doc-fix later to maint). + (merge 7976e901c8 gs/test-unset-xdg-cache-home later to maint). + (merge d023df1ee6 tg/worktree-create-tracking later to maint). + (merge 4cbe92fd41 sm/mv-dry-run-update later to maint). + (merge 75e5e9c3f7 sb/color-h-cleanup later to maint). + (merge 2708ef4af6 sg/t6300-modernize later to maint). + (merge d88e92d4e0 bw/doc-submodule-recurse-config-with-clone later to maint). + (merge f74bbc8dd2 jk/cached-commit-buffer later to maint). + (merge 1316416903 ms/non-ascii-ticks later to maint). + (merge 878056005e rs/strbuf-read-file-or-whine later to maint). + (merge 79f0ba1547 jk/strbuf-read-file-close-error later to maint). + (merge edfb8ba068 ot/ref-filter-cleanup later to maint). + (merge 11395a3b4b jc/test-must-be-empty later to maint). + (merge 768b9d6db7 mk/doc-pretty-fill later to maint). + (merge 2caa7b8d27 ab/man-sec-list later to maint). + (merge 40c17eb184 ks/t3200-typofix later to maint). + (merge bd9958c358 dp/merge-strategy-doc-fix later to maint). + (merge 9ee0540a40 js/ming-strftime later to maint). + (merge 1775e990f7 tz/complete-tag-delete-tagname later to maint). + (merge 00a4b03501 rj/warning-uninitialized-fix later to maint). + (merge b635ed97a0 jk/attributes-path-doc later to maint). diff --git a/Documentation/config.txt b/Documentation/config.txt index 0e25b2c92b..ce9102cea8 100644 --- a/Documentation/config.txt +++ b/Documentation/config.txt @@ -1398,7 +1398,16 @@ fetch.unpackLimit:: fetch.prune:: If true, fetch will automatically behave as if the `--prune` - option was given on the command line. See also `remote.<name>.prune`. + option was given on the command line. See also `remote.<name>.prune` + and the PRUNING section of linkgit:git-fetch[1]. + +fetch.pruneTags:: + If true, fetch will automatically behave as if the + `refs/tags/*:refs/tags/*` refspec was provided when pruning, + if not set already. This allows for setting both this option + and `fetch.prune` to maintain a 1=1 mapping to upstream + refs. See also `remote.<name>.pruneTags` and the PRUNING + section of linkgit:git-fetch[1]. fetch.output:: Control how ref update status is printed. Valid values are @@ -2945,6 +2954,15 @@ remote.<name>.prune:: remote (as if the `--prune` option was given on the command line). Overrides `fetch.prune` settings, if any. +remote.<name>.pruneTags:: + When set to true, fetching from this remote by default will also + remove any local tags that no longer exist on the remote if pruning + is activated in general via `remote.<name>.prune`, `fetch.prune` or + `--prune`. Overrides `fetch.pruneTags` settings, if any. ++ +See also `remote.<name>.prune` and the PRUNING section of +linkgit:git-fetch[1]. 
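A minimal sketch of how the pruning settings documented in this hunk fit together (the remote name `origin` is only an example):

------------------------------------------------
# prune stale remote-tracking branches on every fetch
$ git config --global fetch.prune true

# additionally prune local tags that no longer exist upstream
$ git config --global fetch.pruneTags true

# or restrict tag pruning to a single remote
$ git config remote.origin.pruneTags true
------------------------------------------------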
+ remotes.<group>:: The list of remotes which are fetched by "git remote update <group>". See linkgit:git-remote[1]. @@ -3210,7 +3228,8 @@ submodule.active:: submodule.recurse:: Specifies if commands recurse into submodules by default. This - applies to all commands that have a `--recurse-submodules` option. + applies to all commands that have a `--recurse-submodules` option, + except `clone`. Defaults to false. submodule.fetchJobs:: @@ -3343,6 +3362,10 @@ uploadpack.packObjectsHook:: was run. I.e., `upload-pack` will feed input intended for `pack-objects` to the hook, and expects a completed packfile on stdout. + +uploadpack.allowFilter:: + If this option is set, `upload-pack` will advertise partial + clone and partial fetch object filtering. + Note that this configuration variable is ignored if it is seen in the repository-level config (this is a safety measure against fetching from diff --git a/Documentation/diff-options.txt b/Documentation/diff-options.txt index 743af97b06..e3a44f03cd 100644 --- a/Documentation/diff-options.txt +++ b/Documentation/diff-options.txt @@ -128,6 +128,14 @@ have to use `--diff-algorithm=default` option. These parameters can also be set individually with `--stat-width=<width>`, `--stat-name-width=<name-width>` and `--stat-count=<count>`. +--compact-summary:: + Output a condensed summary of extended header information such + as file creations or deletions ("new" or "gone", optionally "+l" + if it's a symlink) and mode changes ("+x" or "-x" for adding + or removing executable bit respectively) in diffstat. The + information is put betwen the filename part and the graph + part. Implies `--stat`. + --numstat:: Similar to `--stat`, but shows number of added and deleted lines in decimal notation and pathname without @@ -508,6 +516,15 @@ occurrences of that string did not change). See the 'pickaxe' entry in linkgit:gitdiffcore[7] for more information. +--find-object=<object-id>:: + Look for differences that change the number of occurrences of + the specified object. Similar to `-S`, just the argument is different + in that it doesn't search for a specific string but for a specific + object id. ++ +The object can be a blob or a submodule commit. It implies the `-t` option in +`git-log` to also find trees. + --pickaxe-all:: When `-S` or `-G` finds a change, show all the changes in that changeset, not just the files that contain the change @@ -516,6 +533,7 @@ information. --pickaxe-regex:: Treat the <string> given to `-S` as an extended POSIX regular expression to match. + endif::git-format-patch[] -O<orderfile>:: diff --git a/Documentation/fetch-options.txt b/Documentation/fetch-options.txt index fb6bebbc61..8631e365f4 100644 --- a/Documentation/fetch-options.txt +++ b/Documentation/fetch-options.txt @@ -73,7 +73,22 @@ ifndef::git-pull[] are fetched due to an explicit refspec (either on the command line or in the remote configuration, for example if the remote was cloned with the --mirror option), then they are also - subject to pruning. + subject to pruning. Supplying `--prune-tags` is a shorthand for + providing the tag refspec. ++ +See the PRUNING section below for more details. + +-P:: +--prune-tags:: + Before fetching, remove any local tags that no longer exist on + the remote if `--prune` is enabled. This option should be used + more carefully, unlike `--prune` it will remove any local + references (local tags) that have been created. 
This option is + a shorthand for providing the explicit tag refspec along with + `--prune`, see the discussion about that in its documentation. ++ +See the PRUNING section below for more details. + endif::git-pull[] ifndef::git-pull[] diff --git a/Documentation/git-am.txt b/Documentation/git-am.txt index 12879e4029..6f6c34b0f4 100644 --- a/Documentation/git-am.txt +++ b/Documentation/git-am.txt @@ -16,7 +16,7 @@ SYNOPSIS [--exclude=<path>] [--include=<path>] [--reject] [-q | --quiet] [--[no-]scissors] [-S[<keyid>]] [--patch-format=<format>] [(<mbox> | <Maildir>)...] -'git am' (--continue | --skip | --abort) +'git am' (--continue | --skip | --abort | --quit | --show-current-patch) DESCRIPTION ----------- @@ -167,6 +167,14 @@ default. You can use `--no-utf8` to override this. --abort:: Restore the original branch and abort the patching operation. +--quit:: + Abort the patching operation but keep HEAD and the index + untouched. + +--show-current-patch:: + Show the patch being applied when "git am" is stopped because + of conflicts. + DISCUSSION ---------- diff --git a/Documentation/git-config.txt b/Documentation/git-config.txt index 14da5fc157..e09ed5d7d5 100644 --- a/Documentation/git-config.txt +++ b/Documentation/git-config.txt @@ -233,6 +233,12 @@ See also <<FILES>>. using `--file`, `--global`, etc) and `on` when searching all config files. +CONFIGURATION +------------- +`pager.config` is only respected when listing configuration, i.e., when +using `--list` or any of the `--get-*` which may return multiple results. +The default is to use a pager. + [[FILES]] FILES ----- diff --git a/Documentation/git-daemon.txt b/Documentation/git-daemon.txt index 3c91db7bed..56d54a4898 100644 --- a/Documentation/git-daemon.txt +++ b/Documentation/git-daemon.txt @@ -20,6 +20,7 @@ SYNOPSIS [--inetd | [--listen=<host_or_ipaddr>] [--port=<n>] [--user=<user> [--group=<group>]]] + [--log-destination=(stderr|syslog|none)] [<directory>...] DESCRIPTION @@ -80,7 +81,8 @@ OPTIONS do not have the 'git-daemon-export-ok' file. --inetd:: - Have the server run as an inetd service. Implies --syslog. + Have the server run as an inetd service. Implies --syslog (may be + overridden with `--log-destination=`). Incompatible with --detach, --port, --listen, --user and --group options. @@ -110,8 +112,28 @@ OPTIONS zero for no limit. --syslog:: - Log to syslog instead of stderr. Note that this option does not imply - --verbose, thus by default only error conditions will be logged. + Short for `--log-destination=syslog`. + +--log-destination=<destination>:: + Send log messages to the specified destination. + Note that this option does not imply --verbose, + thus by default only error conditions will be logged. + The <destination> must be one of: ++ +-- +stderr:: + Write to standard error. + Note that if `--detach` is specified, + the process disconnects from the real standard error, + making this destination effectively equivalent to `none`. +syslog:: + Write to syslog, using the `git-daemon` identifier. +none:: + Disable all logging. +-- ++ +The default destination is `syslog` if `--inetd` or `--detach` is specified, +otherwise `stderr`. --user-path:: --user-path=<path>:: diff --git a/Documentation/git-fetch.txt b/Documentation/git-fetch.txt index b153aefa68..e319935597 100644 --- a/Documentation/git-fetch.txt +++ b/Documentation/git-fetch.txt @@ -99,6 +99,93 @@ The latter use of the `remote.<repository>.fetch` values can be overridden by giving the `--refmap=<refspec>` parameter(s) on the command line. 
+PRUNING +------- + +Git has a default disposition of keeping data unless it's explicitly +thrown away; this extends to holding onto local references to branches +on remotes that have themselves deleted those branches. + +If left to accumulate, these stale references might make performance +worse on big and busy repos that have a lot of branch churn, and +e.g. make the output of commands like `git branch -a --contains +<commit>` needlessly verbose, as well as impacting anything else +that'll work with the complete set of known references. + +These remote-tracking references can be deleted as a one-off with +either of: + +------------------------------------------------ +# While fetching +$ git fetch --prune <name> + +# Only prune, don't fetch +$ git remote prune <name> +------------------------------------------------ + +To prune references as part of your normal workflow without needing to +remember to run that, set `fetch.prune` globally, or +`remote.<name>.prune` per-remote in the config. See +linkgit:git-config[1]. + +Here's where things get tricky and more specific. The pruning feature +doesn't actually care about branches, instead it'll prune local <-> +remote-references as a function of the refspec of the remote (see +`<refspec>` and <<CRTB,CONFIGURED REMOTE-TRACKING BRANCHES>> above). + +Therefore if the refspec for the remote includes +e.g. `refs/tags/*:refs/tags/*`, or you manually run e.g. `git fetch +--prune <name> "refs/tags/*:refs/tags/*"` it won't be stale remote +tracking branches that are deleted, but any local tag that doesn't +exist on the remote. + +This might not be what you expect, i.e. you want to prune remote +`<name>`, but also explicitly fetch tags from it, so when you fetch +from it you delete all your local tags, most of which may not have +come from the `<name>` remote in the first place. + +So be careful when using this with a refspec like +`refs/tags/*:refs/tags/*`, or any other refspec which might map +references from multiple remotes to the same local namespace. + +Since keeping up-to-date with both branches and tags on the remote is +a common use-case the `--prune-tags` option can be supplied along with +`--prune` to prune local tags that don't exist on the remote, and +force-update those tags that differ. Tag pruning can also be enabled +with `fetch.pruneTags` or `remote.<name>.pruneTags` in the config. See +linkgit:git-config[1]. + +The `--prune-tags` option is equivalent to having +`refs/tags/*:refs/tags/*` declared in the refspecs of the remote. This +can lead to some seemingly strange interactions: + +------------------------------------------------ +# These both fetch tags +$ git fetch --no-tags origin 'refs/tags/*:refs/tags/*' +$ git fetch --no-tags --prune-tags origin +------------------------------------------------ + +The reason it doesn't error out when provided without `--prune` or its +config versions is for flexibility of the configured versions, and to +maintain a 1=1 mapping between what the command line flags do, and +what the configuration versions do. + +It's reasonable to e.g. configure `fetch.pruneTags=true` in +`~/.gitconfig` to have tags pruned whenever `git fetch --prune` is +run, without making every invocation of `git fetch` without `--prune` +an error. + +Pruning tags with `--prune-tags` also works when fetching a URL +instead of a named remote. 
These will all prune tags not found on +origin: + +------------------------------------------------ +$ git fetch origin --prune --prune-tags +$ git fetch origin --prune 'refs/tags/*:refs/tags/*' +$ git fetch <url of origin> --prune --prune-tags +$ git fetch <url of origin> --prune 'refs/tags/*:refs/tags/*' +------------------------------------------------ + OUTPUT ------ diff --git a/Documentation/git-index-pack.txt b/Documentation/git-index-pack.txt index 1b4b65d665..138edb47b6 100644 --- a/Documentation/git-index-pack.txt +++ b/Documentation/git-index-pack.txt @@ -77,6 +77,9 @@ OPTIONS --check-self-contained-and-connected:: Die if the pack contains broken links. For internal use only. +--fsck-objects:: + Die if the pack contains broken objects. For internal use only. + --threads=<n>:: Specifies the number of threads to spawn when resolving deltas. This requires that index-pack be compiled with diff --git a/Documentation/git-pack-objects.txt b/Documentation/git-pack-objects.txt index aa403d02f3..81bc490ac5 100644 --- a/Documentation/git-pack-objects.txt +++ b/Documentation/git-pack-objects.txt @@ -255,6 +255,17 @@ a missing object is encountered. This is the default action. The form '--missing=allow-any' will allow object traversal to continue if a missing object is encountered. Missing objects will silently be omitted from the results. ++ +The form '--missing=allow-promisor' is like 'allow-any', but will only +allow object traversal to continue for EXPECTED promisor missing objects. +Unexpected missing object will raise an error. + +--exclude-promisor-objects:: + Omit objects that are known to be in the promisor remote. (This + option has the purpose of operating only on locally created objects, + so that when we repack, we still maintain a distinction between + locally created objects [without .promisor] and objects from the + promisor remote [with .promisor].) This is used with partial clone. SEE ALSO -------- diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt index 8a861c1e0d..3277ca1432 100644 --- a/Documentation/git-rebase.txt +++ b/Documentation/git-rebase.txt @@ -12,7 +12,7 @@ SYNOPSIS [<upstream> [<branch>]] 'git rebase' [-i | --interactive] [options] [--exec <cmd>] [--onto <newbase>] --root [<branch>] -'git rebase' --continue | --skip | --abort | --quit | --edit-todo +'git rebase' --continue | --skip | --abort | --quit | --edit-todo | --show-current-patch DESCRIPTION ----------- @@ -244,12 +244,22 @@ leave out at most one of A and B, in which case it defaults to HEAD. Keep the commits that do not change anything from its parents in the result. +--allow-empty-message:: + By default, rebasing commits with an empty message will fail. + This option overrides that behavior, allowing commits with empty + messages to be rebased. + --skip:: Restart the rebasing process by skipping the current patch. --edit-todo:: Edit the todo list during an interactive rebase. +--show-current-patch:: + Show the current patch in an interactive rebase or when rebase + is stopped because of conflicts. This is the equivalent of + `git show REBASE_HEAD`. + -m:: --merge:: Use merging strategies to rebase. When the recursive (default) merge diff --git a/Documentation/git-remote.txt b/Documentation/git-remote.txt index 577b969c1b..4feddc0293 100644 --- a/Documentation/git-remote.txt +++ b/Documentation/git-remote.txt @@ -172,10 +172,14 @@ With `-n` option, the remote heads are not queried first with 'prune':: -Deletes all stale remote-tracking branches under <name>. 
-These stale branches have already been removed from the remote repository -referenced by <name>, but are still locally available in -"remotes/<name>". +Deletes stale references associated with <name>. By default, stale +remote-tracking branches under <name> are deleted, but depending on +global configuration and the configuration of the remote we might even +prune local tags that haven't been pushed there. Equivalent to `git +fetch --prune <name>`, except that no new references will be fetched. ++ +See the PRUNING section of linkgit:git-fetch[1] for what it'll prune +depending on various configuration. + With `--dry-run` option, report what branches will be pruned, but do not actually prune them. @@ -189,7 +193,7 @@ remotes.default is not defined, all remotes which do not have the configuration parameter remote.<name>.skipDefaultUpdate set to true will be updated. (See linkgit:git-config[1]). + -With `--prune` option, prune all the remotes that are updated. +With `--prune` option, run pruning against all the remotes that are updated. DISCUSSION diff --git a/Documentation/git-send-email.txt b/Documentation/git-send-email.txt index 8060ea35c5..71ef97ba9b 100644 --- a/Documentation/git-send-email.txt +++ b/Documentation/git-send-email.txt @@ -84,6 +84,11 @@ See the CONFIGURATION section for `sendemail.multiEdit`. the value of GIT_AUTHOR_IDENT, or GIT_COMMITTER_IDENT if that is not set, as returned by "git var -l". +--reply-to=<address>:: + Specify the address where replies from recipients should go to. + Use this if replies to messages should go to another address than what + is specified with the --from parameter. + --in-reply-to=<identifier>:: Make the first mail (or all the mails with `--no-thread`) appear as a reply to the given Message-Id, which avoids breaking threads to diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt index f9c91c721e..6c230c0c72 100644 --- a/Documentation/git-status.txt +++ b/Documentation/git-status.txt @@ -130,6 +130,11 @@ ignored, then the directory is not shown, but all contents are shown. without options are equivalent to 'always' and 'never' respectively. +--ahead-behind:: +--no-ahead-behind:: + Display or do not display detailed ahead/behind counts for the + branch relative to its upstream branch. Defaults to true. + <pathspec>...:: See the 'pathspec' entry in linkgit:gitglossary[7]. diff --git a/Documentation/git-tag.txt b/Documentation/git-tag.txt index 956fc019f9..1d17101bac 100644 --- a/Documentation/git-tag.txt +++ b/Documentation/git-tag.txt @@ -9,7 +9,7 @@ git-tag - Create, list, delete or verify a tag object signed with GPG SYNOPSIS -------- [verse] -'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>] +'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>] [-e] <tagname> [<commit> | <object>] 'git tag' -d <tagname>... 'git tag' [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>] @@ -167,6 +167,12 @@ This option is only applicable when listing tags without annotation lines. Implies `-a` if none of `-a`, `-s`, or `-u <keyid>` is given. +-e:: +--edit:: + The message taken from file with `-F` and command line with + `-m` are usually used as the tag message unmodified. + This option lets you further edit the message taken from these sources. + --cleanup=<mode>:: This option sets how the tag message is cleaned up. The '<mode>' can be one of 'verbatim', 'whitespace' and 'strip'. 
The diff --git a/Documentation/git-update-index.txt b/Documentation/git-update-index.txt index ad2383d7ed..3897a59ee9 100644 --- a/Documentation/git-update-index.txt +++ b/Documentation/git-update-index.txt @@ -464,6 +464,32 @@ command reads the index; while when `--[no-|force-]untracked-cache` are used, the untracked cache is immediately added to or removed from the index. +Before 2.17, the untracked cache had a bug where replacing a directory +with a symlink to another directory could cause it to incorrectly show +files tracked by git as untracked. See the "status: add a failing test +showing a core.untrackedCache bug" commit to git.git. A workaround for +that is (and this might work for other undiscovered bugs in the +future): + +---------------- +$ git -c core.untrackedCache=false status +---------------- + +This bug has also been shown to affect non-symlink cases of replacing +a directory with a file when it comes to the internal structures of +the untracked cache, but no case has been reported where this resulted in +wrong "git status" output. + +There are also cases where existing indexes written by git versions +before 2.17 will reference directories that don't exist anymore, +potentially causing many "could not open directory" warnings to be +printed on "git status". These are new warnings for existing issues +that were previously silently discarded. + +As with the bug described above the solution is to one-off do a "git +status" run with `core.untrackedCache=false` to flush out the leftover +bad data. + File System Monitor ------------------- diff --git a/Documentation/git-worktree.txt b/Documentation/git-worktree.txt index 5ac3f68ab5..e7eb24ab85 100644 --- a/Documentation/git-worktree.txt +++ b/Documentation/git-worktree.txt @@ -12,7 +12,9 @@ SYNOPSIS 'git worktree add' [-f] [--detach] [--checkout] [--lock] [-b <new-branch>] <path> [<commit-ish>] 'git worktree list' [--porcelain] 'git worktree lock' [--reason <string>] <worktree> +'git worktree move' <worktree> <new-path> 'git worktree prune' [-n] [-v] [--expire <expire>] +'git worktree remove' [--force] <worktree> 'git worktree unlock' <worktree> DESCRIPTION @@ -34,10 +36,6 @@ The working tree's administrative files in the repository (see `git worktree prune` in the main or any linked working tree to clean up any stale administrative files. -If you move a linked working tree, you need to manually update the -administrative files so that they do not get pruned automatically. See -section "DETAILS" for more information. - If a linked working tree is stored on a portable device or network share which is not always mounted, you can prevent its administrative files from being pruned by issuing the `git worktree lock` command, optionally @@ -80,10 +78,22 @@ files from being pruned automatically. This also prevents it from being moved or deleted. Optionally, specify a reason for the lock with `--reason`. +move:: + +Move a working tree to a new location. Note that the main working tree +or linked working trees containing submodules cannot be moved. + prune:: Prune working tree information in $GIT_DIR/worktrees. +remove:: + +Remove a working tree. Only clean working trees (no untracked files +and no modification in tracked files) can be removed. Unclean working +trees or ones with submodules can be removed with `--force`. The main +working tree cannot be removed. + unlock:: Unlock a working tree, allowing it to be pruned, moved or deleted. 
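A brief sketch of the new `git worktree` subcommands described above; the branch and path names are illustrative only:

------------------------------------------------
$ git worktree add ../hotfix v2.17.0-rc1     # create a linked working tree
$ git worktree move ../hotfix ../wt/hotfix   # relocate it; the gitdir bookkeeping is updated for you
$ git worktree remove ../wt/hotfix           # delete it once it is clean (or pass --force)
------------------------------------------------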
@@ -93,9 +103,10 @@ OPTIONS -f:: --force:: - By default, `add` refuses to create a new working tree when `<commit-ish>` is a branch name and - is already checked out by another working tree. This option overrides - that safeguard. + By default, `add` refuses to create a new working tree when + `<commit-ish>` is a branch name and is already checked out by + another working tree and `remove` refuses to remove an unclean + working tree. This option overrides that safeguard. -b <new-branch>:: -B <new-branch>:: @@ -197,7 +208,7 @@ thumb is do not make any assumption about whether a path belongs to $GIT_DIR or $GIT_COMMON_DIR when you need to directly access something inside $GIT_DIR. Use `git rev-parse --git-path` to get the final path. -If you move a linked working tree, you need to update the 'gitdir' file +If you manually move a linked working tree, you need to update the 'gitdir' file in the entry's directory. For example, if a linked working tree is moved to `/newpath/test-next` and its `.git` file points to `/path/main/.git/worktrees/test-next`, then update @@ -277,13 +288,6 @@ Multiple checkout in general is still experimental, and the support for submodules is incomplete. It is NOT recommended to make multiple checkouts of a superproject. -git-worktree could provide more automation for tasks currently -performed manually, such as: - -- `remove` to remove a linked working tree and its administrative files (and - warn if the working tree is dirty) -- `mv` to move or rename a working tree and update its administrative files - GIT --- Part of the linkgit:git[1] suite diff --git a/Documentation/git.txt b/Documentation/git.txt index 8163b5796b..4767860e72 100644 --- a/Documentation/git.txt +++ b/Documentation/git.txt @@ -849,6 +849,9 @@ Report bugs to the Git mailing list <git@vger.kernel.org> where the development and maintenance is primarily done. You do not have to be subscribed to the list to send a message there. +Issues which are security relevant should be disclosed privately to +the Git Security mailing list <git-security@googlegroups.com>. + SEE ALSO -------- linkgit:gittutorial[7], linkgit:gittutorial-2[7], diff --git a/Documentation/gitattributes.txt b/Documentation/gitattributes.txt index 30687de81a..1094fe2b5b 100644 --- a/Documentation/gitattributes.txt +++ b/Documentation/gitattributes.txt @@ -56,9 +56,16 @@ Unspecified:: When more than one pattern matches the path, a later line overrides an earlier line. This overriding is done per -attribute. The rules how the pattern matches paths are the -same as in `.gitignore` files; see linkgit:gitignore[5]. -Unlike `.gitignore`, negative patterns are forbidden. +attribute. + +The rules by which the pattern matches paths are the same as in +`.gitignore` files (see linkgit:gitignore[5]), with a few exceptions: + + - negative patterns are forbidden + + - patterns that match a directory do not recursively match paths + inside that directory (so using the trailing-slash `path/` syntax is + pointless in an attributes file; use `path/**` instead) When deciding what attributes are assigned to a path, Git consults `$GIT_DIR/info/attributes` file (which has the highest @@ -392,46 +399,14 @@ Long Running Filter Process If the filter command (a string value) is defined via `filter.<driver>.process` then Git can process all blobs with a single filter invocation for the entire life of a single Git -command. 
This is achieved by using a packet format (pkt-line, -see technical/protocol-common.txt) based protocol over standard -input and standard output as follows. All packets, except for the -"*CONTENT" packets and the "0000" flush packet, are considered -text and therefore are terminated by a LF. - -Git starts the filter when it encounters the first file -that needs to be cleaned or smudged. After the filter started -Git sends a welcome message ("git-filter-client"), a list of supported -protocol version numbers, and a flush packet. Git expects to read a welcome -response message ("git-filter-server"), exactly one protocol version number -from the previously sent list, and a flush packet. All further -communication will be based on the selected version. The remaining -protocol description below documents "version=2". Please note that -"version=42" in the example below does not exist and is only there -to illustrate how the protocol would look like with more than one -version. - -After the version negotiation Git sends a list of all capabilities that -it supports and a flush packet. Git expects to read a list of desired -capabilities, which must be a subset of the supported capabilities list, -and a flush packet as response: ------------------------- -packet: git> git-filter-client -packet: git> version=2 -packet: git> version=42 -packet: git> 0000 -packet: git< git-filter-server -packet: git< version=2 -packet: git< 0000 -packet: git> capability=clean -packet: git> capability=smudge -packet: git> capability=not-yet-invented -packet: git> 0000 -packet: git< capability=clean -packet: git< capability=smudge -packet: git< 0000 ------------------------- -Supported filter capabilities in version 2 are "clean", "smudge", -and "delay". +command. This is achieved by using the long-running process protocol +(described in technical/long-running-process-protocol.txt). + +When Git encounters the first file that needs to be cleaned or smudged, +it starts the filter and performs the handshake. In the handshake, the +welcome message sent by Git is "git-filter-client", only version 2 is +suppported, and the supported capabilities are "clean", "smudge", and +"delay". Afterwards Git sends a list of "key=value" pairs terminated with a flush packet. The list will contain at least the filter command @@ -517,12 +492,6 @@ the protocol then Git will stop the filter process and restart it with the next file that needs to be processed. Depending on the `filter.<driver>.required` flag Git will interpret that as error. -After the filter has processed a command it is expected to wait for -a "key=value" list containing the next command. Git will close -the command pipe on exit. The filter is expected to detect EOF -and exit gracefully on its own. Git will wait until the filter -process has stopped. - Delay ^^^^^ @@ -752,6 +721,8 @@ patterns are available: - `fountain` suitable for Fountain documents. +- `golang` suitable for source code in the Go language. + - `html` suitable for HTML/XHTML documents. - `java` suitable for source code in the Java language. diff --git a/Documentation/gitremote-helpers.txt b/Documentation/gitremote-helpers.txt index 4a584f3c5d..4b8c93ec59 100644 --- a/Documentation/gitremote-helpers.txt +++ b/Documentation/gitremote-helpers.txt @@ -466,6 +466,13 @@ set by Git if the remote helper has the 'option' capability. Transmit <string> as a push option. As the push option must not contain LF or NUL characters, the string is not encoded. 
+'option from-promisor' {'true'|'false'}:: + Indicate that these objects are being fetched from a promisor. + +'option no-dependents' {'true'|'false'}:: + Indicate that only the objects wanted need to be fetched, not + their dependents. + SEE ALSO -------- linkgit:git-remote[1] diff --git a/Documentation/merge-options.txt b/Documentation/merge-options.txt index 3888c3ff85..63a3fc0954 100644 --- a/Documentation/merge-options.txt +++ b/Documentation/merge-options.txt @@ -35,7 +35,8 @@ set to `no` at the beginning of them. --no-ff:: Create a merge commit even when the merge resolves as a fast-forward. This is the default behaviour when merging an - annotated (and possibly signed) tag. + annotated (and possibly signed) tag that is not stored in + its natural place in 'refs/tags/' hierarchy. --ff-only:: Refuse to merge and exit with a non-zero status unless the diff --git a/Documentation/merge-strategies.txt b/Documentation/merge-strategies.txt index fd5d748d1b..4a58aad4b8 100644 --- a/Documentation/merge-strategies.txt +++ b/Documentation/merge-strategies.txt @@ -40,7 +40,7 @@ the other tree did, declaring 'our' history contains all that happened in it. theirs;; This is the opposite of 'ours'; note that, unlike 'ours', there is - no 'theirs' merge stragegy to confuse this merge option with. + no 'theirs' merge strategy to confuse this merge option with. patience;; With this option, 'merge-recursive' spends a little extra time diff --git a/Documentation/pretty-formats.txt b/Documentation/pretty-formats.txt index e664c088a5..6109ef09aa 100644 --- a/Documentation/pretty-formats.txt +++ b/Documentation/pretty-formats.txt @@ -202,7 +202,7 @@ endif::git-rev-list[] - '%>>(<N>)', '%>>|(<N>)': similar to '%>(<N>)', '%>|(<N>)' respectively, except that if the next placeholder takes more spaces than given and there are spaces on its left, use those spaces -- '%><(<N>)', '%><|(<N>)': similar to '% <(<N>)', '%<|(<N>)' +- '%><(<N>)', '%><|(<N>)': similar to '%<(<N>)', '%<|(<N>)' respectively, but padding both sides (i.e. the text is centered) - %(trailers[:options]): display the trailers of the body as interpreted by linkgit:git-interpret-trailers[1]. The `trailers` string may be diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt index 22f5c9b43d..7b273635de 100644 --- a/Documentation/rev-list-options.txt +++ b/Documentation/rev-list-options.txt @@ -750,10 +750,21 @@ The form '--missing=allow-any' will allow object traversal to continue if a missing object is encountered. Missing objects will silently be omitted from the results. + +The form '--missing=allow-promisor' is like 'allow-any', but will only +allow object traversal to continue for EXPECTED promisor missing objects. +Unexpected missing objects will raise an error. ++ The form '--missing=print' is like 'allow-any', but will also print a list of the missing objects. Object IDs are prefixed with a ``?'' character. endif::git-rev-list[] +--exclude-promisor-objects:: + (For internal use only.) Prefilter object traversal at + promisor boundary. This is used with partial clone. This is + stronger than `--missing=allow-promisor` because it limits the + traversal, rather than just silencing errors about missing + objects. + --no-walk[=(sorted|unsorted)]:: Only show the given commits, but do not traverse their ancestors. This has no effect if a range is specified. 
If the argument diff --git a/Documentation/technical/api-object-access.txt b/Documentation/technical/api-object-access.txt index 03bb0e950d..a1162e5bcd 100644 --- a/Documentation/technical/api-object-access.txt +++ b/Documentation/technical/api-object-access.txt @@ -7,7 +7,7 @@ Talk about <sha1_file.c> and <object.h> family, things like * read_object_with_reference() * has_sha1_file() * write_sha1_file() -* pretend_sha1_file() +* pretend_object_file() * lookup_{object,commit,tag,blob,tree} * parse_{object,commit,tag,blob,tree} * Use of object flags diff --git a/Documentation/technical/http-protocol.txt b/Documentation/technical/http-protocol.txt index a0e45f2889..64f49d0bbb 100644 --- a/Documentation/technical/http-protocol.txt +++ b/Documentation/technical/http-protocol.txt @@ -214,10 +214,12 @@ smart server reply: S: Cache-Control: no-cache S: S: 001e# service=git-upload-pack\n + S: 0000 S: 004895dcfa3633004da0049d3d0fa03f80589cbcaf31 refs/heads/maint\0multi_ack\n S: 0042d049f6c27a2244e12041955e262a404c7faba355 refs/heads/master\n S: 003c2cb58b79488a98d2721cea644875a8dd0026b115 refs/tags/v1.0\n S: 003fa3c2e2402b99163d1d59756e5f207ae21cccba4c refs/tags/v1.0^{}\n + S: 0000 The client may send Extra Parameters (see Documentation/technical/pack-protocol.txt) as a colon-separated string @@ -277,6 +279,7 @@ The returned response contains "version 1" if "version=1" was sent as an Extra Parameter. smart_reply = PKT-LINE("# service=$servicename" LF) + "0000" *1("version 1") ref_list "0000" diff --git a/Documentation/technical/long-running-process-protocol.txt b/Documentation/technical/long-running-process-protocol.txt new file mode 100644 index 0000000000..aa0aa9af1c --- /dev/null +++ b/Documentation/technical/long-running-process-protocol.txt @@ -0,0 +1,50 @@ +Long-running process protocol +============================= + +This protocol is used when Git needs to communicate with an external +process throughout the entire life of a single Git command. All +communication is in pkt-line format (see technical/protocol-common.txt) +over standard input and standard output. + +Handshake +--------- + +Git starts by sending a welcome message (for example, +"git-filter-client"), a list of supported protocol version numbers, and +a flush packet. Git expects to read the welcome message with "server" +instead of "client" (for example, "git-filter-server"), exactly one +protocol version number from the previously sent list, and a flush +packet. All further communication will be based on the selected version. +The remaining protocol description below documents "version=2". Please +note that "version=42" in the example below does not exist and is only +there to illustrate how the protocol would look like with more than one +version. + +After the version negotiation Git sends a list of all capabilities that +it supports and a flush packet. Git expects to read a list of desired +capabilities, which must be a subset of the supported capabilities list, +and a flush packet as response: +------------------------ +packet: git> git-filter-client +packet: git> version=2 +packet: git> version=42 +packet: git> 0000 +packet: git< git-filter-server +packet: git< version=2 +packet: git< 0000 +packet: git> capability=clean +packet: git> capability=smudge +packet: git> capability=not-yet-invented +packet: git> 0000 +packet: git< capability=clean +packet: git< capability=smudge +packet: git< 0000 +------------------------ + +Shutdown +-------- + +Git will close +the command pipe on exit. 
The filter is expected to detect EOF +and exit gracefully on its own. Git will wait until the filter +process has stopped. diff --git a/Documentation/technical/pack-protocol.txt b/Documentation/technical/pack-protocol.txt index cd31edc91e..7fee6b780a 100644 --- a/Documentation/technical/pack-protocol.txt +++ b/Documentation/technical/pack-protocol.txt @@ -241,6 +241,7 @@ out of what the server said it could do with the first 'want' line. upload-request = want-list *shallow-line *1depth-request + [filter-request] flush-pkt want-list = first-want @@ -256,6 +257,8 @@ out of what the server said it could do with the first 'want' line. additional-want = PKT-LINE("want" SP obj-id) depth = 1*DIGIT + + filter-request = PKT-LINE("filter" SP filter-spec) ---- Clients MUST send all the obj-ids it wants from the reference @@ -278,6 +281,11 @@ complete those commits. Commits whose parents are not received as a result are defined as shallow and marked as such in the server. This information is sent back to the client in the next step. +The client can optionally request that pack-objects omit various +objects from the packfile using one of several filtering techniques. +These are intended for use with partial clone and partial fetch +operations. See `rev-list` for possible "filter-spec" values. + Once all the 'want's and 'shallow's (and optional 'deepen') are transferred, clients MUST send a flush-pkt, to tell the server side that it is done sending the list. diff --git a/Documentation/technical/protocol-capabilities.txt b/Documentation/technical/protocol-capabilities.txt index 26dcc6f502..332d209b58 100644 --- a/Documentation/technical/protocol-capabilities.txt +++ b/Documentation/technical/protocol-capabilities.txt @@ -309,3 +309,11 @@ to accept a signed push certificate, and asks the <nonce> to be included in the push certificate. A send-pack client MUST NOT send a push-cert packet unless the receive-pack server advertises this capability. + +filter +------ + +If the upload-pack server advertises the 'filter' capability, +fetch-pack may send "filter" commands to request a partial clone +or partial fetch and request that the server omit various objects +from the packfile. diff --git a/Documentation/technical/repository-version.txt b/Documentation/technical/repository-version.txt index 00ad37986e..e03eaccebc 100644 --- a/Documentation/technical/repository-version.txt +++ b/Documentation/technical/repository-version.txt @@ -86,3 +86,15 @@ for testing format-1 compatibility. When the config key `extensions.preciousObjects` is set to `true`, objects in the repository MUST NOT be deleted (e.g., by `git-prune` or `git repack -d`). + +`partialclone` +~~~~~~~~~~~~~~ + +When the config key `extensions.partialclone` is set, it indicates +that the repo was created with a partial clone (or later performed +a partial fetch) and that the remote may have omitted sending +certain unwanted objects. Such a remote is called a "promisor remote" +and it promises that all such omitted objects can be fetched from it +in the future. + +The value of this key is the name of the promisor remote. 
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN index 8945e05f52..b4fb7d9a39 100755 --- a/GIT-VERSION-GEN +++ b/GIT-VERSION-GEN @@ -1,7 +1,7 @@ #!/bin/sh GVF=GIT-VERSION-FILE -DEF_VER=v2.16.3 +DEF_VER=v2.17.0-rc1 LF=' ' @@ -84,9 +84,29 @@ Issues of note: GIT_EXEC_PATH=`pwd` PATH=`pwd`:$PATH - GITPERLLIB=`pwd`/perl/blib/lib + GITPERLLIB=`pwd`/perl/build/lib export GIT_EXEC_PATH PATH GITPERLLIB + - By default (unless NO_PERL is provided) Git will ship various perl + scripts. However, for simplicity it doesn't use the + ExtUtils::MakeMaker toolchain to decide where to place the perl + libraries. Depending on the system this can result in the perl + libraries not being where you'd like them if they're expected to be + used by things other than Git itself. + + Manually supplying a perllibdir prefix should fix this, if this is + a problem you care about, e.g.: + + prefix=/usr perllibdir=/usr/$(/usr/bin/perl -MConfig -wle 'print substr $Config{installsitelib}, 1 + length $Config{siteprefixexp}') + + Will result in e.g. perllibdir=/usr/share/perl/5.26.1 on Debian, + perllibdir=/usr/share/perl5 (which we'd use by default) on CentOS. + + - Unless NO_PERL is provided Git will ship various perl libraries it + needs. Distributors of Git will usually want to set + NO_PERL_CPAN_FALLBACKS if NO_PERL is not provided to use their own + copies of the CPAN modules Git needs. + - Git is reasonably self-sufficient, but does depend on a few external programs and libraries. Git can be used without most of them by adding the approriate "NO_<LIBRARY>=YesPlease" to the make command line or @@ -106,7 +126,8 @@ Issues of note: Redhat/Fedora are reported to ship Perl binary package with some core modules stripped away (see http://lwn.net/Articles/477234/), so you might need to install additional packages other than Perl - itself, e.g. Time::HiRes. + itself, e.g. Digest::MD5, File::Spec, File::Temp, Net::Domain, + Net::SMTP, and Time::HiRes. - git-imap-send needs the OpenSSL library to talk IMAP over SSL if you are using libcurl older than 7.34.0. Otherwise you can use @@ -294,11 +294,14 @@ all:: # # Define PERL_PATH to the path of your Perl binary (usually /usr/bin/perl). # -# Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's -# MakeMaker (e.g. using ActiveState under Cygwin). -# # Define NO_PERL if you do not want Perl scripts or libraries at all. # +# Define NO_PERL_CPAN_FALLBACKS if you do not want to install bundled +# copies of CPAN modules that serve as a fallback in case the modules +# are not available on the system. This option is intended for +# distributions that want to use their packaged versions of Perl +# modules, instead of the fallbacks shipped with Git. +# # Define PYTHON_PATH to the path of your Python binary (often /usr/bin/python # but /usr/bin/python2.7 on some platforms). 
# @@ -479,6 +482,7 @@ gitexecdir = libexec/git-core mergetoolsdir = $(gitexecdir)/mergetools sharedir = $(prefix)/share gitwebdir = $(sharedir)/gitweb +perllibdir = $(sharedir)/perl5 localedir = $(sharedir)/locale template_dir = share/git-core/templates htmldir = $(prefix)/share/doc/git-doc @@ -492,7 +496,7 @@ mandir_relative = $(patsubst $(prefix)/%,%,$(mandir)) infodir_relative = $(patsubst $(prefix)/%,%,$(infodir)) htmldir_relative = $(patsubst $(prefix)/%,%,$(htmldir)) -export prefix bindir sharedir sysconfdir gitwebdir localedir +export prefix bindir sharedir sysconfdir gitwebdir perllibdir localedir CC = cc AR = ar @@ -804,6 +808,7 @@ LIB_OBJS += ewah/ewah_bitmap.o LIB_OBJS += ewah/ewah_io.o LIB_OBJS += ewah/ewah_rlw.o LIB_OBJS += exec_cmd.o +LIB_OBJS += fetch-object.o LIB_OBJS += fetch-pack.o LIB_OBJS += fsck.o LIB_OBJS += fsmonitor.o @@ -832,7 +837,6 @@ LIB_OBJS += merge.o LIB_OBJS += merge-blobs.o LIB_OBJS += merge-recursive.o LIB_OBJS += mergesort.o -LIB_OBJS += mru.o LIB_OBJS += name-hash.o LIB_OBJS += notes.o LIB_OBJS += notes-cache.o @@ -1515,7 +1519,9 @@ else LIB_OBJS += sha1dc_git.o ifdef DC_SHA1_EXTERNAL ifdef DC_SHA1_SUBMODULE + ifneq ($(DC_SHA1_SUBMODULE),auto) $(error Only set DC_SHA1_EXTERNAL or DC_SHA1_SUBMODULE, not both) + endif endif BASIC_CFLAGS += -DDC_SHA1_EXTERNAL EXTLIBS += -lsha1detectcoll @@ -1543,9 +1549,6 @@ ifdef SHA1_MAX_BLOCK_SIZE LIB_OBJS += compat/sha1-chunked.o BASIC_CFLAGS += -DSHA1_MAX_BLOCK_SIZE="$(SHA1_MAX_BLOCK_SIZE)" endif -ifdef NO_PERL_MAKEMAKER - export NO_PERL_MAKEMAKER -endif ifdef NO_HSTRERROR COMPAT_CFLAGS += -DNO_HSTRERROR COMPAT_OBJS += compat/hstrerror.o @@ -1732,8 +1735,10 @@ ETC_GITATTRIBUTES_SQ = $(subst ','\'',$(ETC_GITATTRIBUTES)) DESTDIR_SQ = $(subst ','\'',$(DESTDIR)) bindir_SQ = $(subst ','\'',$(bindir)) bindir_relative_SQ = $(subst ','\'',$(bindir_relative)) +mandir_SQ = $(subst ','\'',$(mandir)) mandir_relative_SQ = $(subst ','\'',$(mandir_relative)) infodir_relative_SQ = $(subst ','\'',$(infodir_relative)) +perllibdir_SQ = $(subst ','\'',$(perllibdir)) localedir_SQ = $(subst ','\'',$(localedir)) gitexecdir_SQ = $(subst ','\'',$(gitexecdir)) template_dir_SQ = $(subst ','\'',$(template_dir)) @@ -1844,9 +1849,6 @@ ifndef NO_TCLTK $(QUIET_SUBDIR0)git-gui $(QUIET_SUBDIR1) gitexecdir='$(gitexec_instdir_SQ)' all $(QUIET_SUBDIR0)gitk-git $(QUIET_SUBDIR1) all endif -ifndef NO_PERL - $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' localedir='$(localedir_SQ)' all -endif $(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) SHELL_PATH='$(SHELL_PATH_SQ)' PERL_PATH='$(PERL_PATH_SQ)' please_set_SHELL_PATH_to_a_more_modern_shell: @@ -1928,7 +1930,8 @@ common-cmds.h: $(wildcard Documentation/git-*.txt) SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):$(GIT_VERSION):\ $(localedir_SQ):$(NO_CURL):$(USE_GETTEXT_SCHEME):$(SANE_TOOL_PATH_SQ):\ - $(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV) + $(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV):\ + $(perllibdir_SQ) define cmd_munge_script $(RM) $@ $@+ && \ sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ @@ -1972,23 +1975,12 @@ git.res: git.rc GIT-VERSION-FILE $(SCRIPT_PERL_GEN): GIT-BUILD-OPTIONS ifndef NO_PERL -$(SCRIPT_PERL_GEN): perl/perl.mak +$(SCRIPT_PERL_GEN): -perl/perl.mak: perl/PM.stamp - -perl/PM.stamp: FORCE - @$(FIND) perl -type f -name '*.pm' | sort >$@+ && \ - $(PERL_PATH) -V >>$@+ && \ - { cmp $@+ $@ >/dev/null 2>/dev/null || mv $@+ $@; } && \ - $(RM) $@+ - -perl/perl.mak: GIT-CFLAGS GIT-PREFIX perl/Makefile perl/Makefile.PL - 
$(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' $(@F) - -PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ) -$(SCRIPT_PERL_GEN): % : %.perl perl/perl.mak GIT-PERL-DEFINES GIT-VERSION-FILE +PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ):$(perllibdir_SQ) +$(SCRIPT_PERL_GEN): % : %.perl GIT-PERL-DEFINES GIT-VERSION-FILE $(QUIET_GEN)$(RM) $@ $@+ && \ - INSTLIBDIR=`MAKEFLAGS= $(MAKE) -C perl -s --no-print-directory instlibdir` && \ + INSTLIBDIR='$(perllibdir_SQ)' && \ INSTLIBDIR_EXTRA='$(PERLLIB_EXTRA_SQ)' && \ INSTLIBDIR="$$INSTLIBDIR$${INSTLIBDIR_EXTRA:+:$$INSTLIBDIR_EXTRA}" && \ sed -e '1{' \ @@ -2232,13 +2224,15 @@ $(VCSSVN_LIB): $(VCSSVN_OBJS) export DEFAULT_EDITOR DEFAULT_PAGER -.PHONY: doc man html info pdf -doc: +.PHONY: doc man man-perl html info pdf +doc: man-perl $(MAKE) -C Documentation all -man: +man: man-perl $(MAKE) -C Documentation man +man-perl: perl/build/man/man3/Git.3pm + html: $(MAKE) -C Documentation html @@ -2314,6 +2308,29 @@ endif po/build/locale/%/LC_MESSAGES/git.mo: po/%.po $(QUIET_MSGFMT)mkdir -p $(dir $@) && $(MSGFMT) -o $@ $< +LIB_PERL := $(wildcard perl/Git.pm perl/Git/*.pm perl/Git/*/*.pm perl/Git/*/*/*.pm) +LIB_PERL_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_PERL)) +LIB_CPAN := $(wildcard perl/FromCPAN/*.pm perl/FromCPAN/*/*.pm) +LIB_CPAN_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_CPAN)) + +ifndef NO_PERL +all:: $(LIB_PERL_GEN) +ifndef NO_PERL_CPAN_FALLBACKS +all:: $(LIB_CPAN_GEN) +endif +NO_PERL_CPAN_FALLBACKS_SQ = $(subst ','\'',$(NO_PERL_CPAN_FALLBACKS)) +endif + +perl/build/lib/%.pm: perl/%.pm + $(QUIET_GEN)mkdir -p $(dir $@) && \ + sed -e 's|@@LOCALEDIR@@|$(localedir_SQ)|g' \ + -e 's|@@NO_PERL_CPAN_FALLBACKS@@|$(NO_PERL_CPAN_FALLBACKS_SQ)|g' \ + < $< > $@ + +perl/build/man/man3/Git.3pm: perl/Git.pm + $(QUIET_GEN)mkdir -p $(dir $@) && \ + pod2man $< $@ + FIND_SOURCE_FILES = ( \ git ls-files \ '*.[hcS]' \ @@ -2574,7 +2591,9 @@ ifndef NO_GETTEXT (cd '$(DESTDIR_SQ)$(localedir_SQ)' && umask 022 && $(TAR) xof -) endif ifndef NO_PERL - $(MAKE) -C perl prefix='$(prefix_SQ)' DESTDIR='$(DESTDIR_SQ)' install + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perllibdir_SQ)' + (cd perl/build/lib && $(TAR) cf - .) | \ + (cd '$(DESTDIR_SQ)$(perllibdir_SQ)' && umask 022 && $(TAR) xof -) $(MAKE) -C gitweb install endif ifndef NO_TCLTK @@ -2619,17 +2638,22 @@ endif done && \ ./check_bindir "z$$bindir" "z$$execdir" "$$bindir/git-add$X" -.PHONY: install-gitweb install-doc install-man install-html install-info install-pdf +.PHONY: install-gitweb install-doc install-man install-man-perl install-html install-info install-pdf .PHONY: quick-install-doc quick-install-man quick-install-html install-gitweb: $(MAKE) -C gitweb install -install-doc: +install-doc: install-man-perl $(MAKE) -C Documentation install -install-man: +install-man: install-man-perl $(MAKE) -C Documentation install-man +install-man-perl: man-perl + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(mandir_SQ)/man3' + (cd perl/build/man/man3 && $(TAR) cf - .) 
| \ + (cd '$(DESTDIR_SQ)$(mandir_SQ)/man3' && umask 022 && $(TAR) xof -) + install-html: $(MAKE) -C Documentation install-html @@ -2664,6 +2688,21 @@ dist: git-archive$(X) configure $(GIT_TARNAME)/configure \ $(GIT_TARNAME)/version \ $(GIT_TARNAME)/git-gui/version +ifdef DC_SHA1_SUBMODULE + @mkdir -p $(GIT_TARNAME)/sha1collisiondetection/lib + @cp sha1collisiondetection/LICENSE.txt \ + $(GIT_TARNAME)/sha1collisiondetection/ + @cp sha1collisiondetection/LICENSE.txt \ + $(GIT_TARNAME)/sha1collisiondetection/ + @cp sha1collisiondetection/lib/sha1.[ch] \ + $(GIT_TARNAME)/sha1collisiondetection/lib/ + @cp sha1collisiondetection/lib/ubc_check.[ch] \ + $(GIT_TARNAME)/sha1collisiondetection/lib/ + $(TAR) rf $(GIT_TARNAME).tar \ + $(GIT_TARNAME)/sha1collisiondetection/LICENSE.txt \ + $(GIT_TARNAME)/sha1collisiondetection/lib/sha1.[ch] \ + $(GIT_TARNAME)/sha1collisiondetection/lib/ubc_check.[ch] +endif @$(RM) -r $(GIT_TARNAME) gzip -f -9 $(GIT_TARNAME).tar @@ -2713,7 +2752,7 @@ clean: profile-clean coverage-clean $(RM) $(TEST_PROGRAMS) $(NO_INSTALL) $(RM) -r bin-wrappers $(dep_dirs) $(RM) -r po/build/ - $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h $(ETAGS_TARGET) tags cscope* + $(RM) *.pyc *.pyo */*.pyc */*.pyo common-cmds.h $(ETAGS_TARGET) tags cscope* $(RM) -r $(GIT_TARNAME) .doc-tmp-dir $(RM) $(GIT_TARNAME).tar.gz git-core_$(GIT_VERSION)-*.tar.gz $(RM) $(htmldocs).tar.gz $(manpages).tar.gz @@ -2721,7 +2760,7 @@ clean: profile-clean coverage-clean $(MAKE) -C Documentation/ clean ifndef NO_PERL $(MAKE) -C gitweb clean - $(MAKE) -C perl clean + $(RM) -r perl/build/ endif $(MAKE) -C templates/ clean $(MAKE) -C t/ clean @@ -1 +1 @@ -Documentation/RelNotes/2.16.3.txt
\ No newline at end of file +Documentation/RelNotes/2.17.0.txt
\ No newline at end of file @@ -950,7 +950,7 @@ static int gitdiff_verify_name(struct apply_state *state, } free(another); } else { - if (!starts_with(line, "/dev/null\n")) + if (!is_dev_null(line)) return error(_("git apply: bad git-diff - expected /dev/null on line %d"), state->linenr); } @@ -2263,8 +2263,8 @@ static void show_stats(struct apply_state *state, struct patch *patch) static int read_old_data(struct stat *st, struct patch *patch, const char *path, struct strbuf *buf) { - enum safe_crlf safe_crlf = patch->crlf_in_old ? - SAFE_CRLF_KEEP_CRLF : SAFE_CRLF_RENORMALIZE; + int conv_flags = patch->crlf_in_old ? + CONV_EOL_KEEP_CRLF : CONV_EOL_RENORMALIZE; switch (st->st_mode & S_IFMT) { case S_IFLNK: if (strbuf_readlink(buf, path, st->st_size) < 0) @@ -2281,7 +2281,7 @@ static int read_old_data(struct stat *st, struct patch *patch, * should never look at the index when explicit crlf option * is given. */ - convert_to_git(NULL, path, buf->buf, buf->len, buf, safe_crlf); + convert_to_git(NULL, path, buf->buf, buf->len, buf, conv_flags); return 0; default: return -1; @@ -2301,7 +2301,7 @@ static void update_pre_post_images(struct image *preimage, size_t len, size_t postlen) { int i, ctx, reduced; - char *new, *old, *fixed; + char *new_buf, *old_buf, *fixed; struct image fixed_preimage; /* @@ -2327,25 +2327,25 @@ static void update_pre_post_images(struct image *preimage, * We trust the caller to tell us if the update can be done * in place (postlen==0) or not. */ - old = postimage->buf; + old_buf = postimage->buf; if (postlen) - new = postimage->buf = xmalloc(postlen); + new_buf = postimage->buf = xmalloc(postlen); else - new = old; + new_buf = old_buf; fixed = preimage->buf; for (i = reduced = ctx = 0; i < postimage->nr; i++) { size_t l_len = postimage->line[i].len; if (!(postimage->line[i].flag & LINE_COMMON)) { /* an added line -- no counterparts in preimage */ - memmove(new, old, l_len); - old += l_len; - new += l_len; + memmove(new_buf, old_buf, l_len); + old_buf += l_len; + new_buf += l_len; continue; } /* a common context -- skip it in the original postimage */ - old += l_len; + old_buf += l_len; /* and find the corresponding one in the fixed preimage */ while (ctx < preimage->nr && @@ -2365,29 +2365,29 @@ static void update_pre_post_images(struct image *preimage, /* and copy it in, while fixing the line length */ l_len = preimage->line[ctx].len; - memcpy(new, fixed, l_len); - new += l_len; + memcpy(new_buf, fixed, l_len); + new_buf += l_len; fixed += l_len; postimage->line[i].len = l_len; ctx++; } if (postlen - ? postlen < new - postimage->buf - : postimage->len < new - postimage->buf) + ? 
postlen < new_buf - postimage->buf + : postimage->len < new_buf - postimage->buf) die("BUG: caller miscounted postlen: asked %d, orig = %d, used = %d", - (int)postlen, (int) postimage->len, (int)(new - postimage->buf)); + (int)postlen, (int) postimage->len, (int)(new_buf - postimage->buf)); /* Fix the length of the whole thing */ - postimage->len = new - postimage->buf; + postimage->len = new_buf - postimage->buf; postimage->nr -= reduced; } static int line_by_line_fuzzy_match(struct image *img, struct image *preimage, struct image *postimage, - unsigned long try, - int try_lno, + unsigned long current, + int current_lno, int preimage_limit) { int i; @@ -2404,9 +2404,9 @@ static int line_by_line_fuzzy_match(struct image *img, for (i = 0; i < preimage_limit; i++) { size_t prelen = preimage->line[i].len; - size_t imglen = img->line[try_lno+i].len; + size_t imglen = img->line[current_lno+i].len; - if (!fuzzy_matchlines(img->buf + try + imgoff, imglen, + if (!fuzzy_matchlines(img->buf + current + imgoff, imglen, preimage->buf + preoff, prelen)) return 0; if (preimage->line[i].flag & LINE_COMMON) @@ -2443,7 +2443,7 @@ static int line_by_line_fuzzy_match(struct image *img, */ extra_chars = preimage_end - preimage_eof; strbuf_init(&fixed, imgoff + extra_chars); - strbuf_add(&fixed, img->buf + try, imgoff); + strbuf_add(&fixed, img->buf + current, imgoff); strbuf_add(&fixed, preimage_eof, extra_chars); fixed_buf = strbuf_detach(&fixed, &fixed_len); update_pre_post_images(preimage, postimage, @@ -2455,8 +2455,8 @@ static int match_fragment(struct apply_state *state, struct image *img, struct image *preimage, struct image *postimage, - unsigned long try, - int try_lno, + unsigned long current, + int current_lno, unsigned ws_rule, int match_beginning, int match_end) { @@ -2466,12 +2466,12 @@ static int match_fragment(struct apply_state *state, size_t fixed_len, postlen; int preimage_limit; - if (preimage->nr + try_lno <= img->nr) { + if (preimage->nr + current_lno <= img->nr) { /* * The hunk falls within the boundaries of img. */ preimage_limit = preimage->nr; - if (match_end && (preimage->nr + try_lno != img->nr)) + if (match_end && (preimage->nr + current_lno != img->nr)) return 0; } else if (state->ws_error_action == correct_ws_error && (ws_rule & WS_BLANK_AT_EOF)) { @@ -2482,7 +2482,7 @@ static int match_fragment(struct apply_state *state, * match with img, and the remainder of the preimage * must be blank. */ - preimage_limit = img->nr - try_lno; + preimage_limit = img->nr - current_lno; } else { /* * The hunk extends beyond the end of the img and @@ -2492,27 +2492,27 @@ static int match_fragment(struct apply_state *state, return 0; } - if (match_beginning && try_lno) + if (match_beginning && current_lno) return 0; /* Quick hash check */ for (i = 0; i < preimage_limit; i++) - if ((img->line[try_lno + i].flag & LINE_PATCHED) || - (preimage->line[i].hash != img->line[try_lno + i].hash)) + if ((img->line[current_lno + i].flag & LINE_PATCHED) || + (preimage->line[i].hash != img->line[current_lno + i].hash)) return 0; if (preimage_limit == preimage->nr) { /* * Do we have an exact match? If we were told to match - * at the end, size must be exactly at try+fragsize, - * otherwise try+fragsize must be still within the preimage, + * at the end, size must be exactly at current+fragsize, + * otherwise current+fragsize must be still within the preimage, * and either case, the old piece should match the preimage * exactly. */ if ((match_end - ? 
(try + preimage->len == img->len) - : (try + preimage->len <= img->len)) && - !memcmp(img->buf + try, preimage->buf, preimage->len)) + ? (current + preimage->len == img->len) + : (current + preimage->len <= img->len)) && + !memcmp(img->buf + current, preimage->buf, preimage->len)) return 1; } else { /* @@ -2543,7 +2543,7 @@ static int match_fragment(struct apply_state *state, */ if (state->ws_ignore_action == ignore_ws_change) return line_by_line_fuzzy_match(img, preimage, postimage, - try, try_lno, preimage_limit); + current, current_lno, preimage_limit); if (state->ws_error_action != correct_ws_error) return 0; @@ -2577,10 +2577,10 @@ static int match_fragment(struct apply_state *state, */ strbuf_init(&fixed, preimage->len + 1); orig = preimage->buf; - target = img->buf + try; + target = img->buf + current; for (i = 0; i < preimage_limit; i++) { size_t oldlen = preimage->line[i].len; - size_t tgtlen = img->line[try_lno + i].len; + size_t tgtlen = img->line[current_lno + i].len; size_t fixstart = fixed.len; struct strbuf tgtfix; int match; @@ -2666,8 +2666,8 @@ static int find_pos(struct apply_state *state, int match_beginning, int match_end) { int i; - unsigned long backwards, forwards, try; - int backwards_lno, forwards_lno, try_lno; + unsigned long backwards, forwards, current; + int backwards_lno, forwards_lno, current_lno; /* * If match_beginning or match_end is specified, there is no @@ -2687,25 +2687,25 @@ static int find_pos(struct apply_state *state, if ((size_t) line > img->nr) line = img->nr; - try = 0; + current = 0; for (i = 0; i < line; i++) - try += img->line[i].len; + current += img->line[i].len; /* * There's probably some smart way to do this, but I'll leave * that to the smart and beautiful people. I'm simple and stupid. */ - backwards = try; + backwards = current; backwards_lno = line; - forwards = try; + forwards = current; forwards_lno = line; - try_lno = line; + current_lno = line; for (i = 0; ; i++) { if (match_fragment(state, img, preimage, postimage, - try, try_lno, ws_rule, + current, current_lno, ws_rule, match_beginning, match_end)) - return try_lno; + return current_lno; again: if (backwards_lno == 0 && forwards_lno == img->nr) @@ -2718,8 +2718,8 @@ static int find_pos(struct apply_state *state, } backwards_lno--; backwards -= img->line[backwards_lno].len; - try = backwards; - try_lno = backwards_lno; + current = backwards; + current_lno = backwards_lno; } else { if (forwards_lno == img->nr) { i++; @@ -2727,8 +2727,8 @@ static int find_pos(struct apply_state *state, } forwards += img->line[forwards_lno].len; forwards_lno++; - try = forwards; - try_lno = forwards_lno; + current = forwards; + current_lno = forwards_lno; } } @@ -3154,7 +3154,7 @@ static int apply_binary(struct apply_state *state, * See if the old one matches what the patch * applies to. 
*/ - hash_sha1_file(img->buf, img->len, blob_type, oid.hash); + hash_object_file(img->buf, img->len, blob_type, &oid); if (strcmp(oid_to_hex(&oid), patch->old_sha1_prefix)) return error(_("the patch applies to '%s' (%s), " "which does not match the " @@ -3199,7 +3199,7 @@ static int apply_binary(struct apply_state *state, name); /* verify that the result matches */ - hash_sha1_file(img->buf, img->len, blob_type, oid.hash); + hash_object_file(img->buf, img->len, blob_type, &oid); if (strcmp(oid_to_hex(&oid), patch->new_sha1_prefix)) return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"), name, patch->new_sha1_prefix, oid_to_hex(&oid)); @@ -3554,7 +3554,7 @@ static int try_threeway(struct apply_state *state, /* Preimage the patch was prepared for */ if (patch->is_new) - write_sha1_file("", 0, blob_type, pre_oid.hash); + write_object_file("", 0, blob_type, &pre_oid); else if (get_oid(patch->old_sha1_prefix, &pre_oid) || read_blob_object(&buf, &pre_oid, patch->old_mode)) return error(_("repository lacks the necessary blob to fall back on 3-way merge.")); @@ -3570,7 +3570,7 @@ static int try_threeway(struct apply_state *state, return -1; } /* post_oid is theirs */ - write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, post_oid.hash); + write_object_file(tmp_image.buf, tmp_image.len, blob_type, &post_oid); clear_image(&tmp_image); /* our_oid is ours */ @@ -3583,7 +3583,7 @@ static int try_threeway(struct apply_state *state, return error(_("cannot read the current contents of '%s'"), patch->old_name); } - write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, our_oid.hash); + write_object_file(tmp_image.buf, tmp_image.len, blob_type, &our_oid); clear_image(&tmp_image); /* in-core three-way merge between post and our using pre as base */ @@ -4163,30 +4163,30 @@ static void show_mode_change(struct patch *p, int show_name) static void show_rename_copy(struct patch *p) { const char *renamecopy = p->is_rename ? 
"rename" : "copy"; - const char *old, *new; + const char *old_name, *new_name; /* Find common prefix */ - old = p->old_name; - new = p->new_name; + old_name = p->old_name; + new_name = p->new_name; while (1) { const char *slash_old, *slash_new; - slash_old = strchr(old, '/'); - slash_new = strchr(new, '/'); + slash_old = strchr(old_name, '/'); + slash_new = strchr(new_name, '/'); if (!slash_old || !slash_new || - slash_old - old != slash_new - new || - memcmp(old, new, slash_new - new)) + slash_old - old_name != slash_new - new_name || + memcmp(old_name, new_name, slash_new - new_name)) break; - old = slash_old + 1; - new = slash_new + 1; + old_name = slash_old + 1; + new_name = slash_new + 1; } - /* p->old_name thru old is the common prefix, and old and new + /* p->old_name thru old_name is the common prefix, and old_name and new_name * through the end of names are renames */ - if (old != p->old_name) + if (old_name != p->old_name) printf(" %s %.*s{%s => %s} (%d%%)\n", renamecopy, - (int)(old - p->old_name), p->old_name, - old, new, p->score); + (int)(old_name - p->old_name), p->old_name, + old_name, new_name, p->score); else printf(" %s %s => %s (%d%%)\n", renamecopy, p->old_name, p->new_name, p->score); @@ -4291,7 +4291,7 @@ static int add_index_file(struct apply_state *state, } fill_stat_cache_info(ce, &st); } - if (write_sha1_file(buf, size, blob_type, ce->oid.hash) < 0) { + if (write_object_file(buf, size, blob_type, &ce->oid) < 0) { free(ce); return error(_("unable to create backing store " "for newly created file %s"), path); @@ -4943,8 +4943,9 @@ int apply_parse_options(int argc, const char **argv, N_("make sure the patch is applicable to the current index")), OPT_BOOL(0, "cached", &state->cached, N_("apply a patch without touching the working tree")), - OPT_BOOL(0, "unsafe-paths", &state->unsafe_paths, - N_("accept a patch that touches outside the working area")), + OPT_BOOL_F(0, "unsafe-paths", &state->unsafe_paths, + N_("accept a patch that touches outside the working area"), + PARSE_OPT_NOCOMPLETE), OPT_BOOL(0, "apply", force_apply, N_("also apply the patch (use with --stat/--summary/--check)")), OPT_BOOL('3', "3way", &state->threeway, @@ -232,7 +232,7 @@ static struct commit *fake_working_tree_commit(struct diff_options *opt, convert_to_git(&the_index, path, buf.buf, buf.len, &buf, 0); origin->file.ptr = buf.buf; origin->file.size = buf.len; - pretend_sha1_file(buf.buf, buf.len, OBJ_BLOB, origin->blob_oid.hash); + pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid); /* * Read the current index, replace the path entry with @@ -998,28 +998,29 @@ unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e) } /* - * best_so_far[] and this[] are both a split of an existing blame_entry - * that passes blame to the parent. Maintain best_so_far the best split - * so far, by comparing this and best_so_far and copying this into + * best_so_far[] and potential[] are both a split of an existing blame_entry + * that passes blame to the parent. Maintain best_so_far the best split so + * far, by comparing potential and best_so_far and copying potential into * bst_so_far as needed. 
*/ static void copy_split_if_better(struct blame_scoreboard *sb, struct blame_entry *best_so_far, - struct blame_entry *this) + struct blame_entry *potential) { int i; - if (!this[1].suspect) + if (!potential[1].suspect) return; if (best_so_far[1].suspect) { - if (blame_entry_score(sb, &this[1]) < blame_entry_score(sb, &best_so_far[1])) + if (blame_entry_score(sb, &potential[1]) < + blame_entry_score(sb, &best_so_far[1])) return; } for (i = 0; i < 3; i++) - blame_origin_incref(this[i].suspect); + blame_origin_incref(potential[i].suspect); decref_split(best_so_far); - memcpy(best_so_far, this, sizeof(struct blame_entry [3])); + memcpy(best_so_far, potential, sizeof(struct blame_entry[3])); } /* @@ -1046,12 +1047,12 @@ static void handle_split(struct blame_scoreboard *sb, if (ent->num_lines <= tlno) return; if (tlno < same) { - struct blame_entry this[3]; + struct blame_entry potential[3]; tlno += ent->s_lno; same += ent->s_lno; - split_overlap(this, ent, tlno, plno, same, parent); - copy_split_if_better(sb, split, this); - decref_split(this); + split_overlap(potential, ent, tlno, plno, same, parent); + copy_split_if_better(sb, split, potential); + decref_split(potential); } } @@ -1273,7 +1274,7 @@ static void find_copy_in_parent(struct blame_scoreboard *sb, struct diff_filepair *p = diff_queued_diff.queue[i]; struct blame_origin *norigin; mmfile_t file_p; - struct blame_entry this[3]; + struct blame_entry potential[3]; if (!DIFF_FILE_VALID(p->one)) continue; /* does not exist in parent */ @@ -1292,10 +1293,10 @@ static void find_copy_in_parent(struct blame_scoreboard *sb, for (j = 0; j < num_ents; j++) { find_copy_in_blob(sb, blame_list[j].ent, - norigin, this, &file_p); + norigin, potential, &file_p); copy_split_if_better(sb, blame_list[j].split, - this); - decref_split(this); + potential); + decref_split(potential); } blame_origin_decref(norigin); } diff --git a/builtin/add.c b/builtin/add.c index bf01d89e28..9ef7fb02d5 100644 --- a/builtin/add.c +++ b/builtin/add.c @@ -294,7 +294,7 @@ static struct option builtin_add_options[] = { OPT_BOOL('i', "interactive", &add_interactive, N_("interactive picking")), OPT_BOOL('p', "patch", &patch_interactive, N_("select hunks interactively")), OPT_BOOL('e', "edit", &edit_interactive, N_("edit current diff and apply")), - OPT__FORCE(&ignored_too, N_("allow adding otherwise ignored files")), + OPT__FORCE(&ignored_too, N_("allow adding otherwise ignored files"), 0), OPT_BOOL('u', "update", &take_worktree_changes, N_("update tracked files")), OPT_BOOL(0, "renormalize", &add_renormalize, N_("renormalize EOL of tracked files (implies -u)")), OPT_BOOL('N', "intent-to-add", &intent_to_add, N_("record only the fact that the path will be added later")), @@ -534,10 +534,9 @@ int cmd_add(int argc, const char **argv, const char *prefix) unplug_bulk_checkin(); finish: - if (active_cache_changed) { - if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK)) - die(_("Unable to write new index file")); - } + if (write_locked_index(&the_index, &lock_file, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) + die(_("Unable to write new index file")); UNLEAK(pathspec); UNLEAK(dir); diff --git a/builtin/am.c b/builtin/am.c index acfe9d3c8c..1151b5c73a 100644 --- a/builtin/am.c +++ b/builtin/am.c @@ -1011,6 +1011,7 @@ static void am_setup(struct am_state *state, enum patch_format patch_format, if (mkdir(state->dir, 0777) < 0 && errno != EEXIST) die_errno(_("failed to create directory '%s'"), state->dir); + delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF); if (split_mail(state, 
patch_format, paths, keep_cr) < 0) { am_destroy(state); @@ -1061,7 +1062,7 @@ static void am_setup(struct am_state *state, enum patch_format patch_format, } write_state_text(state, "scissors", str); - sq_quote_argv(&sb, state->git_apply_opts.argv, 0); + sq_quote_argv(&sb, state->git_apply_opts.argv); write_state_text(state, "apply-opt", sb.buf); if (state->rebasing) @@ -1110,6 +1111,7 @@ static void am_next(struct am_state *state) oidclr(&state->orig_commit); unlink(am_path(state, "original-commit")); + delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF); if (!get_oid("HEAD", &head)) write_state_text(state, "abort-safety", oid_to_hex(&head)); @@ -1441,6 +1443,8 @@ static int parse_mail_rebase(struct am_state *state, const char *mail) oidcpy(&state->orig_commit, &commit_oid); write_state_text(state, "original-commit", oid_to_hex(&commit_oid)); + update_ref("am", "REBASE_HEAD", &commit_oid, + NULL, REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR); return 0; } @@ -1641,8 +1645,8 @@ static void do_commit(const struct am_state *state) setenv("GIT_COMMITTER_DATE", state->ignore_date ? "" : state->author_date, 1); - if (commit_tree(state->msg, state->msg_len, tree.hash, parents, commit.hash, - author, state->sign_commit)) + if (commit_tree(state->msg, state->msg_len, &tree, parents, &commit, + author, state->sign_commit)) die(_("failed to write commit object")); reflog_msg = getenv("GIT_REFLOG_ACTION"); @@ -1831,8 +1835,7 @@ static void am_run(struct am_state *state, int resume) git_config_get_bool("advice.amworkdir", &advice_amworkdir); if (advice_amworkdir) - printf_ln(_("The copy of the patch that failed is found in: %s"), - am_path(state, "patch")); + printf_ln(_("Use 'git am --show-current-patch' to see the failed patch")); die_user_resolve(state); } @@ -2121,6 +2124,34 @@ static void am_abort(struct am_state *state) am_destroy(state); } +static int show_patch(struct am_state *state) +{ + struct strbuf sb = STRBUF_INIT; + const char *patch_path; + int len; + + if (!is_null_oid(&state->orig_commit)) { + const char *av[4] = { "show", NULL, "--", NULL }; + char *new_oid_str; + int ret; + + av[1] = new_oid_str = xstrdup(oid_to_hex(&state->orig_commit)); + ret = run_command_v_opt(av, RUN_GIT_CMD); + free(new_oid_str); + return ret; + } + + patch_path = am_path(state, msgnum(state)); + len = strbuf_read_file(&sb, patch_path, 0); + if (len < 0) + die_errno(_("failed to read '%s'"), patch_path); + + setup_pager(); + write_in_full(1, sb.buf, sb.len); + strbuf_release(&sb); + return 0; +} + /** * parse_options() callback that validates and sets opt->value to the * PATCH_FORMAT_* enum value corresponding to `arg`. 
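
Taken together, the am.c hunks above and below give the user two ways to look at the patch that stopped "git am": the REBASE_HEAD ref written by parse_mail_rebase() while rebasing, and the show_patch() helper behind the "--show-current-patch" mode whose option parsing follows below. A possible session, purely as an illustration (the patch file name and the failure are assumed, not taken from this series):

------------------------
$ git am series/0001-topic.patch
# ... the patch does not apply and "git am" stops ...
$ git am --show-current-patch    # page the mail/patch being applied
$ git show REBASE_HEAD           # only while rebasing existing commits
$ git am --quit                  # give up, but keep HEAD where it is
------------------------
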
@@ -2149,7 +2180,9 @@ enum resume_mode { RESUME_APPLY, RESUME_RESOLVED, RESUME_SKIP, - RESUME_ABORT + RESUME_ABORT, + RESUME_QUIT, + RESUME_SHOW_PATCH }; static int git_am_config(const char *k, const char *v, void *cb) @@ -2171,6 +2204,7 @@ int cmd_am(int argc, const char **argv, const char *prefix) int patch_format = PATCH_FORMAT_UNKNOWN; enum resume_mode resume = RESUME_FALSE; int in_progress; + int ret = 0; const char * const usage[] = { N_("git am [<options>] [(<mbox> | <Maildir>)...]"), @@ -2249,6 +2283,12 @@ int cmd_am(int argc, const char **argv, const char *prefix) OPT_CMDMODE(0, "abort", &resume, N_("restore the original branch and abort the patching operation."), RESUME_ABORT), + OPT_CMDMODE(0, "quit", &resume, + N_("abort the patching operation but keep HEAD where it is."), + RESUME_QUIT), + OPT_CMDMODE(0, "show-current-patch", &resume, + N_("show the patch being applied."), + RESUME_SHOW_PATCH), OPT_BOOL(0, "committer-date-is-author-date", &state.committer_date_is_author_date, N_("lie about committer date")), @@ -2317,7 +2357,7 @@ int cmd_am(int argc, const char **argv, const char *prefix) * stray directories. */ if (file_exists(state.dir) && !state.rebasing) { - if (resume == RESUME_ABORT) { + if (resume == RESUME_ABORT || resume == RESUME_QUIT) { am_destroy(&state); am_state_release(&state); return 0; @@ -2359,11 +2399,18 @@ int cmd_am(int argc, const char **argv, const char *prefix) case RESUME_ABORT: am_abort(&state); break; + case RESUME_QUIT: + am_rerere_clear(); + am_destroy(&state); + break; + case RESUME_SHOW_PATCH: + ret = show_patch(&state); + break; default: die("BUG: invalid resume value"); } am_state_release(&state); - return 0; + return ret; } diff --git a/builtin/archive.c b/builtin/archive.c index f863465a0f..73971d0dd2 100644 --- a/builtin/archive.c +++ b/builtin/archive.c @@ -55,7 +55,7 @@ static int run_remote_archiver(int argc, const char **argv, buf = packet_read_line(fd[0], NULL); if (!buf) - die(_("git archive: expected ACK/NAK, got EOF")); + die(_("git archive: expected ACK/NAK, got a flush packet")); if (strcmp(buf, "ACK")) { if (starts_with(buf, "NACK ")) die(_("git archive: NACK %s"), buf + 5); diff --git a/builtin/blame.c b/builtin/blame.c index 005f55aaa2..9dcb367b90 100644 --- a/builtin/blame.c +++ b/builtin/blame.c @@ -649,6 +649,15 @@ static int blame_move_callback(const struct option *option, const char *arg, int return 0; } +static int is_a_rev(const char *name) +{ + struct object_id oid; + + if (get_oid(name, &oid)) + return 0; + return OBJ_NONE < sha1_object_info(oid.hash, NULL); +} + int cmd_blame(int argc, const char **argv, const char *prefix) { struct rev_info revs; @@ -845,16 +854,15 @@ parse_done: } else { if (argc < 2) usage_with_options(blame_opt_usage, options); - path = add_prefix(prefix, argv[argc - 1]); - if (argc == 3 && !file_exists(path)) { /* (2b) */ + if (argc == 3 && is_a_rev(argv[argc - 1])) { /* (2b) */ path = add_prefix(prefix, argv[1]); argv[1] = argv[2]; + } else { /* (2a) */ + if (argc == 2 && is_a_rev(argv[1]) && !get_git_work_tree()) + die("missing <path> to blame"); + path = add_prefix(prefix, argv[argc - 1]); } argv[argc - 1] = "--"; - - setup_work_tree(); - if (!file_exists(path)) - die_errno("cannot stat path '%s'", path); } revs.disable_stdin = 1; diff --git a/builtin/branch.c b/builtin/branch.c index 8dcc2ed058..6d0cea9d4b 100644 --- a/builtin/branch.c +++ b/builtin/branch.c @@ -615,7 +615,7 @@ int cmd_branch(int argc, const char **argv, const char *prefix) OPT_BOOL('l', "create-reflog", &reflog, N_("create 
the branch's reflog")), OPT_BOOL(0, "edit-description", &edit_description, N_("edit the description for the branch")), - OPT__FORCE(&force, N_("force creation, move/rename, deletion")), + OPT__FORCE(&force, N_("force creation, move/rename, deletion"), PARSE_OPT_NOCOMPLETE), OPT_MERGED(&filter, N_("print only branches that are merged")), OPT_NO_MERGED(&filter, N_("print only branches that are not merged")), OPT_COLUMN(0, "column", &colopts, N_("list branches in columns")), diff --git a/builtin/cat-file.c b/builtin/cat-file.c index f5fa4fd75a..d90170f070 100644 --- a/builtin/cat-file.c +++ b/builtin/cat-file.c @@ -76,7 +76,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, buf = NULL; switch (opt) { case 't': - oi.typename = &sb; + oi.type_name = &sb; if (sha1_object_info_extended(oid.hash, &oi, flags) < 0) die("git cat-file: could not get object info"); if (sb.len) { @@ -229,7 +229,7 @@ static void expand_atom(struct strbuf *sb, const char *atom, int len, if (data->mark_query) data->info.typep = &data->type; else - strbuf_addstr(sb, typename(data->type)); + strbuf_addstr(sb, type_name(data->type)); } else if (is_atom("objectsize", atom, len)) { if (data->mark_query) data->info.sizep = &data->size; @@ -475,6 +475,8 @@ static int batch_objects(struct batch_options *opt) for_each_loose_object(batch_loose_object, &sa, 0); for_each_packed_object(batch_packed_object, &sa, 0); + if (repository_format_partial_clone) + warning("This repository has extensions.partialClone set. Some objects may not be loaded."); cb.opt = opt; cb.expand = &data; diff --git a/builtin/check-ignore.c b/builtin/check-ignore.c index 3e280b9c7a..ec9a959e08 100644 --- a/builtin/check-ignore.c +++ b/builtin/check-ignore.c @@ -72,7 +72,7 @@ static int check_ignore(struct dir_struct *dir, { const char *full_path; char *seen; - int num_ignored = 0, dtype = DT_UNKNOWN, i; + int num_ignored = 0, i; struct exclude *exclude; struct pathspec pathspec; @@ -104,6 +104,7 @@ static int check_ignore(struct dir_struct *dir, full_path = pathspec.items[i].match; exclude = NULL; if (!seen[i]) { + int dtype = DT_UNKNOWN; exclude = last_exclude_matching(dir, &the_index, full_path, &dtype); } diff --git a/builtin/checkout-index.c b/builtin/checkout-index.c index b0e78b819d..a730f6a1aa 100644 --- a/builtin/checkout-index.c +++ b/builtin/checkout-index.c @@ -157,7 +157,7 @@ int cmd_checkout_index(int argc, const char **argv, const char *prefix) struct option builtin_checkout_index_options[] = { OPT_BOOL('a', "all", &all, N_("check out all files in the index")), - OPT__FORCE(&force, N_("force overwrite of existing files")), + OPT__FORCE(&force, N_("force overwrite of existing files"), 0), OPT__QUIET(&quiet, N_("no warning for existing files and files not in index")), OPT_BOOL('n', "no-create", ¬_new, diff --git a/builtin/checkout.c b/builtin/checkout.c index c54c78df54..d76e13c852 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -54,14 +54,14 @@ struct checkout_opts { struct tree *source_tree; }; -static int post_checkout_hook(struct commit *old, struct commit *new, +static int post_checkout_hook(struct commit *old_commit, struct commit *new_commit, int changed) { return run_hook_le(NULL, "post-checkout", - oid_to_hex(old ? &old->object.oid : &null_oid), - oid_to_hex(new ? &new->object.oid : &null_oid), + oid_to_hex(old_commit ? &old_commit->object.oid : &null_oid), + oid_to_hex(new_commit ? &new_commit->object.oid : &null_oid), changed ? 
"1" : "0", NULL); - /* "new" can be NULL when checking out from the index before + /* "new_commit" can be NULL when checking out from the index before a commit exists. */ } @@ -227,8 +227,7 @@ static int checkout_merged(int pos, const struct checkout *state) * (it also writes the merge result to the object database even * when it may contain conflicts). */ - if (write_sha1_file(result_buf.ptr, result_buf.size, - blob_type, oid.hash)) + if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid)) die(_("Unable to add merge result for '%s'"), path); free(result_buf.ptr); ce = make_cache_entry(mode, oid.hash, path, 2, 0); @@ -472,8 +471,8 @@ static void setup_branch_path(struct branch_info *branch) } static int merge_working_tree(const struct checkout_opts *opts, - struct branch_info *old, - struct branch_info *new, + struct branch_info *old_branch_info, + struct branch_info *new_branch_info, int *writeout_error) { int ret; @@ -485,7 +484,7 @@ static int merge_working_tree(const struct checkout_opts *opts, resolve_undo_clear(); if (opts->force) { - ret = reset_tree(new->commit->tree, opts, 1, writeout_error); + ret = reset_tree(new_branch_info->commit->tree, opts, 1, writeout_error); if (ret) return ret; } else { @@ -511,7 +510,7 @@ static int merge_working_tree(const struct checkout_opts *opts, topts.initial_checkout = is_cache_unborn(); topts.update = 1; topts.merge = 1; - topts.gently = opts->merge && old->commit; + topts.gently = opts->merge && old_branch_info->commit; topts.verbose_update = opts->show_progress; topts.fn = twoway_merge; if (opts->overwrite_ignore) { @@ -519,11 +518,11 @@ static int merge_working_tree(const struct checkout_opts *opts, topts.dir->flags |= DIR_SHOW_IGNORED; setup_standard_excludes(topts.dir); } - tree = parse_tree_indirect(old->commit ? - &old->commit->object.oid : + tree = parse_tree_indirect(old_branch_info->commit ? + &old_branch_info->commit->object.oid : the_hash_algo->empty_tree); init_tree_desc(&trees[0], tree->buffer, tree->size); - tree = parse_tree_indirect(&new->commit->object.oid); + tree = parse_tree_indirect(&new_branch_info->commit->object.oid); init_tree_desc(&trees[1], tree->buffer, tree->size); ret = unpack_trees(2, trees, &topts); @@ -540,10 +539,10 @@ static int merge_working_tree(const struct checkout_opts *opts, return 1; /* - * Without old->commit, the below is the same as + * Without old_branch_info->commit, the below is the same as * the two-tree unpack we already tried and failed. 
*/ - if (!old->commit) + if (!old_branch_info->commit) return 1; /* Do more real merge */ @@ -571,18 +570,18 @@ static int merge_working_tree(const struct checkout_opts *opts, o.verbosity = 0; work = write_tree_from_memory(&o); - ret = reset_tree(new->commit->tree, opts, 1, + ret = reset_tree(new_branch_info->commit->tree, opts, 1, writeout_error); if (ret) return ret; - o.ancestor = old->name; - o.branch1 = new->name; + o.ancestor = old_branch_info->name; + o.branch1 = new_branch_info->name; o.branch2 = "local"; - ret = merge_trees(&o, new->commit->tree, work, - old->commit->tree, &result); + ret = merge_trees(&o, new_branch_info->commit->tree, work, + old_branch_info->commit->tree, &result); if (ret < 0) exit(128); - ret = reset_tree(new->commit->tree, opts, 0, + ret = reset_tree(new_branch_info->commit->tree, opts, 0, writeout_error); strbuf_release(&o.obuf); if (ret) @@ -600,25 +599,25 @@ static int merge_working_tree(const struct checkout_opts *opts, die(_("unable to write new index file")); if (!opts->force && !opts->quiet) - show_local_changes(&new->commit->object, &opts->diff_options); + show_local_changes(&new_branch_info->commit->object, &opts->diff_options); return 0; } -static void report_tracking(struct branch_info *new) +static void report_tracking(struct branch_info *new_branch_info) { struct strbuf sb = STRBUF_INIT; - struct branch *branch = branch_get(new->name); + struct branch *branch = branch_get(new_branch_info->name); - if (!format_tracking_info(branch, &sb)) + if (!format_tracking_info(branch, &sb, AHEAD_BEHIND_FULL)) return; fputs(sb.buf, stdout); strbuf_release(&sb); } static void update_refs_for_switch(const struct checkout_opts *opts, - struct branch_info *old, - struct branch_info *new) + struct branch_info *old_branch_info, + struct branch_info *new_branch_info) { struct strbuf msg = STRBUF_INIT; const char *old_desc, *reflog_msg; @@ -645,69 +644,69 @@ static void update_refs_for_switch(const struct checkout_opts *opts, free(refname); } else - create_branch(opts->new_branch, new->name, + create_branch(opts->new_branch, new_branch_info->name, opts->new_branch_force ? 1 : 0, opts->new_branch_force ? 1 : 0, opts->new_branch_log, opts->quiet, opts->track); - new->name = opts->new_branch; - setup_branch_path(new); + new_branch_info->name = opts->new_branch; + setup_branch_path(new_branch_info); } - old_desc = old->name; - if (!old_desc && old->commit) - old_desc = oid_to_hex(&old->commit->object.oid); + old_desc = old_branch_info->name; + if (!old_desc && old_branch_info->commit) + old_desc = oid_to_hex(&old_branch_info->commit->object.oid); reflog_msg = getenv("GIT_REFLOG_ACTION"); if (!reflog_msg) strbuf_addf(&msg, "checkout: moving from %s to %s", - old_desc ? old_desc : "(invalid)", new->name); + old_desc ? old_desc : "(invalid)", new_branch_info->name); else strbuf_insert(&msg, 0, reflog_msg, strlen(reflog_msg)); - if (!strcmp(new->name, "HEAD") && !new->path && !opts->force_detach) { + if (!strcmp(new_branch_info->name, "HEAD") && !new_branch_info->path && !opts->force_detach) { /* Nothing to do. */ - } else if (opts->force_detach || !new->path) { /* No longer on any branch. */ - update_ref(msg.buf, "HEAD", &new->commit->object.oid, NULL, + } else if (opts->force_detach || !new_branch_info->path) { /* No longer on any branch. 
*/ + update_ref(msg.buf, "HEAD", &new_branch_info->commit->object.oid, NULL, REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR); if (!opts->quiet) { - if (old->path && + if (old_branch_info->path && advice_detached_head && !opts->force_detach) - detach_advice(new->name); - describe_detached_head(_("HEAD is now at"), new->commit); + detach_advice(new_branch_info->name); + describe_detached_head(_("HEAD is now at"), new_branch_info->commit); } - } else if (new->path) { /* Switch branches. */ - if (create_symref("HEAD", new->path, msg.buf) < 0) + } else if (new_branch_info->path) { /* Switch branches. */ + if (create_symref("HEAD", new_branch_info->path, msg.buf) < 0) die(_("unable to update HEAD")); if (!opts->quiet) { - if (old->path && !strcmp(new->path, old->path)) { + if (old_branch_info->path && !strcmp(new_branch_info->path, old_branch_info->path)) { if (opts->new_branch_force) fprintf(stderr, _("Reset branch '%s'\n"), - new->name); + new_branch_info->name); else fprintf(stderr, _("Already on '%s'\n"), - new->name); + new_branch_info->name); } else if (opts->new_branch) { if (opts->branch_exists) - fprintf(stderr, _("Switched to and reset branch '%s'\n"), new->name); + fprintf(stderr, _("Switched to and reset branch '%s'\n"), new_branch_info->name); else - fprintf(stderr, _("Switched to a new branch '%s'\n"), new->name); + fprintf(stderr, _("Switched to a new branch '%s'\n"), new_branch_info->name); } else { fprintf(stderr, _("Switched to branch '%s'\n"), - new->name); + new_branch_info->name); } } - if (old->path && old->name) { - if (!ref_exists(old->path) && reflog_exists(old->path)) - delete_reflog(old->path); + if (old_branch_info->path && old_branch_info->name) { + if (!ref_exists(old_branch_info->path) && reflog_exists(old_branch_info->path)) + delete_reflog(old_branch_info->path); } } remove_branch_state(); strbuf_release(&msg); if (!opts->quiet && - (new->path || (!opts->force_detach && !strcmp(new->name, "HEAD")))) - report_tracking(new); + (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD")))) + report_tracking(new_branch_info); } static int add_pending_uninteresting_ref(const char *refname, @@ -787,10 +786,10 @@ static void suggest_reattach(struct commit *commit, struct rev_info *revs) * HEAD. If it is not reachable from any ref, this is the last chance * for the user to do so without resorting to reflog. */ -static void orphaned_commit_warning(struct commit *old, struct commit *new) +static void orphaned_commit_warning(struct commit *old_commit, struct commit *new_commit) { struct rev_info revs; - struct object *object = &old->object; + struct object *object = &old_commit->object; init_revisions(&revs, NULL); setup_revisions(0, NULL, &revs, NULL); @@ -799,57 +798,57 @@ static void orphaned_commit_warning(struct commit *old, struct commit *new) add_pending_object(&revs, object, oid_to_hex(&object->oid)); for_each_ref(add_pending_uninteresting_ref, &revs); - add_pending_oid(&revs, "HEAD", &new->object.oid, UNINTERESTING); + add_pending_oid(&revs, "HEAD", &new_commit->object.oid, UNINTERESTING); if (prepare_revision_walk(&revs)) die(_("internal error in revision walk")); - if (!(old->object.flags & UNINTERESTING)) - suggest_reattach(old, &revs); + if (!(old_commit->object.flags & UNINTERESTING)) + suggest_reattach(old_commit, &revs); else - describe_detached_head(_("Previous HEAD position was"), old); + describe_detached_head(_("Previous HEAD position was"), old_commit); /* Clean up objects used, as they will be reused. 
*/ clear_commit_marks_all(ALL_REV_FLAGS); } static int switch_branches(const struct checkout_opts *opts, - struct branch_info *new) + struct branch_info *new_branch_info) { int ret = 0; - struct branch_info old; + struct branch_info old_branch_info; void *path_to_free; struct object_id rev; int flag, writeout_error = 0; - memset(&old, 0, sizeof(old)); - old.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag); - if (old.path) - old.commit = lookup_commit_reference_gently(&rev, 1); + memset(&old_branch_info, 0, sizeof(old_branch_info)); + old_branch_info.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag); + if (old_branch_info.path) + old_branch_info.commit = lookup_commit_reference_gently(&rev, 1); if (!(flag & REF_ISSYMREF)) - old.path = NULL; + old_branch_info.path = NULL; - if (old.path) - skip_prefix(old.path, "refs/heads/", &old.name); + if (old_branch_info.path) + skip_prefix(old_branch_info.path, "refs/heads/", &old_branch_info.name); - if (!new->name) { - new->name = "HEAD"; - new->commit = old.commit; - if (!new->commit) + if (!new_branch_info->name) { + new_branch_info->name = "HEAD"; + new_branch_info->commit = old_branch_info.commit; + if (!new_branch_info->commit) die(_("You are on a branch yet to be born")); - parse_commit_or_die(new->commit); + parse_commit_or_die(new_branch_info->commit); } - ret = merge_working_tree(opts, &old, new, &writeout_error); + ret = merge_working_tree(opts, &old_branch_info, new_branch_info, &writeout_error); if (ret) { free(path_to_free); return ret; } - if (!opts->quiet && !old.path && old.commit && new->commit != old.commit) - orphaned_commit_warning(old.commit, new->commit); + if (!opts->quiet && !old_branch_info.path && old_branch_info.commit && new_branch_info->commit != old_branch_info.commit) + orphaned_commit_warning(old_branch_info.commit, new_branch_info->commit); - update_refs_for_switch(opts, &old, new); + update_refs_for_switch(opts, &old_branch_info, new_branch_info); - ret = post_checkout_hook(old.commit, new->commit, 1); + ret = post_checkout_hook(old_branch_info.commit, new_branch_info->commit, 1); free(path_to_free); return ret || writeout_error; } @@ -870,7 +869,7 @@ static int git_checkout_config(const char *var, const char *value, void *cb) static int parse_branchname_arg(int argc, const char **argv, int dwim_new_local_branch_ok, - struct branch_info *new, + struct branch_info *new_branch_info, struct checkout_opts *opts, struct object_id *rev) { @@ -988,22 +987,22 @@ static int parse_branchname_arg(int argc, const char **argv, argv++; argc--; - new->name = arg; - setup_branch_path(new); + new_branch_info->name = arg; + setup_branch_path(new_branch_info); - if (!check_refname_format(new->path, 0) && - !read_ref(new->path, &branch_rev)) + if (!check_refname_format(new_branch_info->path, 0) && + !read_ref(new_branch_info->path, &branch_rev)) oidcpy(rev, &branch_rev); else - new->path = NULL; /* not an existing branch */ + new_branch_info->path = NULL; /* not an existing branch */ - new->commit = lookup_commit_reference_gently(rev, 1); - if (!new->commit) { + new_branch_info->commit = lookup_commit_reference_gently(rev, 1); + if (!new_branch_info->commit) { /* not a commit */ *source_tree = parse_tree_indirect(rev); } else { - parse_commit_or_die(new->commit); - *source_tree = new->commit->tree; + parse_commit_or_die(new_branch_info->commit); + *source_tree = new_branch_info->commit->tree; } if (!*source_tree) /* case (1): want a tree */ @@ -1043,7 +1042,7 @@ static int switch_unborn_to_new_branch(const struct 
checkout_opts *opts) } static int checkout_branch(struct checkout_opts *opts, - struct branch_info *new) + struct branch_info *new_branch_info) { if (opts->pathspec.nr) die(_("paths cannot be used with switching branches")); @@ -1072,21 +1071,21 @@ static int checkout_branch(struct checkout_opts *opts, } else if (opts->track == BRANCH_TRACK_UNSPECIFIED) opts->track = git_branch_track; - if (new->name && !new->commit) + if (new_branch_info->name && !new_branch_info->commit) die(_("Cannot switch branch to a non-commit '%s'"), - new->name); + new_branch_info->name); - if (new->path && !opts->force_detach && !opts->new_branch && + if (new_branch_info->path && !opts->force_detach && !opts->new_branch && !opts->ignore_other_worktrees) { int flag; char *head_ref = resolve_refdup("HEAD", 0, NULL, &flag); if (head_ref && - (!(flag & REF_ISSYMREF) || strcmp(head_ref, new->path))) - die_if_checked_out(new->path, 1); + (!(flag & REF_ISSYMREF) || strcmp(head_ref, new_branch_info->path))) + die_if_checked_out(new_branch_info->path, 1); free(head_ref); } - if (!new->commit && opts->new_branch) { + if (!new_branch_info->commit && opts->new_branch) { struct object_id rev; int flag; @@ -1094,13 +1093,13 @@ static int checkout_branch(struct checkout_opts *opts, (flag & REF_ISSYMREF) && is_null_oid(&rev)) return switch_unborn_to_new_branch(opts); } - return switch_branches(opts, new); + return switch_branches(opts, new_branch_info); } int cmd_checkout(int argc, const char **argv, const char *prefix) { struct checkout_opts opts; - struct branch_info new; + struct branch_info new_branch_info; char *conflict_style = NULL; int dwim_new_local_branch = 1; struct option options[] = { @@ -1118,9 +1117,12 @@ int cmd_checkout(int argc, const char **argv, const char *prefix) 2), OPT_SET_INT('3', "theirs", &opts.writeout_stage, N_("checkout their version for unmerged files"), 3), - OPT__FORCE(&opts.force, N_("force checkout (throw away local modifications)")), + OPT__FORCE(&opts.force, N_("force checkout (throw away local modifications)"), + PARSE_OPT_NOCOMPLETE), OPT_BOOL('m', "merge", &opts.merge, N_("perform a 3-way merge with the new branch")), - OPT_BOOL(0, "overwrite-ignore", &opts.overwrite_ignore, N_("update ignored files (default)")), + OPT_BOOL_F(0, "overwrite-ignore", &opts.overwrite_ignore, + N_("update ignored files (default)"), + PARSE_OPT_NOCOMPLETE), OPT_STRING(0, "conflict", &conflict_style, N_("style"), N_("conflict style (merge or diff3)")), OPT_BOOL('p', "patch", &opts.patch_mode, N_("select hunks interactively")), @@ -1138,7 +1140,7 @@ int cmd_checkout(int argc, const char **argv, const char *prefix) }; memset(&opts, 0, sizeof(opts)); - memset(&new, 0, sizeof(new)); + memset(&new_branch_info, 0, sizeof(new_branch_info)); opts.overwrite_ignore = 1; opts.prefix = prefix; opts.show_progress = -1; @@ -1210,7 +1212,7 @@ int cmd_checkout(int argc, const char **argv, const char *prefix) opts.track == BRANCH_TRACK_UNSPECIFIED && !opts.new_branch; int n = parse_branchname_arg(argc, argv, dwim_ok, - &new, &opts, &rev); + &new_branch_info, &opts, &rev); argv += n; argc -= n; } @@ -1253,7 +1255,7 @@ int cmd_checkout(int argc, const char **argv, const char *prefix) UNLEAK(opts); if (opts.patch_mode || opts.pathspec.nr) - return checkout_paths(&opts, new.name); + return checkout_paths(&opts, new_branch_info.name); else - return checkout_branch(&opts, &new); + return checkout_branch(&opts, &new_branch_info); } diff --git a/builtin/clean.c b/builtin/clean.c index 189e20628c..fad533a0a7 100644 --- a/builtin/clean.c 
+++ b/builtin/clean.c @@ -909,7 +909,7 @@ int cmd_clean(int argc, const char **argv, const char *prefix) struct option options[] = { OPT__QUIET(&quiet, N_("do not print names of files removed")), OPT__DRY_RUN(&dry_run, N_("dry run")), - OPT__FORCE(&force, N_("force")), + OPT__FORCE(&force, N_("force"), PARSE_OPT_NOCOMPLETE), OPT_BOOL('i', "interactive", &interactive, N_("interactive cleaning")), OPT_BOOL('d', NULL, &remove_directories, N_("remove whole directories")), diff --git a/builtin/clone.c b/builtin/clone.c index 284651797e..101c27a593 100644 --- a/builtin/clone.c +++ b/builtin/clone.c @@ -26,6 +26,7 @@ #include "run-command.h" #include "connected.h" #include "packfile.h" +#include "list-objects-filter-options.h" /* * Overall FIXMEs: @@ -60,6 +61,7 @@ static struct string_list option_optional_reference = STRING_LIST_INIT_NODUP; static int option_dissociate; static int max_jobs = -1; static struct string_list option_recurse_submodules = STRING_LIST_INIT_NODUP; +static struct list_objects_filter_options filter_options; static int recurse_submodules_cb(const struct option *opt, const char *arg, int unset) @@ -135,6 +137,7 @@ static struct option builtin_clone_options[] = { TRANSPORT_FAMILY_IPV4), OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"), TRANSPORT_FAMILY_IPV6), + OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options), OPT_END() }; @@ -893,6 +896,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix) struct refspec *refspec; const char *fetch_pattern; + fetch_if_missing = 0; + packet_trace_identity("clone"); argc = parse_options(argc, argv, prefix, builtin_clone_options, builtin_clone_usage, 0); @@ -1090,6 +1095,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix) warning(_("--shallow-since is ignored in local clones; use file:// instead.")); if (option_not.nr) warning(_("--shallow-exclude is ignored in local clones; use file:// instead.")); + if (filter_options.choice) + warning(_("--filter is ignored in local clones; use file:// instead.")); if (!access(mkpath("%s/shallow", path), F_OK)) { if (option_local > 0) warning(_("source repository is shallow, ignoring --local")); @@ -1118,7 +1125,13 @@ int cmd_clone(int argc, const char **argv, const char *prefix) transport_set_option(transport, TRANS_OPT_UPLOADPACK, option_upload_pack); - if (transport->smart_options && !deepen) + if (filter_options.choice) { + transport_set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER, + filter_options.filter_spec); + transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1"); + } + + if (transport->smart_options && !deepen && !filter_options.choice) transport->smart_options->check_self_contained_and_connected = 1; refs = transport_get_remote_refs(transport); @@ -1178,13 +1191,17 @@ int cmd_clone(int argc, const char **argv, const char *prefix) write_refspec_config(src_ref_prefix, our_head_points_at, remote_head_points_at, &branch_top); + if (filter_options.choice) + partial_clone_register("origin", &filter_options); + if (is_local) clone_local(path, git_dir); else if (refs && complete_refs_before_fetch) transport_fetch_refs(transport, mapped_refs); update_remote_refs(refs, mapped_refs, remote_head_points_at, - branch_top.buf, reflog_msg.buf, transport, !is_local); + branch_top.buf, reflog_msg.buf, transport, + !is_local && !filter_options.choice); update_head(our_head_points_at, remote_head, reflog_msg.buf); @@ -1205,6 +1222,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix) } junk_mode = JUNK_LEAVE_REPO; + fetch_if_missing = 1; err = 
checkout(submodule_progress); strbuf_release(&reflog_msg); diff --git a/builtin/commit-tree.c b/builtin/commit-tree.c index 2177251e24..e5bdf57b1e 100644 --- a/builtin/commit-tree.c +++ b/builtin/commit-tree.c @@ -117,8 +117,8 @@ int cmd_commit_tree(int argc, const char **argv, const char *prefix) die_errno("git commit-tree: failed to read"); } - if (commit_tree(buffer.buf, buffer.len, tree_oid.hash, parents, - commit_oid.hash, NULL, sign_commit)) { + if (commit_tree(buffer.buf, buffer.len, &tree_oid, parents, &commit_oid, + NULL, sign_commit)) { strbuf_release(&buffer); return 1; } diff --git a/builtin/commit.c b/builtin/commit.c index 4610e3d8e3..37fcb55ab0 100644 --- a/builtin/commit.c +++ b/builtin/commit.c @@ -31,9 +31,7 @@ #include "gpg-interface.h" #include "column.h" #include "sequencer.h" -#include "notes-utils.h" #include "mailmap.h" -#include "sigchain.h" static const char * const builtin_commit_usage[] = { N_("git commit [<options>] [--] <pathspec>..."), @@ -45,31 +43,6 @@ static const char * const builtin_status_usage[] = { NULL }; -static const char implicit_ident_advice_noconfig[] = -N_("Your name and email address were configured automatically based\n" -"on your username and hostname. Please check that they are accurate.\n" -"You can suppress this message by setting them explicitly. Run the\n" -"following command and follow the instructions in your editor to edit\n" -"your configuration file:\n" -"\n" -" git config --global --edit\n" -"\n" -"After doing this, you may fix the identity used for this commit with:\n" -"\n" -" git commit --amend --reset-author\n"); - -static const char implicit_ident_advice_config[] = -N_("Your name and email address were configured automatically based\n" -"on your username and hostname. Please check that they are accurate.\n" -"You can suppress this message by setting them explicitly:\n" -"\n" -" git config --global user.name \"Your Name\"\n" -" git config --global user.email you@example.com\n" -"\n" -"After doing this, you may fix the identity used for this commit with:\n" -"\n" -" git commit --amend --reset-author\n"); - static const char empty_amend_advice[] = N_("You asked to amend the most recent commit, but doing so would make\n" "it empty. You can repeat your command with --allow-empty, or you can\n" @@ -93,8 +66,6 @@ N_("If you wish to skip this commit, use:\n" "Then \"git cherry-pick --continue\" will resume cherry-picking\n" "the remaining commits.\n"); -static GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG") - static const char *use_message_buffer; static struct lock_file index_lock; /* real index */ static struct lock_file false_lock; /* used only for partial commits */ @@ -128,12 +99,7 @@ static char *sign_commit; * if editor is used, and only the whitespaces if the message * is specified explicitly. 
*/ -static enum { - CLEANUP_SPACE, - CLEANUP_NONE, - CLEANUP_SCISSORS, - CLEANUP_ALL -} cleanup_mode; +static enum commit_msg_cleanup_mode cleanup_mode; static const char *cleanup_arg; static enum commit_whence whence; @@ -423,13 +389,9 @@ static const char *prepare_index(int argc, const char **argv, const char *prefix if (active_cache_changed || !cache_tree_fully_valid(active_cache_tree)) update_main_cache_tree(WRITE_TREE_SILENT); - if (active_cache_changed) { - if (write_locked_index(&the_index, &index_lock, - COMMIT_LOCK)) - die(_("unable to write new_index file")); - } else { - rollback_lock_file(&index_lock); - } + if (write_locked_index(&the_index, &index_lock, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) + die(_("unable to write new_index file")); commit_style = COMMIT_AS_IS; ret = get_index_file(); goto out; @@ -673,7 +635,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix, struct strbuf sb = STRBUF_INIT; const char *hook_arg1 = NULL; const char *hook_arg2 = NULL; - int clean_message_contents = (cleanup_mode != CLEANUP_NONE); + int clean_message_contents = (cleanup_mode != COMMIT_MSG_CLEANUP_NONE); int old_display_comment_prefix; /* This checks and barfs if author is badly specified */ @@ -814,7 +776,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix, struct ident_split ci, ai; if (whence != FROM_COMMIT) { - if (cleanup_mode == CLEANUP_SCISSORS) + if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS) wt_status_add_cut_line(s->fp); status_printf_ln(s, GIT_COLOR_NORMAL, whence == FROM_MERGE @@ -834,14 +796,15 @@ static int prepare_to_commit(const char *index_file, const char *prefix, } fprintf(s->fp, "\n"); - if (cleanup_mode == CLEANUP_ALL) + if (cleanup_mode == COMMIT_MSG_CLEANUP_ALL) status_printf(s, GIT_COLOR_NORMAL, _("Please enter the commit message for your changes." " Lines starting\nwith '%c' will be ignored, and an empty" " message aborts the commit.\n"), comment_line_char); - else if (cleanup_mode == CLEANUP_SCISSORS && whence == FROM_COMMIT) + else if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS && + whence == FROM_COMMIT) wt_status_add_cut_line(s->fp); - else /* CLEANUP_SPACE, that is. */ + else /* COMMIT_MSG_CLEANUP_SPACE, that is. */ status_printf(s, GIT_COLOR_NORMAL, _("Please enter the commit message for your changes." " Lines starting\n" @@ -986,65 +949,6 @@ static int prepare_to_commit(const char *index_file, const char *prefix, return 1; } -static int rest_is_empty(struct strbuf *sb, int start) -{ - int i, eol; - const char *nl; - - /* Check if the rest is just whitespace and Signed-off-by's. */ - for (i = start; i < sb->len; i++) { - nl = memchr(sb->buf + i, '\n', sb->len - i); - if (nl) - eol = nl - sb->buf; - else - eol = sb->len; - - if (strlen(sign_off_header) <= eol - i && - starts_with(sb->buf + i, sign_off_header)) { - i = eol; - continue; - } - while (i < eol) - if (!isspace(sb->buf[i++])) - return 0; - } - - return 1; -} - -/* - * Find out if the message in the strbuf contains only whitespace and - * Signed-off-by lines. 
- */ -static int message_is_empty(struct strbuf *sb) -{ - if (cleanup_mode == CLEANUP_NONE && sb->len) - return 0; - return rest_is_empty(sb, 0); -} - -/* - * See if the user edited the message in the editor or left what - * was in the template intact - */ -static int template_untouched(struct strbuf *sb) -{ - struct strbuf tmpl = STRBUF_INIT; - const char *start; - - if (cleanup_mode == CLEANUP_NONE && sb->len) - return 0; - - if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0) - return 0; - - strbuf_stripspace(&tmpl, cleanup_mode == CLEANUP_ALL); - if (!skip_prefix(sb->buf, tmpl.buf, &start)) - start = sb->buf; - strbuf_release(&tmpl); - return rest_is_empty(sb, start - sb->buf); -} - static const char *find_author_by_nickname(const char *name) { struct rev_info revs; @@ -1153,6 +1057,9 @@ static void finalize_deferred_config(struct wt_status *s) s->show_branch = status_deferred_config.show_branch; if (s->show_branch < 0) s->show_branch = 0; + + if (s->ahead_behind_flags == AHEAD_BEHIND_UNSPECIFIED) + s->ahead_behind_flags = AHEAD_BEHIND_FULL; } static int parse_and_validate_options(int argc, const char *argv[], @@ -1229,15 +1136,17 @@ static int parse_and_validate_options(int argc, const char *argv[], if (argc == 0 && (also || (only && !amend && !allow_empty))) die(_("No paths with --include/--only does not make sense.")); if (!cleanup_arg || !strcmp(cleanup_arg, "default")) - cleanup_mode = use_editor ? CLEANUP_ALL : CLEANUP_SPACE; + cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_ALL : + COMMIT_MSG_CLEANUP_SPACE; else if (!strcmp(cleanup_arg, "verbatim")) - cleanup_mode = CLEANUP_NONE; + cleanup_mode = COMMIT_MSG_CLEANUP_NONE; else if (!strcmp(cleanup_arg, "whitespace")) - cleanup_mode = CLEANUP_SPACE; + cleanup_mode = COMMIT_MSG_CLEANUP_SPACE; else if (!strcmp(cleanup_arg, "strip")) - cleanup_mode = CLEANUP_ALL; + cleanup_mode = COMMIT_MSG_CLEANUP_ALL; else if (!strcmp(cleanup_arg, "scissors")) - cleanup_mode = use_editor ? CLEANUP_SCISSORS : CLEANUP_SPACE; + cleanup_mode = use_editor ? 
COMMIT_MSG_CLEANUP_SCISSORS : + COMMIT_MSG_CLEANUP_SPACE; else die(_("Invalid cleanup mode %s"), cleanup_arg); @@ -1367,6 +1276,8 @@ int cmd_status(int argc, const char **argv, const char *prefix) N_("show branch information")), OPT_BOOL(0, "show-stash", &s.show_stash, N_("show stash information")), + OPT_BOOL(0, "ahead-behind", &s.ahead_behind_flags, + N_("compute full ahead/behind values")), { OPTION_CALLBACK, 0, "porcelain", &status_format, N_("version"), N_("machine-readable output"), PARSE_OPT_OPTARG, opt_parse_porcelain }, @@ -1439,98 +1350,6 @@ int cmd_status(int argc, const char **argv, const char *prefix) return 0; } -static const char *implicit_ident_advice(void) -{ - char *user_config = expand_user_path("~/.gitconfig", 0); - char *xdg_config = xdg_config_home("config"); - int config_exists = file_exists(user_config) || file_exists(xdg_config); - - free(user_config); - free(xdg_config); - - if (config_exists) - return _(implicit_ident_advice_config); - else - return _(implicit_ident_advice_noconfig); - -} - -static void print_summary(const char *prefix, const struct object_id *oid, - int initial_commit) -{ - struct rev_info rev; - struct commit *commit; - struct strbuf format = STRBUF_INIT; - const char *head; - struct pretty_print_context pctx = {0}; - struct strbuf author_ident = STRBUF_INIT; - struct strbuf committer_ident = STRBUF_INIT; - - commit = lookup_commit(oid); - if (!commit) - die(_("couldn't look up newly created commit")); - if (parse_commit(commit)) - die(_("could not parse newly created commit")); - - strbuf_addstr(&format, "format:%h] %s"); - - format_commit_message(commit, "%an <%ae>", &author_ident, &pctx); - format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx); - if (strbuf_cmp(&author_ident, &committer_ident)) { - strbuf_addstr(&format, "\n Author: "); - strbuf_addbuf_percentquote(&format, &author_ident); - } - if (author_date_is_interesting()) { - struct strbuf date = STRBUF_INIT; - format_commit_message(commit, "%ad", &date, &pctx); - strbuf_addstr(&format, "\n Date: "); - strbuf_addbuf_percentquote(&format, &date); - strbuf_release(&date); - } - if (!committer_ident_sufficiently_given()) { - strbuf_addstr(&format, "\n Committer: "); - strbuf_addbuf_percentquote(&format, &committer_ident); - if (advice_implicit_identity) { - strbuf_addch(&format, '\n'); - strbuf_addstr(&format, implicit_ident_advice()); - } - } - strbuf_release(&author_ident); - strbuf_release(&committer_ident); - - init_revisions(&rev, prefix); - setup_revisions(0, NULL, &rev, NULL); - - rev.diff = 1; - rev.diffopt.output_format = - DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY; - - rev.verbose_header = 1; - rev.show_root_diff = 1; - get_commit_format(format.buf, &rev); - rev.always_show_header = 0; - rev.diffopt.detect_rename = DIFF_DETECT_RENAME; - rev.diffopt.break_opt = 0; - diff_setup_done(&rev.diffopt); - - head = resolve_ref_unsafe("HEAD", 0, NULL, NULL); - if (!head) - die_errno(_("unable to resolve HEAD after creating commit")); - if (!strcmp(head, "HEAD")) - head = _("detached HEAD"); - else - skip_prefix(head, "refs/heads/", &head); - printf("[%s%s ", head, initial_commit ? 
_(" (root-commit)") : ""); - - if (!log_tree_commit(&rev, commit)) { - rev.always_show_header = 1; - rev.use_terminator = 1; - log_tree_commit(&rev, commit); - } - - strbuf_release(&format); -} - static int git_commit_config(const char *k, const char *v, void *cb) { struct wt_status *s = cb; @@ -1560,37 +1379,6 @@ static int git_commit_config(const char *k, const char *v, void *cb) return git_status_config(k, v, s); } -static int run_rewrite_hook(const struct object_id *oldoid, - const struct object_id *newoid) -{ - struct child_process proc = CHILD_PROCESS_INIT; - const char *argv[3]; - int code; - struct strbuf sb = STRBUF_INIT; - - argv[0] = find_hook("post-rewrite"); - if (!argv[0]) - return 0; - - argv[1] = "amend"; - argv[2] = NULL; - - proc.argv = argv; - proc.in = -1; - proc.stdout_to_stderr = 1; - - code = start_command(&proc); - if (code) - return code; - strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid)); - sigchain_push(SIGPIPE, SIG_IGN); - write_in_full(proc.in, sb.buf, sb.len); - close(proc.in); - strbuf_release(&sb); - sigchain_pop(SIGPIPE); - return finish_command(&proc); -} - int run_commit_hook(int editor_is_used, const char *index_file, const char *name, ...) { struct argv_array hook_env = ARGV_ARRAY_INIT; @@ -1615,6 +1403,7 @@ int run_commit_hook(int editor_is_used, const char *index_file, const char *name int cmd_commit(int argc, const char **argv, const char *prefix) { + const char *argv_gc_auto[] = {"gc", "--auto", NULL}; static struct wt_status s; static struct option builtin_commit_options[] = { OPT__QUIET(&quiet, N_("suppress summary after successful commit")), @@ -1650,6 +1439,8 @@ int cmd_commit(int argc, const char **argv, const char *prefix) OPT_SET_INT(0, "short", &status_format, N_("show status concisely"), STATUS_FORMAT_SHORT), OPT_BOOL(0, "branch", &s.show_branch, N_("show branch information")), + OPT_BOOL(0, "ahead-behind", &s.ahead_behind_flags, + N_("compute full ahead/behind values")), OPT_SET_INT(0, "porcelain", &status_format, N_("machine-readable output"), STATUS_FORMAT_PORCELAIN), OPT_SET_INT(0, "long", &status_format, @@ -1673,13 +1464,11 @@ int cmd_commit(int argc, const char **argv, const char *prefix) struct strbuf sb = STRBUF_INIT; struct strbuf author_ident = STRBUF_INIT; const char *index_file, *reflog_msg; - char *nl; struct object_id oid; struct commit_list *parents = NULL; struct stat statbuf; struct commit *current_head = NULL; struct commit_extra_header *extra = NULL; - struct ref_transaction *transaction; struct strbuf err = STRBUF_INIT; if (argc == 2 && !strcmp(argv[1], "-h")) @@ -1770,17 +1559,17 @@ int cmd_commit(int argc, const char **argv, const char *prefix) } if (verbose || /* Truncate the message just before the diff, if any. 
*/
-	    cleanup_mode == CLEANUP_SCISSORS)
+	    cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
 		strbuf_setlen(&sb, wt_status_locate_end(sb.buf, sb.len));
 
-	if (cleanup_mode != CLEANUP_NONE)
-		strbuf_stripspace(&sb, cleanup_mode == CLEANUP_ALL);
+	if (cleanup_mode != COMMIT_MSG_CLEANUP_NONE)
+		strbuf_stripspace(&sb, cleanup_mode == COMMIT_MSG_CLEANUP_ALL);
 
-	if (message_is_empty(&sb) && !allow_empty_message) {
+	if (message_is_empty(&sb, cleanup_mode) && !allow_empty_message) {
 		rollback_index_files();
 		fprintf(stderr, _("Aborting commit due to empty commit message.\n"));
 		exit(1);
 	}
-	if (template_untouched(&sb) && !allow_empty_message) {
+	if (template_untouched(&sb, template_file, cleanup_mode) && !allow_empty_message) {
 		rollback_index_files();
 		fprintf(stderr, _("Aborting commit; you did not edit the message.\n"));
 		exit(1);
@@ -1794,33 +1583,20 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
 		append_merge_tag_headers(parents, &tail);
 	}
 
-	if (commit_tree_extended(sb.buf, sb.len, active_cache_tree->oid.hash,
-				 parents, oid.hash, author_ident.buf, sign_commit, extra)) {
+	if (commit_tree_extended(sb.buf, sb.len, &active_cache_tree->oid,
+				 parents, &oid, author_ident.buf, sign_commit,
+				 extra)) {
 		rollback_index_files();
 		die(_("failed to write commit object"));
 	}
 	strbuf_release(&author_ident);
 	free_commit_extra_headers(extra);
 
-	nl = strchr(sb.buf, '\n');
-	if (nl)
-		strbuf_setlen(&sb, nl + 1 - sb.buf);
-	else
-		strbuf_addch(&sb, '\n');
-	strbuf_insert(&sb, 0, reflog_msg, strlen(reflog_msg));
-	strbuf_insert(&sb, strlen(reflog_msg), ": ", 2);
-
-	transaction = ref_transaction_begin(&err);
-	if (!transaction ||
-	    ref_transaction_update(transaction, "HEAD", &oid,
-				   current_head
-				   ? &current_head->object.oid : &null_oid,
-				   0, sb.buf, &err) ||
-	    ref_transaction_commit(transaction, &err)) {
+	if (update_head_with_reflog(current_head, &oid, reflog_msg, &sb,
+				    &err)) {
 		rollback_index_files();
 		die("%s", err.buf);
 	}
-	ref_transaction_free(transaction);
 
 	unlink(git_path_cherry_pick_head());
 	unlink(git_path_revert_head());
@@ -1835,19 +1611,20 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
 			   "not exceeded, and then \"git reset HEAD\" to recover."));
 
 	rerere(0);
+	run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
 	run_commit_hook(use_editor, get_index_file(), "post-commit", NULL);
 	if (amend && !no_post_rewrite) {
-		struct notes_rewrite_cfg *cfg;
-		cfg = init_copy_notes_for_rewrite("amend");
-		if (cfg) {
-			/* we are amending, so current_head is not NULL */
-			copy_note_for_rewrite(cfg, &current_head->object.oid, &oid);
-			finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'");
-		}
-		run_rewrite_hook(&current_head->object.oid, &oid);
+		commit_post_rewrite(current_head, &oid);
+	}
+	if (!quiet) {
+		unsigned int flags = 0;
+
+		if (!current_head)
+			flags |= SUMMARY_INITIAL_COMMIT;
+		if (author_date_is_interesting())
+			flags |= SUMMARY_SHOW_AUTHOR_DATE;
+		print_commit_summary(prefix, &oid, flags);
 	}
-	if (!quiet)
-		print_summary(prefix, &oid, !current_head);
 
 	UNLEAK(err);
 	UNLEAK(sb);
diff --git a/builtin/config.c b/builtin/config.c
index ab5f95476e..01169dd628 100644
--- a/builtin/config.c
+++ b/builtin/config.c
@@ -48,6 +48,13 @@ static int show_origin;
 #define ACTION_GET_COLORBOOL (1<<14)
 #define ACTION_GET_URLMATCH (1<<15)
 
+/*
+ * The actions "ACTION_LIST | ACTION_GET_*" which may produce more than
+ * one line of output and which should therefore be paged.
+ */ +#define PAGING_ACTIONS (ACTION_LIST | ACTION_GET_ALL | \ + ACTION_GET_REGEXP | ACTION_GET_URLMATCH) + #define TYPE_BOOL (1<<0) #define TYPE_INT (1<<1) #define TYPE_BOOL_OR_INT (1<<2) @@ -594,6 +601,9 @@ int cmd_config(int argc, const char **argv, const char *prefix) usage_with_options(builtin_config_usage, builtin_config_options); } + if (actions & PAGING_ACTIONS) + setup_auto_pager("config", 1); + if (actions == ACTION_LIST) { check_argc(argc, 0, 0); if (config_with_options(show_all_config, NULL, diff --git a/builtin/describe.c b/builtin/describe.c index c428984706..e4869df7b4 100644 --- a/builtin/describe.c +++ b/builtin/describe.c @@ -502,7 +502,7 @@ static void describe(const char *arg, int last_one) if (cmit) describe_commit(&oid, &sb); - else if (lookup_blob(&oid)) + else if (sha1_object_info(oid.hash, NULL) == OBJ_BLOB) describe_blob(oid, &sb); else die(_("%s is neither a commit nor blob"), arg); diff --git a/builtin/diff-tree.c b/builtin/diff-tree.c index b775a75647..473615117e 100644 --- a/builtin/diff-tree.c +++ b/builtin/diff-tree.c @@ -76,7 +76,7 @@ static int diff_tree_stdin(char *line) if (obj->type == OBJ_TREE) return stdin_diff_trees((struct tree *)obj, p); error("Object %s is a %s, not a commit or tree", - oid_to_hex(&oid), typename(obj->type)); + oid_to_hex(&oid), type_name(obj->type)); return -1; } diff --git a/builtin/fast-export.c b/builtin/fast-export.c index 796d0cd66c..27b2cc138e 100644 --- a/builtin/fast-export.c +++ b/builtin/fast-export.c @@ -240,7 +240,7 @@ static void export_blob(const struct object_id *oid) buf = read_sha1_file(oid->hash, &type, &size); if (!buf) die ("Could not read blob %s", oid_to_hex(oid)); - if (check_sha1_signature(oid->hash, buf, size, typename(type)) < 0) + if (check_sha1_signature(oid->hash, buf, size, type_name(type)) < 0) die("sha1 mismatch in blob %s", oid_to_hex(oid)); object = parse_object_buffer(oid, type, size, buf, &eaten); } @@ -757,7 +757,7 @@ static void handle_tag(const char *name, struct tag *tag) if (tagged->type != OBJ_COMMIT) { die ("Tag %s tags unexported %s!", oid_to_hex(&tag->object.oid), - typename(tagged->type)); + type_name(tagged->type)); } p = (struct commit *)tagged; for (;;) { @@ -839,7 +839,7 @@ static void get_tags_and_duplicates(struct rev_cmdline_info *info) if (!commit) { warning("%s: Unexpected object of type %s, skipping.", e->name, - typename(e->item->type)); + type_name(e->item->type)); continue; } @@ -851,7 +851,7 @@ static void get_tags_and_duplicates(struct rev_cmdline_info *info) continue; default: /* OBJ_TAG (nested tags) is already handled */ warning("Tag points to object of unexpected type %s, skipping.", - typename(commit->object.type)); + type_name(commit->object.type)); continue; } diff --git a/builtin/fetch-pack.c b/builtin/fetch-pack.c index 366b9d13f9..a7bc1366ab 100644 --- a/builtin/fetch-pack.c +++ b/builtin/fetch-pack.c @@ -53,6 +53,8 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix) struct oid_array shallow = OID_ARRAY_INIT; struct string_list deepen_not = STRING_LIST_INIT_DUP; + fetch_if_missing = 0; + packet_trace_identity("fetch-pack"); memset(&args, 0, sizeof(args)); @@ -143,6 +145,22 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix) args.update_shallow = 1; continue; } + if (!strcmp("--from-promisor", arg)) { + args.from_promisor = 1; + continue; + } + if (!strcmp("--no-dependents", arg)) { + args.no_dependents = 1; + continue; + } + if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) { + 
parse_list_objects_filter(&args.filter_options, arg); + continue; + } + if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) { + list_objects_filter_set_no_filter(&args.filter_options); + continue; + } usage(fetch_pack_usage); } if (deepen_not.nr) diff --git a/builtin/fetch.c b/builtin/fetch.c index 7bbcd26faf..6d73656a48 100644 --- a/builtin/fetch.c +++ b/builtin/fetch.c @@ -19,6 +19,7 @@ #include "argv-array.h" #include "utf8.h" #include "packfile.h" +#include "list-objects-filter-options.h" static const char * const builtin_fetch_usage[] = { N_("git fetch [<options>] [<repository> [<refspec>...]]"), @@ -38,6 +39,10 @@ static int fetch_prune_config = -1; /* unspecified */ static int prune = -1; /* unspecified */ #define PRUNE_BY_DEFAULT 0 /* do we prune by default? */ +static int fetch_prune_tags_config = -1; /* unspecified */ +static int prune_tags = -1; /* unspecified */ +#define PRUNE_TAGS_BY_DEFAULT 0 /* do we prune tags by default? */ + static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity, deepen_relative; static int progress = -1; static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen; @@ -56,6 +61,7 @@ static int recurse_submodules_default = RECURSE_SUBMODULES_ON_DEMAND; static int shown_url = 0; static int refmap_alloc, refmap_nr; static const char **refmap_array; +static struct list_objects_filter_options filter_options; static int git_fetch_config(const char *k, const char *v, void *cb) { @@ -64,6 +70,11 @@ static int git_fetch_config(const char *k, const char *v, void *cb) return 0; } + if (!strcmp(k, "fetch.prunetags")) { + fetch_prune_tags_config = git_config_bool(k, v); + return 0; + } + if (!strcmp(k, "submodule.recurse")) { int r = git_config_bool(k, v) ? RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF; @@ -115,7 +126,7 @@ static struct option builtin_fetch_options[] = { N_("append to .git/FETCH_HEAD instead of overwriting")), OPT_STRING(0, "upload-pack", &upload_pack, N_("path"), N_("path to upload pack on remote end")), - OPT__FORCE(&force, N_("force overwrite of local branch")), + OPT__FORCE(&force, N_("force overwrite of local branch"), 0), OPT_BOOL('m', "multiple", &multiple, N_("fetch from multiple remotes")), OPT_SET_INT('t', "tags", &tags, @@ -126,6 +137,8 @@ static struct option builtin_fetch_options[] = { N_("number of submodules fetched in parallel")), OPT_BOOL('p', "prune", &prune, N_("prune remote-tracking branches no longer on remote")), + OPT_BOOL('P', "prune-tags", &prune_tags, + N_("prune local tags no longer on remote and clobber changed tags")), { OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, N_("on-demand"), N_("control recursive fetching of submodules"), PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules }, @@ -161,6 +174,7 @@ static struct option builtin_fetch_options[] = { TRANSPORT_FAMILY_IPV4), OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"), TRANSPORT_FAMILY_IPV6), + OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options), OPT_END() }; @@ -1045,6 +1059,11 @@ static struct transport *prepare_transport(struct remote *remote, int deepen) set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, "yes"); if (update_shallow) set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes"); + if (filter_options.choice) { + set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER, + filter_options.filter_spec); + set_option(transport, TRANS_OPT_FROM_PROMISOR, "1"); + } return transport; } @@ -1212,6 +1231,8 @@ static void add_options_to_argv(struct argv_array *argv) argv_array_push(argv, "--dry-run"); if (prune != -1) 
argv_array_push(argv, prune ? "--prune" : "--no-prune"); + if (prune_tags != -1) + argv_array_push(argv, prune_tags ? "--prune-tags" : "--no-prune-tags"); if (update_head_ok) argv_array_push(argv, "--update-head-ok"); if (force) @@ -1265,12 +1286,65 @@ static int fetch_multiple(struct string_list *list) return result; } -static int fetch_one(struct remote *remote, int argc, const char **argv) +/* + * Fetching from the promisor remote should use the given filter-spec + * or inherit the default filter-spec from the config. + */ +static inline void fetch_one_setup_partial(struct remote *remote) +{ + /* + * Explicit --no-filter argument overrides everything, regardless + * of any prior partial clones and fetches. + */ + if (filter_options.no_filter) + return; + + /* + * If no prior partial clone/fetch and the current fetch DID NOT + * request a partial-fetch, do a normal fetch. + */ + if (!repository_format_partial_clone && !filter_options.choice) + return; + + /* + * If this is the FIRST partial-fetch request, we enable partial + * on this repo and remember the given filter-spec as the default + * for subsequent fetches to this remote. + */ + if (!repository_format_partial_clone && filter_options.choice) { + partial_clone_register(remote->name, &filter_options); + return; + } + + /* + * We are currently limited to only ONE promisor remote and only + * allow partial-fetches from the promisor remote. + */ + if (strcmp(remote->name, repository_format_partial_clone)) { + if (filter_options.choice) + die(_("--filter can only be used with the remote configured in core.partialClone")); + return; + } + + /* + * Do a partial-fetch from the promisor remote using either the + * explicitly given filter-spec or inherit the filter-spec from + * the config. + */ + if (!filter_options.choice) + partial_clone_get_default_filter_spec(&filter_options); + return; +} + +static int fetch_one(struct remote *remote, int argc, const char **argv, int prune_tags_ok) { static const char **refs = NULL; struct refspec *refspec; int ref_nr = 0; + int j = 0; int exit_code; + int maybe_prune_tags; + int remote_via_config = remote_is_configured(remote, 0); if (!remote) die(_("No remote repository specified. 
Please, specify either a URL or a\n" @@ -1280,18 +1354,39 @@ static int fetch_one(struct remote *remote, int argc, const char **argv) if (prune < 0) { /* no command line request */ - if (0 <= gtransport->remote->prune) - prune = gtransport->remote->prune; + if (0 <= remote->prune) + prune = remote->prune; else if (0 <= fetch_prune_config) prune = fetch_prune_config; else prune = PRUNE_BY_DEFAULT; } + if (prune_tags < 0) { + /* no command line request */ + if (0 <= remote->prune_tags) + prune_tags = remote->prune_tags; + else if (0 <= fetch_prune_tags_config) + prune_tags = fetch_prune_tags_config; + else + prune_tags = PRUNE_TAGS_BY_DEFAULT; + } + + maybe_prune_tags = prune_tags_ok && prune_tags; + if (maybe_prune_tags && remote_via_config) + add_prune_tags_to_fetch_refspec(remote); + + if (argc > 0 || (maybe_prune_tags && !remote_via_config)) { + size_t nr_alloc = st_add3(argc, maybe_prune_tags, 1); + refs = xcalloc(nr_alloc, sizeof(const char *)); + if (maybe_prune_tags) { + refs[j++] = xstrdup("refs/tags/*:refs/tags/*"); + ref_nr++; + } + } + if (argc > 0) { - int j = 0; int i; - refs = xcalloc(st_add(argc, 1), sizeof(const char *)); for (i = 0; i < argc; i++) { if (!strcmp(argv[i], "tag")) { i++; @@ -1301,9 +1396,8 @@ static int fetch_one(struct remote *remote, int argc, const char **argv) argv[i], argv[i]); } else refs[j++] = argv[i]; + ref_nr++; } - refs[j] = NULL; - ref_nr = j; } sigchain_push_common(unlock_pack_on_signal); @@ -1320,12 +1414,15 @@ int cmd_fetch(int argc, const char **argv, const char *prefix) { int i; struct string_list list = STRING_LIST_INIT_DUP; - struct remote *remote; + struct remote *remote = NULL; int result = 0; + int prune_tags_ok = 1; struct argv_array argv_gc_auto = ARGV_ARRAY_INIT; packet_trace_identity("fetch"); + fetch_if_missing = 0; + /* Record the command line for the reflog */ strbuf_addstr(&default_rla, "fetch"); for (i = 1; i < argc; i++) @@ -1359,23 +1456,23 @@ int cmd_fetch(int argc, const char **argv, const char *prefix) if (depth || deepen_since || deepen_not.nr) deepen = 1; + if (filter_options.choice && !repository_format_partial_clone) + die("--filter can only be used when extensions.partialClone is set"); + if (all) { if (argc == 1) die(_("fetch --all does not take a repository argument")); else if (argc > 1) die(_("fetch --all does not make sense with refspecs")); (void) for_each_remote(get_one_remote_for_fetch, &list); - result = fetch_multiple(&list); } else if (argc == 0) { /* No arguments -- use default remote */ remote = remote_get(NULL); - result = fetch_one(remote, argc, argv); } else if (multiple) { /* All arguments are assumed to be remotes or groups */ for (i = 0; i < argc; i++) if (!add_remote_or_group(argv[i], &list)) die(_("No such remote or remote group: %s"), argv[i]); - result = fetch_multiple(&list); } else { /* Single remote or group */ (void) add_remote_or_group(argv[0], &list); @@ -1383,14 +1480,26 @@ int cmd_fetch(int argc, const char **argv, const char *prefix) /* More than one remote */ if (argc > 1) die(_("Fetching a group and specifying refspecs does not make sense")); - result = fetch_multiple(&list); } else { /* Zero or one remotes */ remote = remote_get(argv[0]); - result = fetch_one(remote, argc-1, argv+1); + prune_tags_ok = (argc == 1); + argc--; + argv++; } } + if (remote) { + if (filter_options.choice || repository_format_partial_clone) + fetch_one_setup_partial(remote); + result = fetch_one(remote, argc, argv, prune_tags_ok); + } else { + if (filter_options.choice) + die(_("--filter can only be used with 
the remote configured in core.partialClone")); + /* TODO should this also die if we have a previous partial-clone? */ + result = fetch_multiple(&list); + } + if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) { struct argv_array options = ARGV_ARRAY_INIT; diff --git a/builtin/fsck.c b/builtin/fsck.c index 92ce775a74..ef78c6c00c 100644 --- a/builtin/fsck.c +++ b/builtin/fsck.c @@ -70,7 +70,7 @@ static const char *printable_type(struct object *obj) object_as_type(obj, type, 0); } - ret = typename(obj->type); + ret = type_name(obj->type); if (!ret) ret = "unknown"; @@ -137,7 +137,7 @@ static int mark_object(struct object *obj, int type, void *data, struct fsck_opt printf("broken link from %7s %s\n", printable_type(parent), describe_object(parent)); printf("broken link from %7s %s\n", - (type == OBJ_ANY ? "unknown" : typename(type)), "unknown"); + (type == OBJ_ANY ? "unknown" : type_name(type)), "unknown"); errors_found |= ERROR_REACHABLE; return 1; } @@ -149,6 +149,15 @@ static int mark_object(struct object *obj, int type, void *data, struct fsck_opt if (obj->flags & REACHABLE) return 0; obj->flags |= REACHABLE; + + if (is_promisor_object(&obj->oid)) + /* + * Further recursion does not need to be performed on this + * object since it is a promisor object (so it does not need to + * be added to "pending"). + */ + return 0; + if (!(obj->flags & HAS_OBJ)) { if (parent && !has_object_file(&obj->oid)) { printf("broken link from %7s %s\n", @@ -214,6 +223,8 @@ static void check_reachable_object(struct object *obj) * do a full fsck */ if (!(obj->flags & HAS_OBJ)) { + if (is_promisor_object(&obj->oid)) + return; if (has_sha1_pack(obj->oid.hash)) return; /* it is in pack - forget about it */ printf("missing %s %s\n", printable_type(obj), @@ -404,7 +415,7 @@ static void fsck_handle_reflog_oid(const char *refname, struct object_id *oid, xstrfmt("%s@{%"PRItime"}", refname, timestamp)); obj->flags |= USED; mark_object_reachable(obj); - } else { + } else if (!is_promisor_object(oid)) { error("%s: invalid reflog entry %s", refname, oid_to_hex(oid)); errors_found |= ERROR_REACHABLE; } @@ -440,6 +451,14 @@ static int fsck_handle_ref(const char *refname, const struct object_id *oid, obj = parse_object(oid); if (!obj) { + if (is_promisor_object(oid)) { + /* + * Increment default_refs anyway, because this is a + * valid ref. + */ + default_refs++; + return 0; + } error("%s: invalid sha1 pointer %s", refname, oid_to_hex(oid)); errors_found |= ERROR_REACHABLE; /* We'll continue with the rest despite the error.. 
*/ @@ -665,6 +684,9 @@ int cmd_fsck(int argc, const char **argv, const char *prefix) int i; struct alternate_object_database *alt; + /* fsck knows how to handle missing promisor objects */ + fetch_if_missing = 0; + errors_found = 0; check_replace_refs = 0; @@ -737,6 +759,8 @@ int cmd_fsck(int argc, const char **argv, const char *prefix) struct object *obj = lookup_object(oid.hash); if (!obj || !(obj->flags & HAS_OBJ)) { + if (is_promisor_object(&oid)) + continue; error("%s: object missing", oid_to_hex(&oid)); errors_found |= ERROR_OBJECT; continue; diff --git a/builtin/gc.c b/builtin/gc.c index 3c5eae0edf..f51e5a6500 100644 --- a/builtin/gc.c +++ b/builtin/gc.c @@ -360,8 +360,11 @@ int cmd_gc(int argc, const char **argv, const char *prefix) N_("prune unreferenced objects"), PARSE_OPT_OPTARG, NULL, (intptr_t)prune_expire }, OPT_BOOL(0, "aggressive", &aggressive, N_("be more thorough (increased runtime)")), - OPT_BOOL(0, "auto", &auto_gc, N_("enable auto-gc mode")), - OPT_BOOL(0, "force", &force, N_("force running gc even if there may be another gc running")), + OPT_BOOL_F(0, "auto", &auto_gc, N_("enable auto-gc mode"), + PARSE_OPT_NOCOMPLETE), + OPT_BOOL_F(0, "force", &force, + N_("force running gc even if there may be another gc running"), + PARSE_OPT_NOCOMPLETE), OPT_END() }; @@ -458,6 +461,9 @@ int cmd_gc(int argc, const char **argv, const char *prefix) argv_array_push(&prune, prune_expire); if (quiet) argv_array_push(&prune, "--no-progress"); + if (repository_format_partial_clone) + argv_array_push(&prune, + "--exclude-promisor-objects"); if (run_command_v_opt(prune.argv, RUN_GIT_CMD)) return error(FAILED_RUN, prune.argv[0]); } diff --git a/builtin/grep.c b/builtin/grep.c index 3ca4ac80d8..789a89133a 100644 --- a/builtin/grep.c +++ b/builtin/grep.c @@ -92,8 +92,7 @@ static pthread_cond_t cond_result; static int skip_first_line; -static void add_work(struct grep_opt *opt, enum grep_source_type type, - const char *name, const char *path, const void *id) +static void add_work(struct grep_opt *opt, const struct grep_source *gs) { grep_lock(); @@ -101,7 +100,7 @@ static void add_work(struct grep_opt *opt, enum grep_source_type type, pthread_cond_wait(&cond_write, &grep_mutex); } - grep_source_init(&todo[todo_end].source, type, name, path, id); + todo[todo_end].source = *gs; if (opt->binary != GREP_BINARY_TEXT) grep_source_load_driver(&todo[todo_end].source); todo[todo_end].done = 0; @@ -317,6 +316,7 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid, const char *path) { struct strbuf pathbuf = STRBUF_INIT; + struct grep_source gs; if (opt->relative && opt->prefix_length) { quote_path_relative(filename + tree_name_len, opt->prefix, &pathbuf); @@ -325,19 +325,22 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid, strbuf_addstr(&pathbuf, filename); } + grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid); + strbuf_release(&pathbuf); + #ifndef NO_PTHREADS if (num_threads) { - add_work(opt, GREP_SOURCE_OID, pathbuf.buf, path, oid); - strbuf_release(&pathbuf); + /* + * add_work() copies gs and thus assumes ownership of + * its fields, so do not call grep_source_clear() + */ + add_work(opt, &gs); return 0; } else #endif { - struct grep_source gs; int hit; - grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid); - strbuf_release(&pathbuf); hit = grep_source(opt, &gs); grep_source_clear(&gs); @@ -348,25 +351,29 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid, static int grep_file(struct grep_opt *opt, const char 
*filename) { struct strbuf buf = STRBUF_INIT; + struct grep_source gs; if (opt->relative && opt->prefix_length) quote_path_relative(filename, opt->prefix, &buf); else strbuf_addstr(&buf, filename); + grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename); + strbuf_release(&buf); + #ifndef NO_PTHREADS if (num_threads) { - add_work(opt, GREP_SOURCE_FILE, buf.buf, filename, filename); - strbuf_release(&buf); + /* + * add_work() copies gs and thus assumes ownership of + * its fields, so do not call grep_source_clear() + */ + add_work(opt, &gs); return 0; } else #endif { - struct grep_source gs; int hit; - grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename); - strbuf_release(&buf); hit = grep_source(opt, &gs); grep_source_clear(&gs); @@ -627,7 +634,7 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec, free(data); return hit; } - die(_("unable to grep from object of type %s"), typename(obj->type)); + die(_("unable to grep from object of type %s"), type_name(obj->type)); } static int grep_objects(struct grep_opt *opt, const struct pathspec *pathspec, @@ -832,8 +839,9 @@ int cmd_grep(int argc, const char **argv, const char *prefix) OPT_BOOL('L', "files-without-match", &opt.unmatch_name_only, N_("show only the names of files without match")), - OPT_BOOL('z', "null", &opt.null_following_name, - N_("print NUL after filenames")), + OPT_BOOL_F('z', "null", &opt.null_following_name, + N_("print NUL after filenames"), + PARSE_OPT_NOCOMPLETE), OPT_BOOL('c', "count", &opt.count, N_("show the number of matches instead of matching lines")), OPT__COLOR(&opt.color, N_("highlight matches")), @@ -884,9 +892,11 @@ int cmd_grep(int argc, const char **argv, const char *prefix) OPT_GROUP(""), { OPTION_STRING, 'O', "open-files-in-pager", &show_in_pager, N_("pager"), N_("show matching files in the pager"), - PARSE_OPT_OPTARG, NULL, (intptr_t)default_pager }, - OPT_BOOL(0, "ext-grep", &external_grep_allowed__ignored, - N_("allow calling of grep(1) (ignored by this build)")), + PARSE_OPT_OPTARG | PARSE_OPT_NOCOMPLETE, + NULL, (intptr_t)default_pager }, + OPT_BOOL_F(0, "ext-grep", &external_grep_allowed__ignored, + N_("allow calling of grep(1) (ignored by this build)"), + PARSE_OPT_NOCOMPLETE), OPT_END() }; diff --git a/builtin/hash-object.c b/builtin/hash-object.c index c532ff9320..526da5c185 100644 --- a/builtin/hash-object.c +++ b/builtin/hash-object.c @@ -24,7 +24,8 @@ static int hash_literally(struct object_id *oid, int fd, const char *type, unsig if (strbuf_read(&buf, fd, 4096) < 0) ret = -1; else - ret = hash_sha1_file_literally(buf.buf, buf.len, type, oid, flags); + ret = hash_object_file_literally(buf.buf, buf.len, type, oid, + flags); strbuf_release(&buf); return ret; } diff --git a/builtin/help.c b/builtin/help.c index d3c8fc4082..598867cfea 100644 --- a/builtin/help.c +++ b/builtin/help.c @@ -194,11 +194,11 @@ static void do_add_man_viewer_info(const char *name, size_t len, const char *value) { - struct man_viewer_info_list *new; - FLEX_ALLOC_MEM(new, name, name, len); - new->info = xstrdup(value); - new->next = man_viewer_info_list; - man_viewer_info_list = new; + struct man_viewer_info_list *new_man_viewer; + FLEX_ALLOC_MEM(new_man_viewer, name, name, len); + new_man_viewer->info = xstrdup(value); + new_man_viewer->next = man_viewer_info_list; + man_viewer_info_list = new_man_viewer; } static int add_man_viewer_path(const char *name, diff --git a/builtin/index-pack.c b/builtin/index-pack.c index 4c51aec81f..bda84a92ef 100644 --- 
a/builtin/index-pack.c +++ b/builtin/index-pack.c @@ -49,6 +49,7 @@ struct thread_local { int pack_fd; }; +/* Remember to update object flag allocation in object.h */ #define FLAG_LINK (1u<<20) #define FLAG_CHECKED (1u<<21) @@ -91,7 +92,7 @@ static unsigned int input_offset, input_len; static off_t consumed_bytes; static off_t max_input_size; static unsigned deepest_delta; -static git_SHA_CTX input_ctx; +static git_hash_ctx input_ctx; static uint32_t input_crc32; static int input_fd, output_fd; static const char *curr_pack; @@ -228,7 +229,7 @@ static unsigned check_object(struct object *obj) if (type != obj->type) die(_("object %s: expected type %s, found %s"), oid_to_hex(&obj->oid), - typename(obj->type), typename(type)); + type_name(obj->type), type_name(type)); obj->flags |= FLAG_CHECKED; return 1; } @@ -253,7 +254,7 @@ static void flush(void) if (input_offset) { if (output_fd >= 0) write_or_die(output_fd, input_buffer, input_offset); - git_SHA1_Update(&input_ctx, input_buffer, input_offset); + the_hash_algo->update_fn(&input_ctx, input_buffer, input_offset); memmove(input_buffer, input_buffer + input_offset, input_len); input_offset = 0; } @@ -326,7 +327,7 @@ static const char *open_pack_file(const char *pack_name) output_fd = -1; nothread_data.pack_fd = input_fd; } - git_SHA1_Init(&input_ctx); + the_hash_algo->init_fn(&input_ctx); return pack_name; } @@ -437,22 +438,22 @@ static int is_delta_type(enum object_type type) } static void *unpack_entry_data(off_t offset, unsigned long size, - enum object_type type, unsigned char *sha1) + enum object_type type, struct object_id *oid) { static char fixed_buf[8192]; int status; git_zstream stream; void *buf; - git_SHA_CTX c; + git_hash_ctx c; char hdr[32]; int hdrlen; if (!is_delta_type(type)) { - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), size) + 1; - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, hdrlen); + hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), size) + 1; + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, hdrlen); } else - sha1 = NULL; + oid = NULL; if (type == OBJ_BLOB && size > big_file_threshold) buf = fixed_buf; else @@ -469,8 +470,8 @@ static void *unpack_entry_data(off_t offset, unsigned long size, stream.avail_in = input_len; status = git_inflate(&stream, 0); use(input_len - stream.avail_in); - if (sha1) - git_SHA1_Update(&c, last_out, stream.next_out - last_out); + if (oid) + the_hash_algo->update_fn(&c, last_out, stream.next_out - last_out); if (buf == fixed_buf) { stream.next_out = buf; stream.avail_out = sizeof(fixed_buf); @@ -479,15 +480,15 @@ static void *unpack_entry_data(off_t offset, unsigned long size, if (stream.total_out != size || status != Z_STREAM_END) bad_object(offset, _("inflate returned %d"), status); git_inflate_end(&stream); - if (sha1) - git_SHA1_Final(sha1, &c); + if (oid) + the_hash_algo->final_fn(oid->hash, &c); return buf == fixed_buf ? 
NULL : buf; } static void *unpack_raw_entry(struct object_entry *obj, off_t *ofs_offset, - unsigned char *ref_sha1, - unsigned char *sha1) + struct object_id *ref_oid, + struct object_id *oid) { unsigned char *p; unsigned long size, c; @@ -515,8 +516,8 @@ static void *unpack_raw_entry(struct object_entry *obj, switch (obj->type) { case OBJ_REF_DELTA: - hashcpy(ref_sha1, fill(20)); - use(20); + hashcpy(ref_oid->hash, fill(the_hash_algo->rawsz)); + use(the_hash_algo->rawsz); break; case OBJ_OFS_DELTA: p = fill(1); @@ -546,7 +547,7 @@ static void *unpack_raw_entry(struct object_entry *obj, } obj->hdr_size = consumed_bytes - obj->idx.offset; - data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, sha1); + data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid); obj->idx.crc32 = input_crc32; return data; } @@ -827,7 +828,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry, free(has_data); } - if (strict) { + if (strict || do_fsck_object) { read_lock(); if (type == OBJ_BLOB) { struct blob *blob = lookup_blob(oid); @@ -849,11 +850,11 @@ static void sha1_object(const void *data, struct object_entry *obj_entry, obj = parse_object_buffer(oid, type, size, buf, &eaten); if (!obj) - die(_("invalid %s"), typename(type)); + die(_("invalid %s"), type_name(type)); if (do_fsck_object && fsck_object(obj, buf, size, &fsck_options)) die(_("Error in object")); - if (fsck_walk(obj, NULL, &fsck_options)) + if (strict && fsck_walk(obj, NULL, &fsck_options)) die(_("Not all child objects of %s are reachable"), oid_to_hex(&obj->oid)); if (obj->type == OBJ_TREE) { @@ -958,9 +959,8 @@ static void resolve_delta(struct object_entry *delta_obj, free(delta_data); if (!result->data) bad_object(delta_obj->idx.offset, _("failed to apply delta")); - hash_sha1_file(result->data, result->size, - typename(delta_obj->real_type), - delta_obj->idx.oid.hash); + hash_object_file(result->data, result->size, + type_name(delta_obj->real_type), &delta_obj->idx.oid); sha1_object(result->data, NULL, result->size, delta_obj->real_type, &delta_obj->idx.oid); counter_lock(); @@ -1119,11 +1119,11 @@ static void *threaded_second_pass(void *data) * - calculate SHA1 of all non-delta objects; * - remember base (SHA1 or offset) for all deltas. 
*/ -static void parse_pack_objects(unsigned char *sha1) +static void parse_pack_objects(unsigned char *hash) { int i, nr_delays = 0; struct ofs_delta_entry *ofs_delta = ofs_deltas; - unsigned char ref_delta_sha1[20]; + struct object_id ref_delta_oid; struct stat st; if (verbose) @@ -1133,8 +1133,8 @@ static void parse_pack_objects(unsigned char *sha1) for (i = 0; i < nr_objects; i++) { struct object_entry *obj = &objects[i]; void *data = unpack_raw_entry(obj, &ofs_delta->offset, - ref_delta_sha1, - obj->idx.oid.hash); + &ref_delta_oid, + &obj->idx.oid); obj->real_type = obj->type; if (obj->type == OBJ_OFS_DELTA) { nr_ofs_deltas++; @@ -1142,7 +1142,7 @@ static void parse_pack_objects(unsigned char *sha1) ofs_delta++; } else if (obj->type == OBJ_REF_DELTA) { ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc); - hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_sha1); + hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_oid.hash); ref_deltas[nr_ref_deltas].obj_no = i; nr_ref_deltas++; } else if (!data) { @@ -1160,10 +1160,10 @@ static void parse_pack_objects(unsigned char *sha1) /* Check pack integrity */ flush(); - git_SHA1_Final(sha1, &input_ctx); - if (hashcmp(fill(20), sha1)) + the_hash_algo->final_fn(hash, &input_ctx); + if (hashcmp(fill(the_hash_algo->rawsz), hash)) die(_("pack is corrupted (SHA1 mismatch)")); - use(20); + use(the_hash_algo->rawsz); /* If input_fd is a file, we should have reached its end now. */ if (fstat(input_fd, &st)) @@ -1239,21 +1239,21 @@ static void resolve_deltas(void) /* * Third pass: * - append objects to convert thin pack to full pack if required - * - write the final 20-byte SHA-1 + * - write the final pack hash */ -static void fix_unresolved_deltas(struct sha1file *f); -static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_sha1) +static void fix_unresolved_deltas(struct hashfile *f); +static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash) { if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) { stop_progress(&progress); - /* Flush remaining pack final 20-byte SHA1. */ + /* Flush remaining pack final hash. 
*/ flush(); return; } if (fix_thin_pack) { - struct sha1file *f; - unsigned char read_sha1[20], tail_sha1[20]; + struct hashfile *f; + unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ]; struct strbuf msg = STRBUF_INIT; int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas; int nr_objects_initial = nr_objects; @@ -1262,7 +1262,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1); memset(objects + nr_objects + 1, 0, nr_unresolved * sizeof(*objects)); - f = sha1fd(output_fd, curr_pack); + f = hashfd(output_fd, curr_pack); fix_unresolved_deltas(f); strbuf_addf(&msg, Q_("completed with %d local object", "completed with %d local objects", @@ -1270,12 +1270,12 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha nr_objects - nr_objects_initial); stop_progress_msg(&progress, msg.buf); strbuf_release(&msg); - sha1close(f, tail_sha1, 0); - hashcpy(read_sha1, pack_sha1); - fixup_pack_header_footer(output_fd, pack_sha1, + hashclose(f, tail_hash, 0); + hashcpy(read_hash, pack_hash); + fixup_pack_header_footer(output_fd, pack_hash, curr_pack, nr_objects, - read_sha1, consumed_bytes-20); - if (hashcmp(read_sha1, tail_sha1) != 0) + read_hash, consumed_bytes-the_hash_algo->rawsz); + if (hashcmp(read_hash, tail_hash) != 0) die(_("Unexpected tail checksum for %s " "(disk corruption?)"), curr_pack); } @@ -1286,7 +1286,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas); } -static int write_compressed(struct sha1file *f, void *in, unsigned int size) +static int write_compressed(struct hashfile *f, void *in, unsigned int size) { git_zstream stream; int status; @@ -1300,7 +1300,7 @@ static int write_compressed(struct sha1file *f, void *in, unsigned int size) stream.next_out = outbuf; stream.avail_out = sizeof(outbuf); status = git_deflate(&stream, Z_FINISH); - sha1write(f, outbuf, sizeof(outbuf) - stream.avail_out); + hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out); } while (status == Z_OK); if (status != Z_STREAM_END) @@ -1310,7 +1310,7 @@ static int write_compressed(struct sha1file *f, void *in, unsigned int size) return size; } -static struct object_entry *append_obj_to_pack(struct sha1file *f, +static struct object_entry *append_obj_to_pack(struct hashfile *f, const unsigned char *sha1, void *buf, unsigned long size, enum object_type type) { @@ -1327,7 +1327,7 @@ static struct object_entry *append_obj_to_pack(struct sha1file *f, } header[n++] = c; crc32_begin(f); - sha1write(f, header, n); + hashwrite(f, header, n); obj[0].size = size; obj[0].hdr_size = n; obj[0].type = type; @@ -1335,7 +1335,7 @@ static struct object_entry *append_obj_to_pack(struct sha1file *f, obj[1].idx.offset = obj[0].idx.offset + n; obj[1].idx.offset += write_compressed(f, buf, size); obj[0].idx.crc32 = crc32_end(f); - sha1flush(f); + hashflush(f); hashcpy(obj->idx.oid.hash, sha1); return obj; } @@ -1347,7 +1347,7 @@ static int delta_pos_compare(const void *_a, const void *_b) return a->obj_no - b->obj_no; } -static void fix_unresolved_deltas(struct sha1file *f) +static void fix_unresolved_deltas(struct hashfile *f) { struct ref_delta_entry **sorted_by_pos; int i; @@ -1379,7 +1379,7 @@ static void fix_unresolved_deltas(struct sha1file *f) continue; if (check_sha1_signature(d->sha1, base_obj->data, - base_obj->size, typename(type))) + base_obj->size, type_name(type))) die(_("local object %s is 
corrupt"), sha1_to_hex(d->sha1)); base_obj->obj = append_obj_to_pack(f, d->sha1, base_obj->data, base_obj->size, type); @@ -1389,15 +1389,60 @@ static void fix_unresolved_deltas(struct sha1file *f) free(sorted_by_pos); } +static const char *derive_filename(const char *pack_name, const char *suffix, + struct strbuf *buf) +{ + size_t len; + if (!strip_suffix(pack_name, ".pack", &len)) + die(_("packfile name '%s' does not end with '.pack'"), + pack_name); + strbuf_add(buf, pack_name, len); + strbuf_addch(buf, '.'); + strbuf_addstr(buf, suffix); + return buf->buf; +} + +static void write_special_file(const char *suffix, const char *msg, + const char *pack_name, const unsigned char *hash, + const char **report) +{ + struct strbuf name_buf = STRBUF_INIT; + const char *filename; + int fd; + int msg_len = strlen(msg); + + if (pack_name) + filename = derive_filename(pack_name, suffix, &name_buf); + else + filename = odb_pack_name(&name_buf, hash, suffix); + + fd = odb_pack_keep(filename); + if (fd < 0) { + if (errno != EEXIST) + die_errno(_("cannot write %s file '%s'"), + suffix, filename); + } else { + if (msg_len > 0) { + write_or_die(fd, msg, msg_len); + write_or_die(fd, "\n", 1); + } + if (close(fd) != 0) + die_errno(_("cannot close written %s file '%s'"), + suffix, filename); + if (report) + *report = suffix; + } + strbuf_release(&name_buf); +} + static void final(const char *final_pack_name, const char *curr_pack_name, const char *final_index_name, const char *curr_index_name, - const char *keep_name, const char *keep_msg, - unsigned char *sha1) + const char *keep_msg, const char *promisor_msg, + unsigned char *hash) { const char *report = "pack"; struct strbuf pack_name = STRBUF_INIT; struct strbuf index_name = STRBUF_INIT; - struct strbuf keep_name_buf = STRBUF_INIT; int err; if (!from_stdin) { @@ -1409,32 +1454,16 @@ static void final(const char *final_pack_name, const char *curr_pack_name, die_errno(_("error while closing pack file")); } - if (keep_msg) { - int keep_fd, keep_msg_len = strlen(keep_msg); - - if (!keep_name) - keep_name = odb_pack_name(&keep_name_buf, sha1, "keep"); - - keep_fd = odb_pack_keep(keep_name); - if (keep_fd < 0) { - if (errno != EEXIST) - die_errno(_("cannot write keep file '%s'"), - keep_name); - } else { - if (keep_msg_len > 0) { - write_or_die(keep_fd, keep_msg, keep_msg_len); - write_or_die(keep_fd, "\n", 1); - } - if (close(keep_fd) != 0) - die_errno(_("cannot close written keep file '%s'"), - keep_name); - report = "keep"; - } - } + if (keep_msg) + write_special_file("keep", keep_msg, final_pack_name, hash, + &report); + if (promisor_msg) + write_special_file("promisor", promisor_msg, final_pack_name, + hash, NULL); if (final_pack_name != curr_pack_name) { if (!final_pack_name) - final_pack_name = odb_pack_name(&pack_name, sha1, "pack"); + final_pack_name = odb_pack_name(&pack_name, hash, "pack"); if (finalize_object_file(curr_pack_name, final_pack_name)) die(_("cannot store pack file")); } else if (from_stdin) @@ -1442,18 +1471,18 @@ static void final(const char *final_pack_name, const char *curr_pack_name, if (final_index_name != curr_index_name) { if (!final_index_name) - final_index_name = odb_pack_name(&index_name, sha1, "idx"); + final_index_name = odb_pack_name(&index_name, hash, "idx"); if (finalize_object_file(curr_index_name, final_index_name)) die(_("cannot store index file")); } else chmod(final_index_name, 0444); if (!from_stdin) { - printf("%s\n", sha1_to_hex(sha1)); + printf("%s\n", sha1_to_hex(hash)); } else { struct strbuf buf = 
STRBUF_INIT; - strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(sha1)); + strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(hash)); write_or_die(1, buf.buf, buf.len); strbuf_release(&buf); @@ -1472,7 +1501,6 @@ static void final(const char *final_pack_name, const char *curr_pack_name, strbuf_release(&index_name); strbuf_release(&pack_name); - strbuf_release(&keep_name_buf); } static int git_index_pack_config(const char *k, const char *v, void *cb) @@ -1588,7 +1616,7 @@ static void show_pack_info(int stat_only) continue; printf("%s %-6s %lu %lu %"PRIuMAX, oid_to_hex(&obj->idx.oid), - typename(obj->real_type), obj->size, + type_name(obj->real_type), obj->size, (unsigned long)(obj[1].idx.offset - obj->idx.offset), (uintmax_t)obj->idx.offset); if (is_delta_type(obj->type)) { @@ -1615,32 +1643,26 @@ static void show_pack_info(int stat_only) } } -static const char *derive_filename(const char *pack_name, const char *suffix, - struct strbuf *buf) -{ - size_t len; - if (!strip_suffix(pack_name, ".pack", &len)) - die(_("packfile name '%s' does not end with '.pack'"), - pack_name); - strbuf_add(buf, pack_name, len); - strbuf_addstr(buf, suffix); - return buf->buf; -} - int cmd_index_pack(int argc, const char **argv, const char *prefix) { int i, fix_thin_pack = 0, verify = 0, stat_only = 0; const char *curr_index; const char *index_name = NULL, *pack_name = NULL; - const char *keep_name = NULL, *keep_msg = NULL; - struct strbuf index_name_buf = STRBUF_INIT, - keep_name_buf = STRBUF_INIT; + const char *keep_msg = NULL; + const char *promisor_msg = NULL; + struct strbuf index_name_buf = STRBUF_INIT; struct pack_idx_entry **idx_objects; struct pack_idx_option opts; - unsigned char pack_sha1[20]; + unsigned char pack_hash[GIT_MAX_RAWSZ]; unsigned foreign_nr = 1; /* zero is a "good" value, assume bad */ int report_end_of_input = 0; + /* + * index-pack never needs to fetch missing objects, since it only + * accesses the repo to do hash collision checks + */ + fetch_if_missing = 0; + if (argc == 2 && !strcmp(argv[1], "-h")) usage(index_pack_usage); @@ -1667,6 +1689,8 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) } else if (!strcmp(arg, "--check-self-contained-and-connected")) { strict = 1; check_self_contained_and_connected = 1; + } else if (!strcmp(arg, "--fsck-objects")) { + do_fsck_object = 1; } else if (!strcmp(arg, "--verify")) { verify = 1; } else if (!strcmp(arg, "--verify-stat")) { @@ -1678,6 +1702,8 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) stat_only = 1; } else if (skip_to_optional_arg(arg, "--keep", &keep_msg)) { ; /* nothing to do */ + } else if (skip_to_optional_arg(arg, "--promisor", &promisor_msg)) { + ; /* already parsed */ } else if (starts_with(arg, "--threads=")) { char *end; nr_threads = strtoul(arg+10, &end, 0); @@ -1740,9 +1766,7 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) if (from_stdin && !startup_info->have_repository) die(_("--stdin requires a git repository")); if (!index_name && pack_name) - index_name = derive_filename(pack_name, ".idx", &index_name_buf); - if (keep_msg && !keep_name && pack_name) - keep_name = derive_filename(pack_name, ".keep", &keep_name_buf); + index_name = derive_filename(pack_name, "idx", &index_name_buf); if (verify) { if (!index_name) @@ -1768,11 +1792,11 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) if (show_stat) obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat)); ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry)); 
- parse_pack_objects(pack_sha1); + parse_pack_objects(pack_hash); if (report_end_of_input) write_in_full(2, "\0", 1); resolve_deltas(); - conclude_pack(fix_thin_pack, curr_pack, pack_sha1); + conclude_pack(fix_thin_pack, curr_pack, pack_hash); free(ofs_deltas); free(ref_deltas); if (strict) @@ -1784,19 +1808,18 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) ALLOC_ARRAY(idx_objects, nr_objects); for (i = 0; i < nr_objects; i++) idx_objects[i] = &objects[i].idx; - curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_sha1); + curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash); free(idx_objects); if (!verify) final(pack_name, curr_pack, index_name, curr_index, - keep_name, keep_msg, - pack_sha1); + keep_msg, promisor_msg, + pack_hash); else close(input_fd); free(objects); strbuf_release(&index_name_buf); - strbuf_release(&keep_name_buf); if (pack_name == NULL) free((void *) curr_pack); if (index_name == NULL) diff --git a/builtin/init-db.c b/builtin/init-db.c index c9b7946bad..68ff4ad75a 100644 --- a/builtin/init-db.c +++ b/builtin/init-db.c @@ -24,11 +24,11 @@ static int init_is_bare_repository = 0; static int init_shared_repository = -1; static const char *init_db_template_dir; -static void copy_templates_1(struct strbuf *path, struct strbuf *template, +static void copy_templates_1(struct strbuf *path, struct strbuf *template_path, DIR *dir) { size_t path_baselen = path->len; - size_t template_baselen = template->len; + size_t template_baselen = template_path->len; struct dirent *de; /* Note: if ".git/hooks" file exists in the repository being @@ -44,12 +44,12 @@ static void copy_templates_1(struct strbuf *path, struct strbuf *template, int exists = 0; strbuf_setlen(path, path_baselen); - strbuf_setlen(template, template_baselen); + strbuf_setlen(template_path, template_baselen); if (de->d_name[0] == '.') continue; strbuf_addstr(path, de->d_name); - strbuf_addstr(template, de->d_name); + strbuf_addstr(template_path, de->d_name); if (lstat(path->buf, &st_git)) { if (errno != ENOENT) die_errno(_("cannot stat '%s'"), path->buf); @@ -57,36 +57,36 @@ static void copy_templates_1(struct strbuf *path, struct strbuf *template, else exists = 1; - if (lstat(template->buf, &st_template)) - die_errno(_("cannot stat template '%s'"), template->buf); + if (lstat(template_path->buf, &st_template)) + die_errno(_("cannot stat template '%s'"), template_path->buf); if (S_ISDIR(st_template.st_mode)) { - DIR *subdir = opendir(template->buf); + DIR *subdir = opendir(template_path->buf); if (!subdir) - die_errno(_("cannot opendir '%s'"), template->buf); + die_errno(_("cannot opendir '%s'"), template_path->buf); strbuf_addch(path, '/'); - strbuf_addch(template, '/'); - copy_templates_1(path, template, subdir); + strbuf_addch(template_path, '/'); + copy_templates_1(path, template_path, subdir); closedir(subdir); } else if (exists) continue; else if (S_ISLNK(st_template.st_mode)) { struct strbuf lnk = STRBUF_INIT; - if (strbuf_readlink(&lnk, template->buf, 0) < 0) - die_errno(_("cannot readlink '%s'"), template->buf); + if (strbuf_readlink(&lnk, template_path->buf, 0) < 0) + die_errno(_("cannot readlink '%s'"), template_path->buf); if (symlink(lnk.buf, path->buf)) die_errno(_("cannot symlink '%s' '%s'"), lnk.buf, path->buf); strbuf_release(&lnk); } else if (S_ISREG(st_template.st_mode)) { - if (copy_file(path->buf, template->buf, st_template.st_mode)) + if (copy_file(path->buf, template_path->buf, st_template.st_mode)) die_errno(_("cannot 
copy '%s' to '%s'"), - template->buf, path->buf); + template_path->buf, path->buf); } else - error(_("ignoring template %s"), template->buf); + error(_("ignoring template %s"), template_path->buf); } } diff --git a/builtin/log.c b/builtin/log.c index 14fdf39165..94ee177d56 100644 --- a/builtin/log.c +++ b/builtin/log.c @@ -29,6 +29,8 @@ #include "gpg-interface.h" #include "progress.h" +#define MAIL_DEFAULT_WRAP 72 + /* Set a default date-time format for git log ("log.date" config variable) */ static const char *default_date_mode = NULL; @@ -188,8 +190,8 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix, if (rev->show_notes) init_display_notes(&rev->notes_opt); - if (rev->diffopt.pickaxe || rev->diffopt.filter || - rev->diffopt.flags.follow_renames) + if ((rev->diffopt.pickaxe_opts & DIFF_PICKAXE_KINDS_MASK) || + rev->diffopt.filter || rev->diffopt.flags.follow_renames) rev->always_show_header = 0; if (source) @@ -1044,7 +1046,7 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout, shortlog_init(&log); log.wrap_lines = 1; - log.wrap = 72; + log.wrap = MAIL_DEFAULT_WRAP; log.in1 = 2; log.in2 = 4; log.file = rev->diffopt.file; @@ -1061,6 +1063,7 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout, memcpy(&opts, &rev->diffopt, sizeof(opts)); opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT; + opts.stat_width = MAIL_DEFAULT_WRAP; diff_setup_done(&opts); @@ -1614,6 +1617,8 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) (!rev.diffopt.output_format || rev.diffopt.output_format == DIFF_FORMAT_PATCH)) rev.diffopt.output_format = DIFF_FORMAT_DIFFSTAT | DIFF_FORMAT_SUMMARY; + if (!rev.diffopt.stat_width) + rev.diffopt.stat_width = MAIL_DEFAULT_WRAP; /* Always generate a patch */ rev.diffopt.output_format |= DIFF_FORMAT_PATCH; diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c index c4be98ab9e..540d56429f 100644 --- a/builtin/ls-remote.c +++ b/builtin/ls-remote.c @@ -60,8 +60,9 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix) OPT_BIT(0, "refs", &flags, N_("do not show peeled tags"), REF_NORMAL), OPT_BOOL(0, "get-url", &get_url, N_("take url.<base>.insteadOf into account")), - OPT_SET_INT(0, "exit-code", &status, - N_("exit with exit code 2 if no matching refs are found"), 2), + OPT_SET_INT_F(0, "exit-code", &status, + N_("exit with exit code 2 if no matching refs are found"), + 2, PARSE_OPT_NOCOMPLETE), OPT_BOOL(0, "symref", &show_symref_target, N_("show underlying ref in addition to the object pointed by it")), OPT_END() diff --git a/builtin/merge.c b/builtin/merge.c index 30264cfd7c..ee050a47f3 100644 --- a/builtin/merge.c +++ b/builtin/merge.c @@ -33,6 +33,7 @@ #include "sequencer.h" #include "string-list.h" #include "packfile.h" +#include "tag.h" #define DEFAULT_TWOHEAD (1<<0) #define DEFAULT_OCTOPUS (1<<1) @@ -520,7 +521,7 @@ static void merge_name(const char *remote, struct strbuf *msg) if (desc && desc->obj && desc->obj->type == OBJ_TAG) { strbuf_addf(msg, "%s\t\t%s '%s'\n", oid_to_hex(&desc->obj->oid), - typename(desc->obj->type), + type_name(desc->obj->type), remote); goto cleanup; } @@ -651,10 +652,9 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common, hold_locked_index(&lock, LOCK_DIE_ON_ERROR); refresh_cache(REFRESH_QUIET); - if (active_cache_changed && - write_locked_index(&the_index, &lock, COMMIT_LOCK)) + if (write_locked_index(&the_index, &lock, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) return error(_("Unable to write index.")); - 
rollback_lock_file(&lock); if (!strcmp(strategy, "recursive") || !strcmp(strategy, "subtree")) { int clean, x; @@ -691,10 +691,9 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common, remoteheads->item, reversed, &result); if (clean < 0) exit(128); - if (active_cache_changed && - write_locked_index(&the_index, &lock, COMMIT_LOCK)) + if (write_locked_index(&the_index, &lock, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) die (_("unable to write %s"), get_index_file()); - rollback_lock_file(&lock); return clean ? 0 : 1; } else { return try_merge_command(strategy, xopts_nr, xopts, @@ -810,18 +809,17 @@ static int merge_trivial(struct commit *head, struct commit_list *remoteheads) hold_locked_index(&lock, LOCK_DIE_ON_ERROR); refresh_cache(REFRESH_QUIET); - if (active_cache_changed && - write_locked_index(&the_index, &lock, COMMIT_LOCK)) + if (write_locked_index(&the_index, &lock, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) return error(_("Unable to write index.")); - rollback_lock_file(&lock); write_tree_trivial(&result_tree); printf(_("Wonderful.\n")); pptr = commit_list_append(head, pptr); pptr = commit_list_append(remoteheads->item, pptr); prepare_to_commit(remoteheads); - if (commit_tree(merge_msg.buf, merge_msg.len, result_tree.hash, parents, - result_commit.hash, NULL, sign_commit)) + if (commit_tree(merge_msg.buf, merge_msg.len, &result_tree, parents, + &result_commit, NULL, sign_commit)) die(_("failed to write commit object")); finish(head, remoteheads, &result_commit, "In-index merge"); drop_save(); @@ -845,8 +843,8 @@ static int finish_automerge(struct commit *head, commit_list_insert(head, &parents); strbuf_addch(&merge_msg, '\n'); prepare_to_commit(remoteheads); - if (commit_tree(merge_msg.buf, merge_msg.len, result_tree->hash, parents, - result_commit.hash, NULL, sign_commit)) + if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parents, + &result_commit, NULL, sign_commit)) die(_("failed to write commit object")); strbuf_addf(&buf, "Merge made by the '%s' strategy.", wt_strategy); finish(head, remoteheads, &result_commit, buf.buf); @@ -1125,6 +1123,43 @@ static struct commit_list *collect_parents(struct commit *head_commit, return remoteheads; } +static int merging_a_throwaway_tag(struct commit *commit) +{ + char *tag_ref; + struct object_id oid; + int is_throwaway_tag = 0; + + /* Are we merging a tag? */ + if (!merge_remote_util(commit) || + !merge_remote_util(commit)->obj || + merge_remote_util(commit)->obj->type != OBJ_TAG) + return is_throwaway_tag; + + /* + * Now we know we are merging a tag object. Are we downstream + * and following the tags from upstream? If so, we must have + * the tag object pointed at by "refs/tags/$T" where $T is the + * tagname recorded in the tag object. We want to allow such + * a "just to catch up" merge to fast-forward. + * + * Otherwise, we are playing an integrator's role, making a + * merge with a throw-away tag from a contributor with + * something like "git pull $contributor $signed_tag". + * We want to forbid such a merge from fast-forwarding + * by default; otherwise we would not keep the signature + * anywhere. 
+ */ + tag_ref = xstrfmt("refs/tags/%s", + ((struct tag *)merge_remote_util(commit)->obj)->tag); + if (!read_ref(tag_ref, &oid) && + !oidcmp(&oid, &merge_remote_util(commit)->obj->oid)) + is_throwaway_tag = 0; + else + is_throwaway_tag = 1; + free(tag_ref); + return is_throwaway_tag; +} + int cmd_merge(int argc, const char **argv, const char *prefix) { struct object_id result_tree, stash, head_oid; @@ -1322,10 +1357,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix) oid_to_hex(&commit->object.oid)); setenv(buf.buf, merge_remote_util(commit)->name, 1); strbuf_reset(&buf); - if (fast_forward != FF_ONLY && - merge_remote_util(commit) && - merge_remote_util(commit)->obj && - merge_remote_util(commit)->obj->type == OBJ_TAG) + if (fast_forward != FF_ONLY && merging_a_throwaway_tag(commit)) fast_forward = FF_NO; } diff --git a/builtin/mktag.c b/builtin/mktag.c index 031b750f06..beb552847b 100644 --- a/builtin/mktag.c +++ b/builtin/mktag.c @@ -151,7 +151,7 @@ static int verify_tag(char *buffer, unsigned long size) int cmd_mktag(int argc, const char **argv, const char *prefix) { struct strbuf buf = STRBUF_INIT; - unsigned char result_sha1[20]; + struct object_id result; if (argc != 1) usage("git mktag"); @@ -165,10 +165,10 @@ int cmd_mktag(int argc, const char **argv, const char *prefix) if (verify_tag(buf.buf, buf.len) < 0) die("invalid tag signature file"); - if (write_sha1_file(buf.buf, buf.len, tag_type, result_sha1) < 0) + if (write_object_file(buf.buf, buf.len, tag_type, &result) < 0) die("unable to write tag file"); strbuf_release(&buf); - printf("%s\n", sha1_to_hex(result_sha1)); + printf("%s\n", oid_to_hex(&result)); return 0; } diff --git a/builtin/mktree.c b/builtin/mktree.c index da0fd8cd70..f5f3c0eea1 100644 --- a/builtin/mktree.c +++ b/builtin/mktree.c @@ -40,7 +40,7 @@ static int ent_compare(const void *a_, const void *b_) b->name, b->len, b->mode); } -static void write_tree(unsigned char *sha1) +static void write_tree(struct object_id *oid) { struct strbuf buf; size_t size; @@ -57,7 +57,7 @@ static void write_tree(unsigned char *sha1) strbuf_add(&buf, ent->sha1, 20); } - write_sha1_file(buf.buf, buf.len, tree_type, sha1); + write_object_file(buf.buf, buf.len, tree_type, oid); strbuf_release(&buf); } @@ -112,7 +112,7 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss mode_type = object_type(mode); if (mode_type != type_from_string(ptr)) { die("entry '%s' object type (%s) doesn't match mode type (%s)", - path, ptr, typename(mode_type)); + path, ptr, type_name(mode_type)); } /* Check the type of object identified by sha1 */ @@ -131,7 +131,7 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss * because the new tree entry will never be correct. 
*/ die("entry '%s' object %s is a %s but specified type was (%s)", - path, sha1_to_hex(sha1), typename(obj_type), typename(mode_type)); + path, sha1_to_hex(sha1), type_name(obj_type), type_name(mode_type)); } } @@ -142,7 +142,7 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss int cmd_mktree(int ac, const char **av, const char *prefix) { struct strbuf sb = STRBUF_INIT; - unsigned char sha1[20]; + struct object_id oid; int nul_term_line = 0; int allow_missing = 0; int is_batch_mode = 0; @@ -181,8 +181,8 @@ int cmd_mktree(int ac, const char **av, const char *prefix) */ ; /* skip creating an empty tree */ } else { - write_tree(sha1); - puts(sha1_to_hex(sha1)); + write_tree(&oid); + puts(oid_to_hex(&oid)); fflush(stdout); } used=0; /* reset tree entry buffer for re-use in batch mode */ diff --git a/builtin/mv.c b/builtin/mv.c index 8ce6a2ddd4..6d141f7a53 100644 --- a/builtin/mv.c +++ b/builtin/mv.c @@ -122,7 +122,8 @@ int cmd_mv(int argc, const char **argv, const char *prefix) struct option builtin_mv_options[] = { OPT__VERBOSE(&verbose, N_("be verbose")), OPT__DRY_RUN(&show_only, N_("dry run")), - OPT__FORCE(&force, N_("force move/rename even if target exists")), + OPT__FORCE(&force, N_("force move/rename even if target exists"), + PARSE_OPT_NOCOMPLETE), OPT_BOOL('k', NULL, &ignore_errors, N_("skip move/rename errors")), OPT_END(), }; @@ -292,8 +293,8 @@ int cmd_mv(int argc, const char **argv, const char *prefix) if (gitmodules_modified) stage_updated_gitmodules(&the_index); - if (active_cache_changed && - write_locked_index(&the_index, &lock_file, COMMIT_LOCK)) + if (write_locked_index(&the_index, &lock_file, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) die(_("Unable to write new index file")); return 0; diff --git a/builtin/notes.c b/builtin/notes.c index 7c81761645..6d2fda4a7d 100644 --- a/builtin/notes.c +++ b/builtin/notes.c @@ -198,9 +198,9 @@ static void prepare_note_data(const struct object_id *object, struct note_data * } } -static void write_note_data(struct note_data *d, unsigned char *sha1) +static void write_note_data(struct note_data *d, struct object_id *oid) { - if (write_sha1_file(d->buf.buf, d->buf.len, blob_type, sha1)) { + if (write_object_file(d->buf.buf, d->buf.len, blob_type, oid)) { error(_("unable to write note object")); if (d->edit_path) error(_("the note contents have been left in %s"), @@ -413,7 +413,7 @@ static int add(int argc, const char **argv, const char *prefix) parse_reuse_arg}, OPT_BOOL(0, "allow-empty", &allow_empty, N_("allow storing empty note")), - OPT__FORCE(&force, N_("replace existing notes")), + OPT__FORCE(&force, N_("replace existing notes"), PARSE_OPT_NOCOMPLETE), OPT_END() }; @@ -459,7 +459,7 @@ static int add(int argc, const char **argv, const char *prefix) prepare_note_data(&object, &d, note ? 
note->hash : NULL); if (d.buf.len || allow_empty) { - write_note_data(&d, new_note.hash); + write_note_data(&d, &new_note); if (add_note(t, &object, &new_note, combine_notes_overwrite)) die("BUG: combine_notes_overwrite failed"); commit_notes(t, "Notes added by 'git notes add'"); @@ -484,7 +484,7 @@ static int copy(int argc, const char **argv, const char *prefix) struct notes_tree *t; const char *rewrite_cmd = NULL; struct option options[] = { - OPT__FORCE(&force, N_("replace existing notes")), + OPT__FORCE(&force, N_("replace existing notes"), PARSE_OPT_NOCOMPLETE), OPT_BOOL(0, "stdin", &from_stdin, N_("read objects from stdin")), OPT_STRING(0, "for-rewrite", &rewrite_cmd, N_("command"), N_("load rewriting config for <command> (implies " @@ -619,7 +619,7 @@ static int append_edit(int argc, const char **argv, const char *prefix) } if (d.buf.len || allow_empty) { - write_note_data(&d, new_note.hash); + write_note_data(&d, &new_note); if (add_note(t, &object, &new_note, combine_notes_overwrite)) die("BUG: combine_notes_overwrite failed"); logmsg = xstrfmt("Notes added by 'git notes %s'", argv[0]); diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c index 6b9cfc289d..e9d3cfb9e3 100644 --- a/builtin/pack-objects.c +++ b/builtin/pack-objects.c @@ -26,7 +26,7 @@ #include "reachable.h" #include "sha1-array.h" #include "argv-array.h" -#include "mru.h" +#include "list.h" #include "packfile.h" static const char *pack_usage[] = { @@ -75,6 +75,8 @@ static int use_bitmap_index = -1; static int write_bitmap_index; static uint16_t write_bitmap_options; +static int exclude_promisor_objects; + static unsigned long delta_cache_size = 0; static unsigned long max_delta_cache_size = 256 * 1024 * 1024; static unsigned long cache_max_small_delta_size = 1000; @@ -84,8 +86,9 @@ static unsigned long window_memory_limit = 0; static struct list_objects_filter_options filter_options; enum missing_action { - MA_ERROR = 0, /* fail if any missing objects are encountered */ - MA_ALLOW_ANY, /* silently allow ALL missing objects */ + MA_ERROR = 0, /* fail if any missing objects are encountered */ + MA_ALLOW_ANY, /* silently allow ALL missing objects */ + MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */ }; static enum missing_action arg_missing_action; static show_object_fn fn_show_object; @@ -161,7 +164,7 @@ static unsigned long do_compress(void **pptr, unsigned long size) return stream.total_out; } -static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f, +static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f, const struct object_id *oid) { git_zstream stream; @@ -185,7 +188,7 @@ static unsigned long write_large_blob_data(struct git_istream *st, struct sha1fi stream.next_out = obuf; stream.avail_out = sizeof(obuf); zret = git_deflate(&stream, readlen ? 0 : Z_FINISH); - sha1write(f, obuf, stream.next_out - obuf); + hashwrite(f, obuf, stream.next_out - obuf); olen += stream.next_out - obuf; } if (stream.avail_in) @@ -230,7 +233,7 @@ static int check_pack_inflate(struct packed_git *p, stream.total_in == len) ? 
0 : -1; } -static void copy_pack_data(struct sha1file *f, +static void copy_pack_data(struct hashfile *f, struct packed_git *p, struct pack_window **w_curs, off_t offset, @@ -243,14 +246,14 @@ static void copy_pack_data(struct sha1file *f, in = use_pack(p, w_curs, offset, &avail); if (avail > len) avail = (unsigned long)len; - sha1write(f, in, avail); + hashwrite(f, in, avail); offset += avail; len -= avail; } } /* Return 0 if we will bust the pack-size limit */ -static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry, +static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry, unsigned long limit, int usable_delta) { unsigned long size, datalen; @@ -323,8 +326,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent free(buf); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, dheader + pos, sizeof(dheader) - pos); + hashwrite(f, header, hdrlen); + hashwrite(f, dheader + pos, sizeof(dheader) - pos); hdrlen += sizeof(dheader) - pos; } else if (type == OBJ_REF_DELTA) { /* @@ -337,8 +340,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent free(buf); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, entry->delta->idx.oid.hash, 20); + hashwrite(f, header, hdrlen); + hashwrite(f, entry->delta->idx.oid.hash, 20); hdrlen += 20; } else { if (limit && hdrlen + datalen + 20 >= limit) { @@ -347,13 +350,13 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent free(buf); return 0; } - sha1write(f, header, hdrlen); + hashwrite(f, header, hdrlen); } if (st) { datalen = write_large_blob_data(st, f, &entry->idx.oid); close_istream(st); } else { - sha1write(f, buf, datalen); + hashwrite(f, buf, datalen); free(buf); } @@ -361,7 +364,7 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent } /* Return 0 if we will bust the pack-size limit */ -static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry, +static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry, unsigned long limit, int usable_delta) { struct packed_git *p = entry->in_pack; @@ -412,8 +415,8 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry, unuse_pack(&w_curs); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, dheader + pos, sizeof(dheader) - pos); + hashwrite(f, header, hdrlen); + hashwrite(f, dheader + pos, sizeof(dheader) - pos); hdrlen += sizeof(dheader) - pos; reused_delta++; } else if (type == OBJ_REF_DELTA) { @@ -421,8 +424,8 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry, unuse_pack(&w_curs); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, entry->delta->idx.oid.hash, 20); + hashwrite(f, header, hdrlen); + hashwrite(f, entry->delta->idx.oid.hash, 20); hdrlen += 20; reused_delta++; } else { @@ -430,7 +433,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry, unuse_pack(&w_curs); return 0; } - sha1write(f, header, hdrlen); + hashwrite(f, header, hdrlen); } copy_pack_data(f, p, &w_curs, offset, datalen); unuse_pack(&w_curs); @@ -439,7 +442,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry, } /* Return 0 if we will bust the pack-size limit */ -static off_t write_object(struct sha1file *f, +static off_t write_object(struct hashfile *f, struct object_entry *entry, off_t write_offset) { @@ -512,7 +515,7 @@ enum write_one_status { WRITE_ONE_RECURSIVE = 2 
/* already scheduled to be written */ }; -static enum write_one_status write_one(struct sha1file *f, +static enum write_one_status write_one(struct hashfile *f, struct object_entry *e, off_t *offset) { @@ -731,7 +734,7 @@ static struct object_entry **compute_write_order(void) return wo; } -static off_t write_reused_pack(struct sha1file *f) +static off_t write_reused_pack(struct hashfile *f) { unsigned char buffer[8192]; off_t to_write, total; @@ -762,7 +765,7 @@ static off_t write_reused_pack(struct sha1file *f) if (read_pack > to_write) read_pack = to_write; - sha1write(f, buffer, read_pack); + hashwrite(f, buffer, read_pack); to_write -= read_pack; /* @@ -791,7 +794,7 @@ static const char no_split_warning[] = N_( static void write_pack_file(void) { uint32_t i = 0, j; - struct sha1file *f; + struct hashfile *f; off_t offset; uint32_t nr_remaining = nr_result; time_t last_mtime = 0; @@ -807,7 +810,7 @@ static void write_pack_file(void) char *pack_tmp_name = NULL; if (pack_to_stdout) - f = sha1fd_throughput(1, "<stdout>", progress_state); + f = hashfd_throughput(1, "<stdout>", progress_state); else f = create_tmp_packfile(&pack_tmp_name); @@ -834,11 +837,11 @@ static void write_pack_file(void) * If so, rewrite it like in fast-import */ if (pack_to_stdout) { - sha1close(f, oid.hash, CSUM_CLOSE); + hashclose(f, oid.hash, CSUM_CLOSE); } else if (nr_written == nr_remaining) { - sha1close(f, oid.hash, CSUM_FSYNC); + hashclose(f, oid.hash, CSUM_FSYNC); } else { - int fd = sha1close(f, oid.hash, 0); + int fd = hashclose(f, oid.hash, 0); fixup_pack_header_footer(fd, oid.hash, pack_tmp_name, nr_written, oid.hash, offset); close(fd); @@ -1006,8 +1009,8 @@ static int want_object_in_pack(const struct object_id *oid, struct packed_git **found_pack, off_t *found_offset) { - struct mru_entry *entry; int want; + struct list_head *pos; if (!exclude && local && has_loose_object_nonlocal(oid->hash)) return 0; @@ -1023,8 +1026,8 @@ static int want_object_in_pack(const struct object_id *oid, return want; } - for (entry = packed_git_mru.head; entry; entry = entry->next) { - struct packed_git *p = entry->item; + list_for_each(pos, &packed_git_mru) { + struct packed_git *p = list_entry(pos, struct packed_git, mru); off_t offset; if (p == *found_pack) @@ -1041,7 +1044,7 @@ static int want_object_in_pack(const struct object_id *oid, } want = want_found_object(exclude, p); if (!exclude && want > 0) - mru_mark(&packed_git_mru, entry); + list_move(&p->mru, &packed_git_mru); if (want != -1) return want; } @@ -1376,10 +1379,10 @@ static void cleanup_preferred_base(void) it = pbase_tree; pbase_tree = NULL; while (it) { - struct pbase_tree *this = it; - it = this->next; - free(this->pcache.tree_data); - free(this); + struct pbase_tree *tmp = it; + it = tmp->next; + free(tmp->pcache.tree_data); + free(tmp); } for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) { @@ -2546,6 +2549,7 @@ static void read_object_list_from_stdin(void) } } +/* Remember to update object flag allocation in object.h */ #define OBJECT_ADDED (1u<<20) static void show_commit(struct commit *commit, void *data) @@ -2578,6 +2582,20 @@ static void show_object__ma_allow_any(struct object *obj, const char *name, void show_object(obj, name, data); } +static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data) +{ + assert(arg_missing_action == MA_ALLOW_PROMISOR); + + /* + * Quietly ignore EXPECTED missing objects. This avoids problems with + * staging them now and getting an odd error later. 
+ */ + if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid)) + return; + + show_object(obj, name, data); +} + static int option_parse_missing_action(const struct option *opt, const char *arg, int unset) { @@ -2592,10 +2610,18 @@ static int option_parse_missing_action(const struct option *opt, if (!strcmp(arg, "allow-any")) { arg_missing_action = MA_ALLOW_ANY; + fetch_if_missing = 0; fn_show_object = show_object__ma_allow_any; return 0; } + if (!strcmp(arg, "allow-promisor")) { + arg_missing_action = MA_ALLOW_PROMISOR; + fetch_if_missing = 0; + fn_show_object = show_object__ma_allow_promisor; + return 0; + } + die(_("invalid value for --missing")); return 0; } @@ -2768,7 +2794,7 @@ static void loosen_unused_packed_objects(struct rev_info *revs) if (!packlist_find(&to_pack, oid.hash, NULL) && !has_sha1_pack_kept_or_nonlocal(&oid) && !loosened_object_can_be_discarded(&oid, p->mtime)) - if (force_object_loose(oid.hash, p->mtime)) + if (force_object_loose(&oid, p->mtime)) die("unable to force loose object"); } } @@ -3009,6 +3035,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) { OPTION_CALLBACK, 0, "missing", NULL, N_("action"), N_("handling for missing objects"), PARSE_OPT_NONEG, option_parse_missing_action }, + OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects, + N_("do not pack objects in promisor packfiles")), OPT_END(), }; @@ -3054,6 +3082,12 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) argv_array_push(&rp, "--unpacked"); } + if (exclude_promisor_objects) { + use_internal_rev_list = 1; + fetch_if_missing = 0; + argv_array_push(&rp, "--exclude-promisor-objects"); + } + if (!reuse_object) reuse_delta = 0; if (pack_compression_level == -1) diff --git a/builtin/pack-redundant.c b/builtin/pack-redundant.c index aaa8136322..991e1bb76f 100644 --- a/builtin/pack-redundant.c +++ b/builtin/pack-redundant.c @@ -48,17 +48,17 @@ static inline void llist_item_put(struct llist_item *item) static inline struct llist_item *llist_item_get(void) { - struct llist_item *new; + struct llist_item *new_item; if ( free_nodes ) { - new = free_nodes; + new_item = free_nodes; free_nodes = free_nodes->next; } else { int i = 1; - ALLOC_ARRAY(new, BLKSIZE); + ALLOC_ARRAY(new_item, BLKSIZE); for (; i < BLKSIZE; i++) - llist_item_put(&new[i]); + llist_item_put(&new_item[i]); } - return new; + return new_item; } static void llist_free(struct llist *list) @@ -80,26 +80,26 @@ static inline void llist_init(struct llist **list) static struct llist * llist_copy(struct llist *list) { struct llist *ret; - struct llist_item *new, *old, *prev; + struct llist_item *new_item, *old_item, *prev; llist_init(&ret); if ((ret->size = list->size) == 0) return ret; - new = ret->front = llist_item_get(); - new->sha1 = list->front->sha1; + new_item = ret->front = llist_item_get(); + new_item->sha1 = list->front->sha1; - old = list->front->next; - while (old) { - prev = new; - new = llist_item_get(); - prev->next = new; - new->sha1 = old->sha1; - old = old->next; + old_item = list->front->next; + while (old_item) { + prev = new_item; + new_item = llist_item_get(); + prev->next = new_item; + new_item->sha1 = old_item->sha1; + old_item = old_item->next; } - new->next = NULL; - ret->back = new; + new_item->next = NULL; + ret->back = new_item; return ret; } @@ -108,24 +108,24 @@ static inline struct llist_item *llist_insert(struct llist *list, struct llist_item *after, const unsigned char *sha1) { - struct llist_item *new = llist_item_get(); - new->sha1 = sha1; - 
new->next = NULL; + struct llist_item *new_item = llist_item_get(); + new_item->sha1 = sha1; + new_item->next = NULL; if (after != NULL) { - new->next = after->next; - after->next = new; + new_item->next = after->next; + after->next = new_item; if (after == list->back) - list->back = new; + list->back = new_item; } else {/* insert in front */ if (list->size == 0) - list->back = new; + list->back = new_item; else - new->next = list->front; - list->front = new; + new_item->next = list->front; + list->front = new_item; } list->size++; - return new; + return new_item; } static inline struct llist_item *llist_insert_back(struct llist *list, diff --git a/builtin/prune.c b/builtin/prune.c index d2fdae680a..4394d01c93 100644 --- a/builtin/prune.c +++ b/builtin/prune.c @@ -52,7 +52,7 @@ static int prune_object(const struct object_id *oid, const char *fullpath, if (show_only || verbose) { enum object_type type = sha1_object_info(oid->hash, NULL); printf("%s %s\n", oid_to_hex(oid), - (type > 0) ? typename(type) : "unknown"); + (type > 0) ? type_name(type) : "unknown"); } if (!show_only) unlink_or_warn(fullpath); @@ -101,12 +101,15 @@ int cmd_prune(int argc, const char **argv, const char *prefix) { struct rev_info revs; struct progress *progress = NULL; + int exclude_promisor_objects = 0; const struct option options[] = { OPT__DRY_RUN(&show_only, N_("do not remove, show only")), OPT__VERBOSE(&verbose, N_("report pruned objects")), OPT_BOOL(0, "progress", &show_progress, N_("show progress")), OPT_EXPIRY_DATE(0, "expire", &expire, N_("expire objects older than <time>")), + OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects, + N_("limit traversal to objects outside promisor packfiles")), OPT_END() }; char *s; @@ -139,6 +142,10 @@ int cmd_prune(int argc, const char **argv, const char *prefix) show_progress = isatty(2); if (show_progress) progress = start_delayed_progress(_("Checking connectivity"), 0); + if (exclude_promisor_objects) { + fetch_if_missing = 0; + revs.exclude_promisor_objects = 1; + } mark_reachable_objects(&revs, 1, expire, progress); stop_progress(&progress); diff --git a/builtin/pull.c b/builtin/pull.c index 511dbbe0f6..e32d6cd5b4 100644 --- a/builtin/pull.c +++ b/builtin/pull.c @@ -193,7 +193,7 @@ static struct option pull_options[] = { OPT_PASSTHRU(0, "upload-pack", &opt_upload_pack, N_("path"), N_("path to upload pack on remote end"), 0), - OPT__FORCE(&opt_force, N_("force overwrite of local branch")), + OPT__FORCE(&opt_force, N_("force overwrite of local branch"), 0), OPT_PASSTHRU('t', "tags", &opt_tags, NULL, N_("fetch all tags and associated objects"), PARSE_OPT_NOARG), @@ -574,6 +574,7 @@ static int rebase_submodules(void) cp.no_stdin = 1; argv_array_pushl(&cp.args, "submodule", "update", "--recursive", "--rebase", NULL); + argv_push_verbosity(&cp.args); return run_command(&cp); } @@ -586,6 +587,7 @@ static int update_submodules(void) cp.no_stdin = 1; argv_array_pushl(&cp.args, "submodule", "update", "--recursive", "--checkout", NULL); + argv_push_verbosity(&cp.args); return run_command(&cp); } diff --git a/builtin/push.c b/builtin/push.c index 1c28427d82..013c20d616 100644 --- a/builtin/push.c +++ b/builtin/push.c @@ -548,7 +548,7 @@ int cmd_push(int argc, const char **argv, const char *prefix) { OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, "check|on-demand|no", N_("control recursive pushing of submodules"), PARSE_OPT_OPTARG, option_parse_recurse_submodules }, - OPT_BOOL( 0 , "thin", &thin, N_("use thin pack")), + OPT_BOOL_F( 0 , "thin", &thin, 
N_("use thin pack"), PARSE_OPT_NOCOMPLETE), OPT_STRING( 0 , "receive-pack", &receivepack, "receive-pack", N_("receive pack program")), OPT_STRING( 0 , "exec", &receivepack, "receive-pack", N_("receive pack program")), OPT_BIT('u', "set-upstream", &flags, N_("set upstream for git pull/status"), diff --git a/builtin/rebase--helper.c b/builtin/rebase--helper.c index 7daee544b7..ad074705bb 100644 --- a/builtin/rebase--helper.c +++ b/builtin/rebase--helper.c @@ -22,6 +22,8 @@ int cmd_rebase__helper(int argc, const char **argv, const char *prefix) struct option options[] = { OPT_BOOL(0, "ff", &opts.allow_ff, N_("allow fast-forward")), OPT_BOOL(0, "keep-empty", &keep_empty, N_("keep empty commits")), + OPT_BOOL(0, "allow-empty-message", &opts.allow_empty_message, + N_("allow commits with empty messages")), OPT_CMDMODE(0, "continue", &command, N_("continue rebase"), CONTINUE), OPT_CMDMODE(0, "abort", &command, N_("abort rebase"), @@ -43,7 +45,7 @@ int cmd_rebase__helper(int argc, const char **argv, const char *prefix) OPT_END() }; - git_config(git_default_config, NULL); + sequencer_init_config(&opts); git_config_get_bool("rebase.abbreviatecommands", &abbreviate_commands); opts.action = REPLAY_INTERACTIVE_REBASE; diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c index b7ce7c7f52..75e7f18ace 100644 --- a/builtin/receive-pack.c +++ b/builtin/receive-pack.c @@ -69,7 +69,7 @@ static int sent_capabilities; static int shallow_update; static const char *alt_shallow_file; static struct strbuf push_cert = STRBUF_INIT; -static unsigned char push_cert_sha1[20]; +static struct object_id push_cert_oid; static struct signature_check sigcheck; static const char *push_cert_nonce; static const char *cert_nonce_seed; @@ -633,8 +633,9 @@ static void prepare_push_cert_sha1(struct child_process *proc) int bogs /* beginning_of_gpg_sig */; already_done = 1; - if (write_sha1_file(push_cert.buf, push_cert.len, "blob", push_cert_sha1)) - hashclr(push_cert_sha1); + if (write_object_file(push_cert.buf, push_cert.len, "blob", + &push_cert_oid)) + oidclr(&push_cert_oid); memset(&sigcheck, '\0', sizeof(sigcheck)); sigcheck.result = 'N'; @@ -655,9 +656,9 @@ static void prepare_push_cert_sha1(struct child_process *proc) strbuf_release(&gpg_status); nonce_status = check_nonce(push_cert.buf, bogs); } - if (!is_null_sha1(push_cert_sha1)) { + if (!is_null_oid(&push_cert_oid)) { argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT=%s", - sha1_to_hex(push_cert_sha1)); + oid_to_hex(&push_cert_oid)); argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_SIGNER=%s", sigcheck.signer ? 
sigcheck.signer : ""); argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_KEY=%s", diff --git a/builtin/reflog.c b/builtin/reflog.c index 2233725315..4719a5354c 100644 --- a/builtin/reflog.c +++ b/builtin/reflog.c @@ -52,6 +52,7 @@ struct collect_reflog_cb { int nr; }; +/* Remember to update object flag allocation in object.h */ #define INCOMPLETE (1u<<10) #define STUDYING (1u<<11) #define REACHABLE (1u<<12) @@ -289,20 +290,20 @@ static int should_expire_reflog_ent(struct object_id *ooid, struct object_id *no const char *message, void *cb_data) { struct expire_reflog_policy_cb *cb = cb_data; - struct commit *old, *new; + struct commit *old_commit, *new_commit; if (timestamp < cb->cmd.expire_total) return 1; - old = new = NULL; + old_commit = new_commit = NULL; if (cb->cmd.stalefix && - (!keep_entry(&old, ooid) || !keep_entry(&new, noid))) + (!keep_entry(&old_commit, ooid) || !keep_entry(&new_commit, noid))) return 1; if (timestamp < cb->cmd.expire_unreachable) { if (cb->unreachable_expire_kind == UE_ALWAYS) return 1; - if (unreachable(cb, old, ooid) || unreachable(cb, new, noid)) + if (unreachable(cb, old_commit, ooid) || unreachable(cb, new_commit, noid)) return 1; } diff --git a/builtin/remote.c b/builtin/remote.c index d95bf904c3..805ffc05cd 100644 --- a/builtin/remote.c +++ b/builtin/remote.c @@ -168,7 +168,7 @@ static int add(int argc, const char **argv) OPT_STRING('m', "master", &master, N_("branch"), N_("master branch")), { OPTION_CALLBACK, 0, "mirror", &mirror, N_("push|fetch"), N_("set up remote as a mirror to push to or fetch from"), - PARSE_OPT_OPTARG, parse_mirror_opt }, + PARSE_OPT_OPTARG | PARSE_OPT_COMP_ARG, parse_mirror_opt }, OPT_END() }; @@ -322,7 +322,7 @@ static void read_branches(void) struct ref_states { struct remote *remote; - struct string_list new, stale, tracked, heads, push; + struct string_list new_refs, stale, tracked, heads, push; int queried; }; @@ -337,12 +337,12 @@ static int get_ref_states(const struct ref *remote_refs, struct ref_states *stat die(_("Could not get fetch map for refspec %s"), states->remote->fetch_refspec[i]); - states->new.strdup_strings = 1; + states->new_refs.strdup_strings = 1; states->tracked.strdup_strings = 1; states->stale.strdup_strings = 1; for (ref = fetch_map; ref; ref = ref->next) { if (!ref->peer_ref || !ref_exists(ref->peer_ref->name)) - string_list_append(&states->new, abbrev_branch(ref->name)); + string_list_append(&states->new_refs, abbrev_branch(ref->name)); else string_list_append(&states->tracked, abbrev_branch(ref->name)); } @@ -356,7 +356,7 @@ static int get_ref_states(const struct ref *remote_refs, struct ref_states *stat free_refs(stale_refs); free_refs(fetch_map); - string_list_sort(&states->new); + string_list_sort(&states->new_refs); string_list_sort(&states->tracked); string_list_sort(&states->stale); @@ -546,8 +546,8 @@ static int add_branch_for_removal(const char *refname, } struct rename_info { - const char *old; - const char *new; + const char *old_name; + const char *new_name; struct string_list *remote_branches; }; @@ -560,7 +560,7 @@ static int read_remote_branches(const char *refname, int flag; const char *symref; - strbuf_addf(&buf, "refs/remotes/%s/", rename->old); + strbuf_addf(&buf, "refs/remotes/%s/", rename->old_name); if (starts_with(refname, buf.buf)) { item = string_list_append(rename->remote_branches, xstrdup(refname)); symref = resolve_ref_unsafe(refname, RESOLVE_REF_READING, @@ -615,36 +615,36 @@ static int mv(int argc, const char **argv) if (argc != 3) 
usage_with_options(builtin_remote_rename_usage, options); - rename.old = argv[1]; - rename.new = argv[2]; + rename.old_name = argv[1]; + rename.new_name = argv[2]; rename.remote_branches = &remote_branches; - oldremote = remote_get(rename.old); + oldremote = remote_get(rename.old_name); if (!remote_is_configured(oldremote, 1)) - die(_("No such remote: %s"), rename.old); + die(_("No such remote: %s"), rename.old_name); - if (!strcmp(rename.old, rename.new) && oldremote->origin != REMOTE_CONFIG) + if (!strcmp(rename.old_name, rename.new_name) && oldremote->origin != REMOTE_CONFIG) return migrate_file(oldremote); - newremote = remote_get(rename.new); + newremote = remote_get(rename.new_name); if (remote_is_configured(newremote, 1)) - die(_("remote %s already exists."), rename.new); + die(_("remote %s already exists."), rename.new_name); - strbuf_addf(&buf, "refs/heads/test:refs/remotes/%s/test", rename.new); + strbuf_addf(&buf, "refs/heads/test:refs/remotes/%s/test", rename.new_name); if (!valid_fetch_refspec(buf.buf)) - die(_("'%s' is not a valid remote name"), rename.new); + die(_("'%s' is not a valid remote name"), rename.new_name); strbuf_reset(&buf); - strbuf_addf(&buf, "remote.%s", rename.old); - strbuf_addf(&buf2, "remote.%s", rename.new); + strbuf_addf(&buf, "remote.%s", rename.old_name); + strbuf_addf(&buf2, "remote.%s", rename.new_name); if (git_config_rename_section(buf.buf, buf2.buf) < 1) return error(_("Could not rename config section '%s' to '%s'"), buf.buf, buf2.buf); strbuf_reset(&buf); - strbuf_addf(&buf, "remote.%s.fetch", rename.new); + strbuf_addf(&buf, "remote.%s.fetch", rename.new_name); git_config_set_multivar(buf.buf, NULL, NULL, 1); - strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old); + strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old_name); for (i = 0; i < oldremote->fetch_refspec_nr; i++) { char *ptr; @@ -655,8 +655,8 @@ static int mv(int argc, const char **argv) refspec_updated = 1; strbuf_splice(&buf2, ptr-buf2.buf + strlen(":refs/remotes/"), - strlen(rename.old), rename.new, - strlen(rename.new)); + strlen(rename.old_name), rename.new_name, + strlen(rename.new_name)); } else warning(_("Not updating non-default fetch refspec\n" "\t%s\n" @@ -670,10 +670,10 @@ static int mv(int argc, const char **argv) for (i = 0; i < branch_list.nr; i++) { struct string_list_item *item = branch_list.items + i; struct branch_info *info = item->util; - if (info->remote_name && !strcmp(info->remote_name, rename.old)) { + if (info->remote_name && !strcmp(info->remote_name, rename.old_name)) { strbuf_reset(&buf); strbuf_addf(&buf, "branch.%s.remote", item->string); - git_config_set(buf.buf, rename.new); + git_config_set(buf.buf, rename.new_name); } } @@ -703,8 +703,8 @@ static int mv(int argc, const char **argv) continue; strbuf_reset(&buf); strbuf_addstr(&buf, item->string); - strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old), - rename.new, strlen(rename.new)); + strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old_name), + rename.new_name, strlen(rename.new_name)); strbuf_reset(&buf2); strbuf_addf(&buf2, "remote: renamed %s to %s", item->string, buf.buf); @@ -718,12 +718,12 @@ static int mv(int argc, const char **argv) continue; strbuf_reset(&buf); strbuf_addstr(&buf, item->string); - strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old), - rename.new, strlen(rename.new)); + strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old_name), + rename.new_name, strlen(rename.new_name)); strbuf_reset(&buf2); 
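The remote-rename hunks above repeatedly splice the new remote name over the old one at a fixed offset, just past "refs/remotes/". A rough standalone illustration of that substitution (plain C strings instead of strbuf_splice(); rename_remote_ref() is a hypothetical name, not Git's API) might be:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical helper: rewrite "refs/remotes/<old>/..." as
 * "refs/remotes/<new>/...", the same substitution the strbuf_splice()
 * calls above perform at offset strlen("refs/remotes/").
 */
static char *rename_remote_ref(const char *ref, const char *old_name,
			       const char *new_name)
{
	const char *prefix = "refs/remotes/";
	size_t plen = strlen(prefix);
	size_t olen = strlen(old_name);
	const char *rest;
	char *out;

	if (strncmp(ref, prefix, plen) ||
	    strncmp(ref + plen, old_name, olen) ||
	    ref[plen + olen] != '/')
		return NULL;		/* not a ref under the old remote */

	rest = ref + plen + olen;	/* "/branch..." */
	out = malloc(plen + strlen(new_name) + strlen(rest) + 1);
	if (!out)
		return NULL;
	sprintf(out, "%s%s%s", prefix, new_name, rest);
	return out;
}

int main(void)
{
	char *r = rename_remote_ref("refs/remotes/origin/topic",
				    "origin", "upstream");
	printf("%s\n", r ? r : "(unchanged)");	/* refs/remotes/upstream/topic */
	free(r);
	return 0;
}
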
strbuf_addstr(&buf2, item->util); - strbuf_splice(&buf2, strlen("refs/remotes/"), strlen(rename.old), - rename.new, strlen(rename.new)); + strbuf_splice(&buf2, strlen("refs/remotes/"), strlen(rename.old_name), + rename.new_name, strlen(rename.new_name)); strbuf_reset(&buf3); strbuf_addf(&buf3, "remote: renamed %s to %s", item->string, buf.buf); @@ -822,7 +822,7 @@ static void clear_push_info(void *util, const char *string) static void free_remote_ref_states(struct ref_states *states) { - string_list_clear(&states->new, 0); + string_list_clear(&states->new_refs, 0); string_list_clear(&states->stale, 1); string_list_clear(&states->tracked, 0); string_list_clear(&states->heads, 0); @@ -907,7 +907,7 @@ static int show_remote_info_item(struct string_list_item *item, void *cb_data) if (states->queried) { const char *fmt = "%s"; const char *arg = ""; - if (string_list_has_string(&states->new, name)) { + if (string_list_has_string(&states->new_refs, name)) { fmt = _(" new (next fetch will store in remotes/%s)"); arg = states->remote->name; } else if (string_list_has_string(&states->tracked, name)) @@ -1176,7 +1176,7 @@ static int show(int argc, const char **argv) /* remote branch info */ info.width = 0; - for_each_string_list(&states.new, add_remote_to_show_info, &info); + for_each_string_list(&states.new_refs, add_remote_to_show_info, &info); for_each_string_list(&states.tracked, add_remote_to_show_info, &info); for_each_string_list(&states.stale, add_remote_to_show_info, &info); if (info.list->nr) diff --git a/builtin/repack.c b/builtin/repack.c index f17a68a17d..7bdb40142f 100644 --- a/builtin/repack.c +++ b/builtin/repack.c @@ -83,7 +83,8 @@ static void remove_pack_on_signal(int signo) /* * Adds all packs hex strings to the fname list, which do not - * have a corresponding .keep file. + * have a corresponding .keep or .promisor file. These packs are not to + * be kept if we are going to pack everything into one file. 
*/ static void get_non_kept_pack_filenames(struct string_list *fname_list) { @@ -101,7 +102,8 @@ static void get_non_kept_pack_filenames(struct string_list *fname_list) fname = xmemdupz(e->d_name, len); - if (!file_exists(mkpath("%s/%s.keep", packdir, fname))) + if (!file_exists(mkpath("%s/%s.keep", packdir, fname)) && + !file_exists(mkpath("%s/%s.promisor", packdir, fname))) string_list_append_nodup(fname_list, fname); else free(fname); @@ -232,6 +234,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix) argv_array_push(&cmd.args, "--all"); argv_array_push(&cmd.args, "--reflog"); argv_array_push(&cmd.args, "--indexed-objects"); + if (repository_format_partial_clone) + argv_array_push(&cmd.args, "--exclude-promisor-objects"); if (window) argv_array_pushf(&cmd.args, "--window=%s", window); if (window_memory) diff --git a/builtin/replace.c b/builtin/replace.c index 10078ae371..482f12018f 100644 --- a/builtin/replace.c +++ b/builtin/replace.c @@ -56,8 +56,8 @@ static int show_reference(const char *refname, const struct object_id *oid, obj_type = sha1_object_info(object.hash, NULL); repl_type = sha1_object_info(oid->hash, NULL); - printf("%s (%s) -> %s (%s)\n", refname, typename(obj_type), - oid_to_hex(oid), typename(repl_type)); + printf("%s (%s) -> %s (%s)\n", refname, type_name(obj_type), + oid_to_hex(oid), type_name(repl_type)); } } @@ -168,8 +168,8 @@ static int replace_object_oid(const char *object_ref, die("Objects must be of the same type.\n" "'%s' points to a replaced object of type '%s'\n" "while '%s' points to a replacement object of type '%s'.", - object_ref, typename(obj_type), - replace_ref, typename(repl_type)); + object_ref, type_name(obj_type), + replace_ref, type_name(repl_type)); check_ref_valid(object, &prev, &ref, force); @@ -215,7 +215,7 @@ static void export_object(const struct object_id *oid, enum object_type type, argv_array_push(&cmd.args, "--no-replace-objects"); argv_array_push(&cmd.args, "cat-file"); if (raw) - argv_array_push(&cmd.args, typename(type)); + argv_array_push(&cmd.args, type_name(type)); else argv_array_push(&cmd.args, "-p"); argv_array_push(&cmd.args, oid_to_hex(oid)); @@ -284,30 +284,30 @@ static int edit_and_replace(const char *object_ref, int force, int raw) { char *tmpfile = git_pathdup("REPLACE_EDITOBJ"); enum object_type type; - struct object_id old, new, prev; + struct object_id old_oid, new_oid, prev; struct strbuf ref = STRBUF_INIT; - if (get_oid(object_ref, &old) < 0) + if (get_oid(object_ref, &old_oid) < 0) die("Not a valid object name: '%s'", object_ref); - type = sha1_object_info(old.hash, NULL); + type = sha1_object_info(old_oid.hash, NULL); if (type < 0) - die("unable to get object type for %s", oid_to_hex(&old)); + die("unable to get object type for %s", oid_to_hex(&old_oid)); - check_ref_valid(&old, &prev, &ref, force); + check_ref_valid(&old_oid, &prev, &ref, force); strbuf_release(&ref); - export_object(&old, type, raw, tmpfile); + export_object(&old_oid, type, raw, tmpfile); if (launch_editor(tmpfile, NULL, NULL) < 0) die("editing object file failed"); - import_object(&new, type, raw, tmpfile); + import_object(&new_oid, type, raw, tmpfile); free(tmpfile); - if (!oidcmp(&old, &new)) - return error("new object is the same as the old one: '%s'", oid_to_hex(&old)); + if (!oidcmp(&old_oid, &new_oid)) + return error("new object is the same as the old one: '%s'", oid_to_hex(&old_oid)); - return replace_object_oid(object_ref, &old, "replacement", &new, force); + return replace_object_oid(object_ref, &old_oid, "replacement", 
&new_oid, force); } static void replace_parents(struct strbuf *buf, int argc, const char **argv) @@ -355,7 +355,7 @@ static void check_one_mergetag(struct commit *commit, struct tag *tag; int i; - hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), tag_oid.hash); + hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &tag_oid); tag = lookup_tag(&tag_oid); if (!tag) die(_("bad mergetag in commit '%s'"), ref); @@ -386,16 +386,16 @@ static void check_mergetags(struct commit *commit, int argc, const char **argv) static int create_graft(int argc, const char **argv, int force) { - struct object_id old, new; + struct object_id old_oid, new_oid; const char *old_ref = argv[0]; struct commit *commit; struct strbuf buf = STRBUF_INIT; const char *buffer; unsigned long size; - if (get_oid(old_ref, &old) < 0) + if (get_oid(old_ref, &old_oid) < 0) die(_("Not a valid object name: '%s'"), old_ref); - commit = lookup_commit_or_die(&old, old_ref); + commit = lookup_commit_or_die(&old_oid, old_ref); buffer = get_commit_buffer(commit, &size); strbuf_add(&buf, buffer, size); @@ -410,15 +410,15 @@ static int create_graft(int argc, const char **argv, int force) check_mergetags(commit, argc, argv); - if (write_sha1_file(buf.buf, buf.len, commit_type, new.hash)) + if (write_object_file(buf.buf, buf.len, commit_type, &new_oid)) die(_("could not write replacement commit for: '%s'"), old_ref); strbuf_release(&buf); - if (!oidcmp(&old, &new)) - return error("new commit is the same as the old one: '%s'", oid_to_hex(&old)); + if (!oidcmp(&old_oid, &new_oid)) + return error("new commit is the same as the old one: '%s'", oid_to_hex(&old_oid)); - return replace_object_oid(old_ref, &old, "replacement", &new, force); + return replace_object_oid(old_ref, &old_oid, "replacement", &new_oid, force); } int cmd_replace(int argc, const char **argv, const char *prefix) @@ -439,7 +439,8 @@ int cmd_replace(int argc, const char **argv, const char *prefix) OPT_CMDMODE('d', "delete", &cmdmode, N_("delete replace refs"), MODE_DELETE), OPT_CMDMODE('e', "edit", &cmdmode, N_("edit existing object"), MODE_EDIT), OPT_CMDMODE('g', "graft", &cmdmode, N_("change a commit's parents"), MODE_GRAFT), - OPT_BOOL('f', "force", &force, N_("replace the ref if it exists")), + OPT_BOOL_F('f', "force", &force, N_("replace the ref if it exists"), + PARSE_OPT_NOCOMPLETE), OPT_BOOL(0, "raw", &raw, N_("do not pretty-print contents for --edit")), OPT_STRING(0, "format", &format, N_("format"), N_("use this format")), OPT_END() diff --git a/builtin/reset.c b/builtin/reset.c index e15f595799..5da0f75de9 100644 --- a/builtin/reset.c +++ b/builtin/reset.c @@ -106,24 +106,16 @@ out: static void print_new_head_line(struct commit *commit) { - const char *hex, *body; - const char *msg; - - hex = find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV); - printf(_("HEAD is now at %s"), hex); - msg = logmsg_reencode(commit, NULL, get_log_output_encoding()); - body = strstr(msg, "\n\n"); - if (body) { - const char *eol; - size_t len; - body = skip_blank_lines(body + 2); - eol = strchr(body, '\n'); - len = eol ? 
eol - body : strlen(body); - printf(" %.*s\n", (int) len, body); - } - else - printf("\n"); - unuse_commit_buffer(commit, msg); + struct strbuf buf = STRBUF_INIT; + + printf(_("HEAD is now at %s"), + find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV)); + + pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf); + if (buf.len > 0) + printf(" %s", buf.buf); + putchar('\n'); + strbuf_release(&buf); } static void update_index_from_diff(struct diff_queue_struct *q, diff --git a/builtin/rev-list.c b/builtin/rev-list.c index d95acaa40e..6f5b9b0847 100644 --- a/builtin/rev-list.c +++ b/builtin/rev-list.c @@ -15,6 +15,7 @@ #include "progress.h" #include "reflog-walk.h" #include "oidset.h" +#include "packfile.h" static const char rev_list_usage[] = "git rev-list [OPTION] <commit-id>... [ -- paths... ]\n" @@ -67,6 +68,7 @@ enum missing_action { MA_ERROR = 0, /* fail if any missing objects are encountered */ MA_ALLOW_ANY, /* silently allow ALL missing objects */ MA_PRINT, /* print ALL missing objects in special section */ + MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */ }; static enum missing_action arg_missing_action; @@ -197,6 +199,12 @@ static void finish_commit(struct commit *commit, void *data) static inline void finish_object__ma(struct object *obj) { + /* + * Whether or not we try to dynamically fetch missing objects + * from the server, we currently DO NOT have the object. We + * can either print, allow (ignore), or conditionally allow + * (ignore) them. + */ switch (arg_missing_action) { case MA_ERROR: die("missing blob object '%s'", oid_to_hex(&obj->oid)); @@ -209,25 +217,36 @@ static inline void finish_object__ma(struct object *obj) oidset_insert(&missing_objects, &obj->oid); return; + case MA_ALLOW_PROMISOR: + if (is_promisor_object(&obj->oid)) + return; + die("unexpected missing blob object '%s'", + oid_to_hex(&obj->oid)); + return; + default: BUG("unhandled missing_action"); return; } } -static void finish_object(struct object *obj, const char *name, void *cb_data) +static int finish_object(struct object *obj, const char *name, void *cb_data) { struct rev_list_info *info = cb_data; - if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid)) + if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid)) { finish_object__ma(obj); + return 1; + } if (info->revs->verify_objects && !obj->parsed && obj->type != OBJ_COMMIT) parse_object(&obj->oid); + return 0; } static void show_object(struct object *obj, const char *name, void *cb_data) { struct rev_list_info *info = cb_data; - finish_object(obj, name, cb_data); + if (finish_object(obj, name, cb_data)) + return; display_progress(progress, ++progress_counter); if (info->flags & REV_LIST_QUIET) return; @@ -315,11 +334,19 @@ static inline int parse_missing_action_value(const char *value) if (!strcmp(value, "allow-any")) { arg_missing_action = MA_ALLOW_ANY; + fetch_if_missing = 0; return 1; } if (!strcmp(value, "print")) { arg_missing_action = MA_PRINT; + fetch_if_missing = 0; + return 1; + } + + if (!strcmp(value, "allow-promisor")) { + arg_missing_action = MA_ALLOW_PROMISOR; + fetch_if_missing = 0; return 1; } @@ -344,6 +371,35 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) init_revisions(&revs, prefix); revs.abbrev = DEFAULT_ABBREV; revs.commit_format = CMIT_FMT_UNSPECIFIED; + + /* + * Scan the argument list before invoking setup_revisions(), so that we + * know if fetch_if_missing needs to be set to 0. 
+ * + * "--exclude-promisor-objects" acts as a pre-filter on missing objects + * by not crossing the boundary from realized objects to promisor + * objects. + * + * Let "--missing" to conditionally set fetch_if_missing. + */ + for (i = 1; i < argc; i++) { + const char *arg = argv[i]; + if (!strcmp(arg, "--exclude-promisor-objects")) { + fetch_if_missing = 0; + revs.exclude_promisor_objects = 1; + break; + } + } + for (i = 1; i < argc; i++) { + const char *arg = argv[i]; + if (skip_prefix(arg, "--missing=", &arg)) { + if (revs.exclude_promisor_objects) + die(_("cannot combine --exclude-promisor-objects and --missing")); + if (parse_missing_action_value(arg)) + break; + } + } + argc = setup_revisions(argc, argv, &revs, NULL); memset(&info, 0, sizeof(info)); @@ -404,7 +460,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) continue; } if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) { - list_objects_filter_release(&filter_options); + list_objects_filter_set_no_filter(&filter_options); continue; } if (!strcmp(arg, "--filter-print-omitted")) { @@ -412,9 +468,10 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) continue; } - if (skip_prefix(arg, "--missing=", &arg) && - parse_missing_action_value(arg)) - continue; + if (!strcmp(arg, "--exclude-promisor-objects")) + continue; /* already handled above */ + if (skip_prefix(arg, "--missing=", &arg)) + continue; /* already handled above */ usage(rev_list_usage); @@ -479,7 +536,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) mark_edges_uninteresting(&revs, show_edge); if (bisect_list) { - int reaches = reaches, all = all; + int reaches, all; find_bisection(&revs.commits, &reaches, &all, bisect_find_all); diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c index 74aa644cbb..a1e680b5e9 100644 --- a/builtin/rev-parse.c +++ b/builtin/rev-parse.c @@ -243,28 +243,28 @@ static int show_file(const char *arg, int output_prefix) static int try_difference(const char *arg) { char *dotdot; - struct object_id oid; - struct object_id end; - const char *next; - const char *this; + struct object_id start_oid; + struct object_id end_oid; + const char *end; + const char *start; int symmetric; static const char head_by_default[] = "HEAD"; if (!(dotdot = strstr(arg, ".."))) return 0; - next = dotdot + 2; - this = arg; - symmetric = (*next == '.'); + end = dotdot + 2; + start = arg; + symmetric = (*end == '.'); *dotdot = 0; - next += symmetric; + end += symmetric; - if (!*next) - next = head_by_default; + if (!*end) + end = head_by_default; if (dotdot == arg) - this = head_by_default; + start = head_by_default; - if (this == head_by_default && next == head_by_default && + if (start == head_by_default && end == head_by_default && !symmetric) { /* * Just ".."? That is not a range but the @@ -274,14 +274,14 @@ static int try_difference(const char *arg) return 0; } - if (!get_oid_committish(this, &oid) && !get_oid_committish(next, &end)) { - show_rev(NORMAL, &end, next); - show_rev(symmetric ? NORMAL : REVERSED, &oid, this); + if (!get_oid_committish(start, &start_oid) && !get_oid_committish(end, &end_oid)) { + show_rev(NORMAL, &end_oid, end); + show_rev(symmetric ? 
NORMAL : REVERSED, &start_oid, start); if (symmetric) { struct commit_list *exclude; struct commit *a, *b; - a = lookup_commit_reference(&oid); - b = lookup_commit_reference(&end); + a = lookup_commit_reference(&start_oid); + b = lookup_commit_reference(&end_oid); exclude = get_merge_bases(a, b); while (exclude) { struct commit *commit = pop_commit(&exclude); @@ -516,7 +516,7 @@ static int cmd_parseopt(int argc, const char **argv, const char *prefix) PARSE_OPT_SHELL_EVAL); strbuf_addstr(&parsed, " --"); - sq_quote_argv(&parsed, argv, 0); + sq_quote_argv(&parsed, argv); puts(parsed.buf); return 0; } @@ -526,7 +526,7 @@ static int cmd_sq_quote(int argc, const char **argv) struct strbuf buf = STRBUF_INIT; if (argc) - sq_quote_argv(&buf, argv, 0); + sq_quote_argv(&buf, argv); printf("%s\n", buf.buf); strbuf_release(&buf); diff --git a/builtin/revert.c b/builtin/revert.c index b9d927eb09..76f0a35b07 100644 --- a/builtin/revert.c +++ b/builtin/revert.c @@ -208,7 +208,7 @@ int cmd_revert(int argc, const char **argv, const char *prefix) if (isatty(0)) opts.edit = 1; opts.action = REPLAY_REVERT; - git_config(git_default_config, NULL); + sequencer_init_config(&opts); res = run_sequencer(argc, argv, &opts); if (res < 0) die(_("revert failed")); @@ -221,7 +221,7 @@ int cmd_cherry_pick(int argc, const char **argv, const char *prefix) int res; opts.action = REPLAY_PICK; - git_config(git_default_config, NULL); + sequencer_init_config(&opts); res = run_sequencer(argc, argv, &opts); if (res < 0) die(_("cherry-pick failed")); diff --git a/builtin/rm.c b/builtin/rm.c index 4a2fcca27b..4447bb4d0f 100644 --- a/builtin/rm.c +++ b/builtin/rm.c @@ -242,7 +242,7 @@ static struct option builtin_rm_options[] = { OPT__DRY_RUN(&show_only, N_("dry run")), OPT__QUIET(&quiet, N_("do not list removed files")), OPT_BOOL( 0 , "cached", &index_only, N_("only remove from the index")), - OPT__FORCE(&force, N_("override the up-to-date check")), + OPT__FORCE(&force, N_("override the up-to-date check"), PARSE_OPT_NOCOMPLETE), OPT_BOOL('r', NULL, &recursive, N_("allow recursive removal")), OPT_BOOL( 0 , "ignore-unmatch", &ignore_unmatch, N_("exit with a zero status even if nothing matched")), @@ -385,10 +385,9 @@ int cmd_rm(int argc, const char **argv, const char *prefix) stage_updated_gitmodules(&the_index); } - if (active_cache_changed) { - if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK)) - die(_("Unable to write new index file")); - } + if (write_locked_index(&the_index, &lock_file, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) + die(_("Unable to write new index file")); return 0; } diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c index a5c4a8a694..ee020d4749 100644 --- a/builtin/submodule--helper.c +++ b/builtin/submodule--helper.c @@ -20,6 +20,7 @@ #define OPT_QUIET (1 << 0) #define OPT_CACHED (1 << 1) #define OPT_RECURSIVE (1 << 2) +#define OPT_FORCE (1 << 3) typedef void (*each_submodule_fn)(const struct cache_entry *list_item, void *cb_data); @@ -50,6 +51,20 @@ static char *get_default_remote(void) return ret; } +static int print_default_remote(int argc, const char **argv, const char *prefix) +{ + const char *remote; + + if (argc != 1) + die(_("submodule--helper print-default-remote takes no arguments")); + + remote = get_default_remote(); + if (remote) + printf("%s\n", remote); + + return 0; +} + static int starts_with_dot_slash(const char *str) { return str[0] == '.' 
&& is_dir_sep(str[1]); @@ -358,6 +373,25 @@ static void module_list_active(struct module_list *list) *list = active_modules; } +static char *get_up_path(const char *path) +{ + int i; + struct strbuf sb = STRBUF_INIT; + + for (i = count_slashes(path); i; i--) + strbuf_addstr(&sb, "../"); + + /* + * Check if 'path' ends with slash or not + * for having the same output for dir/sub_dir + * and dir/sub_dir/ + */ + if (!is_dir_sep(path[strlen(path) - 1])) + strbuf_addstr(&sb, "../"); + + return strbuf_detach(&sb, NULL); +} + static int module_list(int argc, const char **argv, const char *prefix) { int i; @@ -718,6 +752,309 @@ static int module_name(int argc, const char **argv, const char *prefix) return 0; } +struct sync_cb { + const char *prefix; + unsigned int flags; +}; + +#define SYNC_CB_INIT { NULL, 0 } + +static void sync_submodule(const char *path, const char *prefix, + unsigned int flags) +{ + const struct submodule *sub; + char *remote_key = NULL; + char *sub_origin_url, *super_config_url, *displaypath; + struct strbuf sb = STRBUF_INIT; + struct child_process cp = CHILD_PROCESS_INIT; + char *sub_config_path = NULL; + + if (!is_submodule_active(the_repository, path)) + return; + + sub = submodule_from_path(&null_oid, path); + + if (sub && sub->url) { + if (starts_with_dot_dot_slash(sub->url) || + starts_with_dot_slash(sub->url)) { + char *remote_url, *up_path; + char *remote = get_default_remote(); + strbuf_addf(&sb, "remote.%s.url", remote); + + if (git_config_get_string(sb.buf, &remote_url)) + remote_url = xgetcwd(); + + up_path = get_up_path(path); + sub_origin_url = relative_url(remote_url, sub->url, up_path); + super_config_url = relative_url(remote_url, sub->url, NULL); + + free(remote); + free(up_path); + free(remote_url); + } else { + sub_origin_url = xstrdup(sub->url); + super_config_url = xstrdup(sub->url); + } + } else { + sub_origin_url = xstrdup(""); + super_config_url = xstrdup(""); + } + + displaypath = get_submodule_displaypath(path, prefix); + + if (!(flags & OPT_QUIET)) + printf(_("Synchronizing submodule url for '%s'\n"), + displaypath); + + strbuf_reset(&sb); + strbuf_addf(&sb, "submodule.%s.url", sub->name); + if (git_config_set_gently(sb.buf, super_config_url)) + die(_("failed to register url for submodule path '%s'"), + displaypath); + + if (!is_submodule_populated_gently(path, NULL)) + goto cleanup; + + prepare_submodule_repo_env(&cp.env_array); + cp.git_cmd = 1; + cp.dir = path; + argv_array_pushl(&cp.args, "submodule--helper", + "print-default-remote", NULL); + + strbuf_reset(&sb); + if (capture_command(&cp, &sb, 0)) + die(_("failed to get the default remote for submodule '%s'"), + path); + + strbuf_strip_suffix(&sb, "\n"); + remote_key = xstrfmt("remote.%s.url", sb.buf); + + strbuf_reset(&sb); + submodule_to_gitdir(&sb, path); + strbuf_addstr(&sb, "/config"); + + if (git_config_set_in_file_gently(sb.buf, remote_key, sub_origin_url)) + die(_("failed to update remote for submodule '%s'"), + path); + + if (flags & OPT_RECURSIVE) { + struct child_process cpr = CHILD_PROCESS_INIT; + + cpr.git_cmd = 1; + cpr.dir = path; + prepare_submodule_repo_env(&cpr.env_array); + + argv_array_push(&cpr.args, "--super-prefix"); + argv_array_pushf(&cpr.args, "%s/", displaypath); + argv_array_pushl(&cpr.args, "submodule--helper", "sync", + "--recursive", NULL); + + if (flags & OPT_QUIET) + argv_array_push(&cpr.args, "--quiet"); + + if (run_command(&cpr)) + die(_("failed to recurse into submodule '%s'"), + path); + } + +cleanup: + free(super_config_url); + free(sub_origin_url); + 
strbuf_release(&sb); + free(remote_key); + free(displaypath); + free(sub_config_path); +} + +static void sync_submodule_cb(const struct cache_entry *list_item, void *cb_data) +{ + struct sync_cb *info = cb_data; + sync_submodule(list_item->name, info->prefix, info->flags); + +} + +static int module_sync(int argc, const char **argv, const char *prefix) +{ + struct sync_cb info = SYNC_CB_INIT; + struct pathspec pathspec; + struct module_list list = MODULE_LIST_INIT; + int quiet = 0; + int recursive = 0; + + struct option module_sync_options[] = { + OPT__QUIET(&quiet, N_("Suppress output of synchronizing submodule url")), + OPT_BOOL(0, "recursive", &recursive, + N_("Recurse into nested submodules")), + OPT_END() + }; + + const char *const git_submodule_helper_usage[] = { + N_("git submodule--helper sync [--quiet] [--recursive] [<path>]"), + NULL + }; + + argc = parse_options(argc, argv, prefix, module_sync_options, + git_submodule_helper_usage, 0); + + if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0) + return 1; + + info.prefix = prefix; + if (quiet) + info.flags |= OPT_QUIET; + if (recursive) + info.flags |= OPT_RECURSIVE; + + for_each_listed_submodule(&list, sync_submodule_cb, &info); + + return 0; +} + +struct deinit_cb { + const char *prefix; + unsigned int flags; +}; +#define DEINIT_CB_INIT { NULL, 0 } + +static void deinit_submodule(const char *path, const char *prefix, + unsigned int flags) +{ + const struct submodule *sub; + char *displaypath = NULL; + struct child_process cp_config = CHILD_PROCESS_INIT; + struct strbuf sb_config = STRBUF_INIT; + char *sub_git_dir = xstrfmt("%s/.git", path); + + sub = submodule_from_path(&null_oid, path); + + if (!sub || !sub->name) + goto cleanup; + + displaypath = get_submodule_displaypath(path, prefix); + + /* remove the submodule work tree (unless the user already did it) */ + if (is_directory(path)) { + struct strbuf sb_rm = STRBUF_INIT; + const char *format; + + /* + * protect submodules containing a .git directory + * NEEDSWORK: instead of dying, automatically call + * absorbgitdirs and (possibly) warn. 
+ */ + if (is_directory(sub_git_dir)) + die(_("Submodule work tree '%s' contains a .git " + "directory (use 'rm -rf' if you really want " + "to remove it including all of its history)"), + displaypath); + + if (!(flags & OPT_FORCE)) { + struct child_process cp_rm = CHILD_PROCESS_INIT; + cp_rm.git_cmd = 1; + argv_array_pushl(&cp_rm.args, "rm", "-qn", + path, NULL); + + if (run_command(&cp_rm)) + die(_("Submodule work tree '%s' contains local " + "modifications; use '-f' to discard them"), + displaypath); + } + + strbuf_addstr(&sb_rm, path); + + if (!remove_dir_recursively(&sb_rm, 0)) + format = _("Cleared directory '%s'\n"); + else + format = _("Could not remove submodule work tree '%s'\n"); + + if (!(flags & OPT_QUIET)) + printf(format, displaypath); + + strbuf_release(&sb_rm); + } + + if (mkdir(path, 0777)) + printf(_("could not create empty submodule directory %s"), + displaypath); + + cp_config.git_cmd = 1; + argv_array_pushl(&cp_config.args, "config", "--get-regexp", NULL); + argv_array_pushf(&cp_config.args, "submodule.%s\\.", sub->name); + + /* remove the .git/config entries (unless the user already did it) */ + if (!capture_command(&cp_config, &sb_config, 0) && sb_config.len) { + char *sub_key = xstrfmt("submodule.%s", sub->name); + /* + * remove the whole section so we have a clean state when + * the user later decides to init this submodule again + */ + git_config_rename_section_in_file(NULL, sub_key, NULL); + if (!(flags & OPT_QUIET)) + printf(_("Submodule '%s' (%s) unregistered for path '%s'\n"), + sub->name, sub->url, displaypath); + free(sub_key); + } + +cleanup: + free(displaypath); + free(sub_git_dir); + strbuf_release(&sb_config); +} + +static void deinit_submodule_cb(const struct cache_entry *list_item, + void *cb_data) +{ + struct deinit_cb *info = cb_data; + deinit_submodule(list_item->name, info->prefix, info->flags); +} + +static int module_deinit(int argc, const char **argv, const char *prefix) +{ + struct deinit_cb info = DEINIT_CB_INIT; + struct pathspec pathspec; + struct module_list list = MODULE_LIST_INIT; + int quiet = 0; + int force = 0; + int all = 0; + + struct option module_deinit_options[] = { + OPT__QUIET(&quiet, N_("Suppress submodule status output")), + OPT__FORCE(&force, N_("Remove submodule working trees even if they contain local changes"), 0), + OPT_BOOL(0, "all", &all, N_("Unregister all submodules")), + OPT_END() + }; + + const char *const git_submodule_helper_usage[] = { + N_("git submodule deinit [--quiet] [-f | --force] [--all | [--] [<path>...]]"), + NULL + }; + + argc = parse_options(argc, argv, prefix, module_deinit_options, + git_submodule_helper_usage, 0); + + if (all && argc) { + error("pathspec and --all are incompatible"); + usage_with_options(git_submodule_helper_usage, + module_deinit_options); + } + + if (!argc && !all) + die(_("Use '--all' if you really want to deinitialize all submodules")); + + if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0) + BUG("module_list_compute should not choke on empty pathspec"); + + info.prefix = prefix; + if (quiet) + info.flags |= OPT_QUIET; + if (force) + info.flags |= OPT_FORCE; + + for_each_listed_submodule(&list, deinit_submodule_cb, &info); + + return 0; +} + static int clone_submodule(const char *path, const char *gitdir, const char *url, const char *depth, struct string_list *reference, int quiet, int progress) @@ -1498,6 +1835,9 @@ static struct cmd_struct commands[] = { {"resolve-relative-url-test", resolve_relative_url_test, 0}, {"init", module_init, SUPPORT_SUPER_PREFIX}, 
{"status", module_status, SUPPORT_SUPER_PREFIX}, + {"print-default-remote", print_default_remote, 0}, + {"sync", module_sync, SUPPORT_SUPER_PREFIX}, + {"deinit", module_deinit, 0}, {"remote-branch", resolve_remote_submodule_branch, 0}, {"push-check", push_check, 0}, {"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX}, diff --git a/builtin/tag.c b/builtin/tag.c index a7e6a5b0f2..da186691ed 100644 --- a/builtin/tag.c +++ b/builtin/tag.c @@ -187,13 +187,14 @@ static int build_tag_object(struct strbuf *buf, int sign, struct object_id *resu { if (sign && do_sign(buf) < 0) return error(_("unable to sign the tag")); - if (write_sha1_file(buf->buf, buf->len, tag_type, result->hash) < 0) + if (write_object_file(buf->buf, buf->len, tag_type, result) < 0) return error(_("unable to write tag file")); return 0; } struct create_tag_options { unsigned int message_given:1; + unsigned int use_editor:1; unsigned int sign; enum { CLEANUP_NONE, @@ -220,11 +221,11 @@ static void create_tag(const struct object_id *object, const char *tag, "tag %s\n" "tagger %s\n\n", oid_to_hex(object), - typename(type), + type_name(type), tag, git_committer_info(IDENT_STRICT)); - if (!opt->message_given) { + if (!opt->message_given || opt->use_editor) { int fd; /* write the template message before editing: */ @@ -233,7 +234,10 @@ static void create_tag(const struct object_id *object, const char *tag, if (fd < 0) die_errno(_("could not create file '%s'"), path); - if (!is_null_oid(prev)) { + if (opt->message_given) { + write_or_die(fd, buf->buf, buf->len); + strbuf_reset(buf); + } else if (!is_null_oid(prev)) { write_tag_body(fd, prev); } else { struct strbuf buf = STRBUF_INIT; @@ -372,6 +376,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix) static struct ref_sorting *sorting = NULL, **sorting_tail = &sorting; struct ref_format format = REF_FORMAT_INIT; int icase = 0; + int edit_flag = 0; struct option options[] = { OPT_CMDMODE('l', "list", &cmdmode, N_("list tag names"), 'l'), { OPTION_INTEGER, 'n', NULL, &filter.lines, N_("n"), @@ -386,12 +391,13 @@ int cmd_tag(int argc, const char **argv, const char *prefix) OPT_CALLBACK('m', "message", &msg, N_("message"), N_("tag message"), parse_msg_arg), OPT_FILENAME('F', "file", &msgfile, N_("read message from file")), + OPT_BOOL('e', "edit", &edit_flag, N_("force edit of tag message")), OPT_BOOL('s', "sign", &opt.sign, N_("annotated and GPG-signed tag")), OPT_STRING(0, "cleanup", &cleanup_arg, N_("mode"), N_("how to strip spaces and #comments from message")), OPT_STRING('u', "local-user", &keyid, N_("key-id"), N_("use another key to sign the tag")), - OPT__FORCE(&force, N_("replace the tag if exists")), + OPT__FORCE(&force, N_("replace the tag if exists"), 0), OPT_BOOL(0, "create-reflog", &create_reflog, N_("create a reflog")), OPT_GROUP(N_("Tag listing options")), @@ -524,6 +530,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix) die(_("tag '%s' already exists"), tag); opt.message_given = msg.given || msgfile; + opt.use_editor = edit_flag; if (!cleanup_arg || !strcmp(cleanup_arg, "strip")) opt.cleanup_mode = CLEANUP_ALL; diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c index 62ea264c46..6620feec68 100644 --- a/builtin/unpack-objects.c +++ b/builtin/unpack-objects.c @@ -21,7 +21,7 @@ static unsigned char buffer[4096]; static unsigned int offset, len; static off_t consumed_bytes; static off_t max_input_size; -static git_SHA_CTX ctx; +static git_hash_ctx ctx; static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT; /* @@ -62,7 
+62,7 @@ static void *fill(int min) if (min > sizeof(buffer)) die("cannot fill %d bytes", min); if (offset) { - git_SHA1_Update(&ctx, buffer, offset); + the_hash_algo->update_fn(&ctx, buffer, offset); memmove(buffer, buffer + offset, len); offset = 0; } @@ -158,6 +158,7 @@ struct obj_info { struct object *obj; }; +/* Remember to update object flag allocation in object.h */ #define FLAG_OPEN (1u<<20) #define FLAG_WRITTEN (1u<<21) @@ -172,7 +173,8 @@ static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf) { struct object_id oid; - if (write_sha1_file(obj_buf->buffer, obj_buf->size, typename(obj->type), oid.hash) < 0) + if (write_object_file(obj_buf->buffer, obj_buf->size, + type_name(obj->type), &oid) < 0) die("failed to write object %s", oid_to_hex(&obj->oid)); obj->flags |= FLAG_WRITTEN; } @@ -237,14 +239,16 @@ static void write_object(unsigned nr, enum object_type type, void *buf, unsigned long size) { if (!strict) { - if (write_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash) < 0) + if (write_object_file(buf, size, type_name(type), + &obj_list[nr].oid) < 0) die("failed to write object"); added_object(nr, type, buf, size); free(buf); obj_list[nr].obj = NULL; } else if (type == OBJ_BLOB) { struct blob *blob; - if (write_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash) < 0) + if (write_object_file(buf, size, type_name(type), + &obj_list[nr].oid) < 0) die("failed to write object"); added_object(nr, type, buf, size); free(buf); @@ -258,12 +262,12 @@ static void write_object(unsigned nr, enum object_type type, } else { struct object *obj; int eaten; - hash_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash); + hash_object_file(buf, size, type_name(type), &obj_list[nr].oid); added_object(nr, type, buf, size); obj = parse_object_buffer(&obj_list[nr].oid, type, size, buf, &eaten); if (!obj) - die("invalid %s", typename(type)); + die("invalid %s", type_name(type)); add_object_buffer(obj, buf, size); obj->flags |= FLAG_OPEN; obj_list[nr].obj = obj; @@ -345,8 +349,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size, struct object_id base_oid; if (type == OBJ_REF_DELTA) { - hashcpy(base_oid.hash, fill(GIT_SHA1_RAWSZ)); - use(GIT_SHA1_RAWSZ); + hashcpy(base_oid.hash, fill(the_hash_algo->rawsz)); + use(the_hash_algo->rawsz); delta_data = get_data(delta_size); if (dry_run || !delta_data) { free(delta_data); @@ -564,15 +568,15 @@ int cmd_unpack_objects(int argc, const char **argv, const char *prefix) /* We don't take any non-flag arguments now.. 
Maybe some day */ usage(unpack_usage); } - git_SHA1_Init(&ctx); + the_hash_algo->init_fn(&ctx); unpack_all(); - git_SHA1_Update(&ctx, buffer, offset); - git_SHA1_Final(oid.hash, &ctx); + the_hash_algo->update_fn(&ctx, buffer, offset); + the_hash_algo->final_fn(oid.hash, &ctx); if (strict) write_rest(); - if (hashcmp(fill(GIT_SHA1_RAWSZ), oid.hash)) + if (hashcmp(fill(the_hash_algo->rawsz), oid.hash)) die("final sha1 did not match"); - use(GIT_SHA1_RAWSZ); + use(the_hash_algo->rawsz); /* Write the last part of the buffer to stdout */ while (len) { diff --git a/builtin/update-server-info.c b/builtin/update-server-info.c index 873070e517..4321a34456 100644 --- a/builtin/update-server-info.c +++ b/builtin/update-server-info.c @@ -12,7 +12,7 @@ int cmd_update_server_info(int argc, const char **argv, const char *prefix) { int force = 0; struct option options[] = { - OPT__FORCE(&force, N_("update the info files from scratch")), + OPT__FORCE(&force, N_("update the info files from scratch"), 0), OPT_END() }; diff --git a/builtin/verify-commit.c b/builtin/verify-commit.c index ba38ac9b15..05315ea7c9 100644 --- a/builtin/verify-commit.c +++ b/builtin/verify-commit.c @@ -49,7 +49,7 @@ static int verify_commit(const char *name, unsigned flags) return error("%s: unable to read file.", name); if (type != OBJ_COMMIT) return error("%s: cannot verify a non-commit object of type %s.", - name, typename(type)); + name, type_name(type)); ret = run_gpg_verify(&oid, buf, size, flags); diff --git a/builtin/worktree.c b/builtin/worktree.c index 9efdc22466..670555dedd 100644 --- a/builtin/worktree.c +++ b/builtin/worktree.c @@ -17,7 +17,9 @@ static const char * const worktree_usage[] = { N_("git worktree add [<options>] <path> [<commit-ish>]"), N_("git worktree list [<options>]"), N_("git worktree lock [<options>] <path>"), + N_("git worktree move <worktree> <new-path>"), N_("git worktree prune [<options>]"), + N_("git worktree remove [<options>] <worktree>"), N_("git worktree unlock <path>"), NULL }; @@ -345,9 +347,23 @@ done: * Hook failure does not warrant worktree deletion, so run hook after * is_junk is cleared, but do return appropriate code when hook fails. 
*/ - if (!ret && opts->checkout) - ret = run_hook_le(NULL, "post-checkout", oid_to_hex(&null_oid), - oid_to_hex(&commit->object.oid), "1", NULL); + if (!ret && opts->checkout) { + const char *hook = find_hook("post-checkout"); + if (hook) { + const char *env[] = { "GIT_DIR", "GIT_WORK_TREE", NULL }; + cp.git_cmd = 0; + cp.no_stdin = 1; + cp.stdout_to_stderr = 1; + cp.dir = path; + cp.env = env; + cp.argv = NULL; + argv_array_pushl(&cp.args, absolute_path(hook), + oid_to_hex(&null_oid), + oid_to_hex(&commit->object.oid), + "1", NULL); + ret = run_command(&cp); + } + } argv_array_clear(&child_env); strbuf_release(&sb); @@ -365,7 +381,9 @@ static int add(int ac, const char **av, const char *prefix) const char *branch; const char *opt_track = NULL; struct option options[] = { - OPT__FORCE(&opts.force, N_("checkout <branch> even if already checked out in other worktree")), + OPT__FORCE(&opts.force, + N_("checkout <branch> even if already checked out in other worktree"), + PARSE_OPT_NOCOMPLETE), OPT_STRING('b', NULL, &opts.new_branch, N_("branch"), N_("create a new branch")), OPT_STRING('B', NULL, &new_branch_force, N_("branch"), @@ -605,6 +623,220 @@ static int unlock_worktree(int ac, const char **av, const char *prefix) return ret; } +static void validate_no_submodules(const struct worktree *wt) +{ + struct index_state istate = { NULL }; + int i, found_submodules = 0; + + if (read_index_from(&istate, worktree_git_path(wt, "index"), + get_worktree_git_dir(wt)) > 0) { + for (i = 0; i < istate.cache_nr; i++) { + struct cache_entry *ce = istate.cache[i]; + + if (S_ISGITLINK(ce->ce_mode)) { + found_submodules = 1; + break; + } + } + } + discard_index(&istate); + + if (found_submodules) + die(_("working trees containing submodules cannot be moved or removed")); +} + +static int move_worktree(int ac, const char **av, const char *prefix) +{ + struct option options[] = { + OPT_END() + }; + struct worktree **worktrees, *wt; + struct strbuf dst = STRBUF_INIT; + struct strbuf errmsg = STRBUF_INIT; + const char *reason; + char *path; + + ac = parse_options(ac, av, prefix, options, worktree_usage, 0); + if (ac != 2) + usage_with_options(worktree_usage, options); + + path = prefix_filename(prefix, av[1]); + strbuf_addstr(&dst, path); + free(path); + + worktrees = get_worktrees(0); + wt = find_worktree(worktrees, prefix, av[0]); + if (!wt) + die(_("'%s' is not a working tree"), av[0]); + if (is_main_worktree(wt)) + die(_("'%s' is a main working tree"), av[0]); + if (is_directory(dst.buf)) { + const char *sep = find_last_dir_sep(wt->path); + + if (!sep) + die(_("could not figure out destination name from '%s'"), + wt->path); + strbuf_trim_trailing_dir_sep(&dst); + strbuf_addstr(&dst, sep); + } + if (file_exists(dst.buf)) + die(_("target '%s' already exists"), dst.buf); + + validate_no_submodules(wt); + + reason = is_worktree_locked(wt); + if (reason) { + if (*reason) + die(_("cannot move a locked working tree, lock reason: %s"), + reason); + die(_("cannot move a locked working tree")); + } + if (validate_worktree(wt, &errmsg, 0)) + die(_("validation failed, cannot move working tree: %s"), + errmsg.buf); + strbuf_release(&errmsg); + + if (rename(wt->path, dst.buf) == -1) + die_errno(_("failed to move '%s' to '%s'"), wt->path, dst.buf); + + update_worktree_location(wt, dst.buf); + + strbuf_release(&dst); + free_worktrees(worktrees); + return 0; +} + +/* + * Note, "git status --porcelain" is used to determine if it's safe to + * delete a whole worktree. 
"git status" does not ignore user + * configuration, so if a normal "git status" shows "clean" for the + * user, then it's ok to remove it. + * + * This assumption may be a bad one. We may want to ignore + * (potentially bad) user settings and only delete a worktree when + * it's absolutely safe to do so from _our_ point of view because we + * know better. + */ +static void check_clean_worktree(struct worktree *wt, + const char *original_path) +{ + struct argv_array child_env = ARGV_ARRAY_INIT; + struct child_process cp; + char buf[1]; + int ret; + + /* + * Until we sort this out, all submodules are "dirty" and + * will abort this function. + */ + validate_no_submodules(wt); + + argv_array_pushf(&child_env, "%s=%s/.git", + GIT_DIR_ENVIRONMENT, wt->path); + argv_array_pushf(&child_env, "%s=%s", + GIT_WORK_TREE_ENVIRONMENT, wt->path); + memset(&cp, 0, sizeof(cp)); + argv_array_pushl(&cp.args, "status", + "--porcelain", "--ignore-submodules=none", + NULL); + cp.env = child_env.argv; + cp.git_cmd = 1; + cp.dir = wt->path; + cp.out = -1; + ret = start_command(&cp); + if (ret) + die_errno(_("failed to run 'git status' on '%s'"), + original_path); + ret = xread(cp.out, buf, sizeof(buf)); + if (ret) + die(_("'%s' is dirty, use --force to delete it"), + original_path); + close(cp.out); + ret = finish_command(&cp); + if (ret) + die_errno(_("failed to run 'git status' on '%s', code %d"), + original_path, ret); +} + +static int delete_git_work_tree(struct worktree *wt) +{ + struct strbuf sb = STRBUF_INIT; + int ret = 0; + + strbuf_addstr(&sb, wt->path); + if (remove_dir_recursively(&sb, 0)) { + error_errno(_("failed to delete '%s'"), sb.buf); + ret = -1; + } + strbuf_release(&sb); + return ret; +} + +static int delete_git_dir(struct worktree *wt) +{ + struct strbuf sb = STRBUF_INIT; + int ret = 0; + + strbuf_addstr(&sb, git_common_path("worktrees/%s", wt->id)); + if (remove_dir_recursively(&sb, 0)) { + error_errno(_("failed to delete '%s'"), sb.buf); + ret = -1; + } + strbuf_release(&sb); + return ret; +} + +static int remove_worktree(int ac, const char **av, const char *prefix) +{ + int force = 0; + struct option options[] = { + OPT_BOOL(0, "force", &force, + N_("force removing even if the worktree is dirty")), + OPT_END() + }; + struct worktree **worktrees, *wt; + struct strbuf errmsg = STRBUF_INIT; + const char *reason; + int ret = 0; + + ac = parse_options(ac, av, prefix, options, worktree_usage, 0); + if (ac != 1) + usage_with_options(worktree_usage, options); + + worktrees = get_worktrees(0); + wt = find_worktree(worktrees, prefix, av[0]); + if (!wt) + die(_("'%s' is not a working tree"), av[0]); + if (is_main_worktree(wt)) + die(_("'%s' is a main working tree"), av[0]); + reason = is_worktree_locked(wt); + if (reason) { + if (*reason) + die(_("cannot remove a locked working tree, lock reason: %s"), + reason); + die(_("cannot remove a locked working tree")); + } + if (validate_worktree(wt, &errmsg, WT_VALIDATE_WORKTREE_MISSING_OK)) + die(_("validation failed, cannot remove working tree: %s"), + errmsg.buf); + strbuf_release(&errmsg); + + if (file_exists(wt->path)) { + if (!force) + check_clean_worktree(wt, av[0]); + + ret |= delete_git_work_tree(wt); + } + /* + * continue on even if ret is non-zero, there's no going back + * from here. 
+ */ + ret |= delete_git_dir(wt); + + free_worktrees(worktrees); + return ret; +} + int cmd_worktree(int ac, const char **av, const char *prefix) { struct option options[] = { @@ -627,5 +859,9 @@ int cmd_worktree(int ac, const char **av, const char *prefix) return lock_worktree(ac - 1, av + 1, prefix); if (!strcmp(av[1], "unlock")) return unlock_worktree(ac - 1, av + 1, prefix); + if (!strcmp(av[1], "move")) + return move_worktree(ac - 1, av + 1, prefix); + if (!strcmp(av[1], "remove")) + return remove_worktree(ac - 1, av + 1, prefix); usage_with_options(worktree_usage, options); } diff --git a/bulk-checkin.c b/bulk-checkin.c index 3310fd210a..9d87eac07b 100644 --- a/bulk-checkin.c +++ b/bulk-checkin.c @@ -12,7 +12,7 @@ static struct bulk_checkin_state { unsigned plugged:1; char *pack_tmp_name; - struct sha1file *f; + struct hashfile *f; off_t offset; struct pack_idx_option pack_idx_opts; @@ -35,9 +35,9 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state) unlink(state->pack_tmp_name); goto clear_exit; } else if (state->nr_written == 1) { - sha1close(state->f, oid.hash, CSUM_FSYNC); + hashclose(state->f, oid.hash, CSUM_FSYNC); } else { - int fd = sha1close(state->f, oid.hash, 0); + int fd = hashclose(state->f, oid.hash, 0); fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name, state->nr_written, oid.hash, state->offset); @@ -93,7 +93,7 @@ static int already_written(struct bulk_checkin_state *state, unsigned char sha1[ * with a new pack. */ static int stream_to_pack(struct bulk_checkin_state *state, - git_SHA_CTX *ctx, off_t *already_hashed_to, + git_hash_ctx *ctx, off_t *already_hashed_to, int fd, size_t size, enum object_type type, const char *path, unsigned flags) { @@ -127,7 +127,7 @@ static int stream_to_pack(struct bulk_checkin_state *state, if (rsize < hsize) hsize = rsize; if (hsize) - git_SHA1_Update(ctx, ibuf, hsize); + the_hash_algo->update_fn(ctx, ibuf, hsize); *already_hashed_to = offset; } s.next_in = ibuf; @@ -149,7 +149,7 @@ static int stream_to_pack(struct bulk_checkin_state *state, return -1; } - sha1write(state->f, obuf, written); + hashwrite(state->f, obuf, written); state->offset += written; } s.next_out = obuf; @@ -192,10 +192,10 @@ static int deflate_to_pack(struct bulk_checkin_state *state, unsigned flags) { off_t seekback, already_hashed_to; - git_SHA_CTX ctx; + git_hash_ctx ctx; unsigned char obuf[16384]; unsigned header_len; - struct sha1file_checkpoint checkpoint; + struct hashfile_checkpoint checkpoint; struct pack_idx_entry *idx = NULL; seekback = lseek(fd, 0, SEEK_CUR); @@ -203,9 +203,9 @@ static int deflate_to_pack(struct bulk_checkin_state *state, return error("cannot find the current offset"); header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX, - typename(type), (uintmax_t)size) + 1; - git_SHA1_Init(&ctx); - git_SHA1_Update(&ctx, obuf, header_len); + type_name(type), (uintmax_t)size) + 1; + the_hash_algo->init_fn(&ctx); + the_hash_algo->update_fn(&ctx, obuf, header_len); /* Note: idx is non-NULL when we are writing */ if ((flags & HASH_WRITE_OBJECT) != 0) @@ -216,7 +216,7 @@ static int deflate_to_pack(struct bulk_checkin_state *state, while (1) { prepare_to_stream(state, flags); if (idx) { - sha1file_checkpoint(state->f, &checkpoint); + hashfile_checkpoint(state->f, &checkpoint); idx->offset = state->offset; crc32_begin(state->f); } @@ -230,19 +230,19 @@ static int deflate_to_pack(struct bulk_checkin_state *state, */ if (!idx) die("BUG: should not happen"); - sha1file_truncate(state->f, &checkpoint); + 
hashfile_truncate(state->f, &checkpoint); state->offset = checkpoint.offset; finish_bulk_checkin(state); if (lseek(fd, seekback, SEEK_SET) == (off_t) -1) return error("cannot seek back"); } - git_SHA1_Final(result_sha1, &ctx); + the_hash_algo->final_fn(result_sha1, &ctx); if (!idx) return 0; idx->crc32 = crc32_end(state->f); if (already_written(state, result_sha1)) { - sha1file_truncate(state->f, &checkpoint); + hashfile_truncate(state->f, &checkpoint); state->offset = checkpoint.offset; free(idx); } else { diff --git a/cache-tree.c b/cache-tree.c index 0dd6292a94..c52e4303df 100644 --- a/cache-tree.c +++ b/cache-tree.c @@ -84,9 +84,8 @@ static struct cache_tree_sub *find_subtree(struct cache_tree *it, down->namelen = pathlen; if (pos < it->subtree_nr) - memmove(it->down + pos + 1, - it->down + pos, - sizeof(down) * (it->subtree_nr - pos - 1)); + MOVE_ARRAY(it->down + pos + 1, it->down + pos, + it->subtree_nr - pos - 1); it->down[pos] = down; return down; } @@ -400,16 +399,16 @@ static int update_one(struct cache_tree *it, } if (repair) { - unsigned char sha1[20]; - hash_sha1_file(buffer.buf, buffer.len, tree_type, sha1); - if (has_sha1_file(sha1)) - hashcpy(it->oid.hash, sha1); + struct object_id oid; + hash_object_file(buffer.buf, buffer.len, tree_type, &oid); + if (has_sha1_file(oid.hash)) + oidcpy(&it->oid, &oid); else to_invalidate = 1; - } else if (dryrun) - hash_sha1_file(buffer.buf, buffer.len, tree_type, - it->oid.hash); - else if (write_sha1_file(buffer.buf, buffer.len, tree_type, it->oid.hash)) { + } else if (dryrun) { + hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid); + } else if (write_object_file(buffer.buf, buffer.len, tree_type, + &it->oid)) { strbuf_release(&buffer); return -1; } @@ -4,7 +4,7 @@ #include "git-compat-util.h" #include "strbuf.h" #include "hashmap.h" -#include "mru.h" +#include "list.h" #include "advice.h" #include "gettext.h" #include "convert.h" @@ -16,31 +16,6 @@ #include "sha1-array.h" #include "repository.h" -#ifndef platform_SHA_CTX -/* - * platform's underlying implementation of SHA-1; could be OpenSSL, - * blk_SHA, Apple CommonCrypto, etc... Note that including - * SHA1_HEADER may have already defined platform_SHA_CTX for our - * own implementations like block-sha1 and ppc-sha1, so we list - * the default for OpenSSL compatible SHA-1 implementations here. - */ -#define platform_SHA_CTX SHA_CTX -#define platform_SHA1_Init SHA1_Init -#define platform_SHA1_Update SHA1_Update -#define platform_SHA1_Final SHA1_Final -#endif - -#define git_SHA_CTX platform_SHA_CTX -#define git_SHA1_Init platform_SHA1_Init -#define git_SHA1_Update platform_SHA1_Update -#define git_SHA1_Final platform_SHA1_Final - -#ifdef SHA1_MAX_BLOCK_SIZE -#include "compat/sha1-chunked.h" -#undef git_SHA1_Update -#define git_SHA1_Update git_SHA1_Update_Chunked -#endif - #include <zlib.h> typedef struct git_zstream { z_stream z; @@ -624,6 +599,7 @@ extern int read_index_unmerged(struct index_state *); /* For use with `write_locked_index()`. */ #define COMMIT_LOCK (1 << 0) +#define SKIP_IF_UNCHANGED (1 << 1) /* * Write the index while holding an already-taken lock. Close the lock, @@ -640,6 +616,9 @@ extern int read_index_unmerged(struct index_state *); * With `COMMIT_LOCK`, the lock is always committed or rolled back. * Without it, the lock is closed, but neither committed nor rolled * back. + * + * If `SKIP_IF_UNCHANGED` is given and the index is unchanged, nothing + * is written (and the lock is rolled back if `COMMIT_LOCK` is given). 
*/ extern int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags); @@ -916,10 +895,13 @@ extern int grafts_replace_parents; #define GIT_REPO_VERSION 0 #define GIT_REPO_VERSION_READ 1 extern int repository_format_precious_objects; +extern char *repository_format_partial_clone; +extern const char *core_partial_clone_filter_default; struct repository_format { int version; int precious_objects; + char *partial_clone; /* value of extensions.partialclone */ int is_bare; int hash_algo; char *work_tree; @@ -959,12 +941,10 @@ extern void check_repository_format(void); #define TYPE_CHANGED 0x0040 /* - * Return the name of the file in the local object database that would - * be used to store a loose object with the specified sha1. The - * return value is a pointer to a statically allocated buffer that is - * overwritten each time the function is called. + * Put in `buf` the name of the file in the local object database that + * would be used to store a loose object with the specified sha1. */ -extern const char *sha1_file_name(const unsigned char *sha1); +extern void sha1_file_name(struct strbuf *buf, const unsigned char *sha1); /* * Return an abbreviated sha1 unique within this repository's object database. @@ -1031,7 +1011,7 @@ static inline void hashclr(unsigned char *hash) static inline void oidclr(struct object_id *oid) { - hashclr(oid->hash); + memset(oid->hash, 0, GIT_MAX_RAWSZ); } @@ -1049,8 +1029,6 @@ extern const struct object_id empty_tree_oid; "\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \ "\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91" extern const struct object_id empty_blob_oid; -#define EMPTY_BLOB_SHA1_BIN (empty_blob_oid.hash) - static inline int is_empty_blob_sha1(const unsigned char *sha1) { @@ -1240,11 +1218,22 @@ static inline const unsigned char *lookup_replace_object(const unsigned char *sh /* Read and unpack a sha1 file into memory, write memory to a sha1 file */ extern int sha1_object_info(const unsigned char *, unsigned long *); -extern int hash_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1); -extern int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *return_sha1); -extern int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, struct object_id *oid, unsigned flags); -extern int pretend_sha1_file(void *, unsigned long, enum object_type, unsigned char *); -extern int force_object_loose(const unsigned char *sha1, time_t mtime); + +extern int hash_object_file(const void *buf, unsigned long len, + const char *type, struct object_id *oid); + +extern int write_object_file(const void *buf, unsigned long len, + const char *type, struct object_id *oid); + +extern int hash_object_file_literally(const void *buf, unsigned long len, + const char *type, struct object_id *oid, + unsigned flags); + +extern int pretend_object_file(void *, unsigned long, enum object_type, + struct object_id *oid); + +extern int force_object_loose(const struct object_id *oid, time_t mtime); + extern int git_open_cloexec(const char *name, int flags); #define git_open(name) git_open_cloexec(name, O_RDONLY) extern void *map_sha1_file(const unsigned char *sha1, unsigned long *size); @@ -1637,6 +1626,7 @@ struct pack_window { extern struct packed_git { struct packed_git *next; + struct list_head mru; struct pack_window *windows; off_t pack_size; const void *index_data; @@ -1650,7 +1640,8 @@ extern struct packed_git { unsigned pack_local:1, pack_keep:1, freshened:1, - do_not_close:1; + 
do_not_close:1, + pack_promisor:1; unsigned char sha1[20]; struct revindex_entry *revindex; /* something like ".git/objects/pack/xxxxx.pack" */ @@ -1658,10 +1649,9 @@ extern struct packed_git { } *packed_git; /* - * A most-recently-used ordered version of the packed_git list, which can - * be iterated instead of packed_git (and marked via mru_mark). + * A most-recently-used ordered version of the packed_git list. */ -extern struct mru packed_git_mru; +extern struct list_head packed_git_mru; struct pack_entry { off_t offset; @@ -1675,7 +1665,7 @@ struct pack_entry { * usual "XXXXXX" trailer, and the resulting filename is written into the * "template" buffer. Returns the open descriptor. */ -extern int odb_mkstemp(struct strbuf *template, const char *pattern); +extern int odb_mkstemp(struct strbuf *temp_filename, const char *pattern); /* * Create a pack .keep file named "name" (which should generally be the output @@ -1746,7 +1736,7 @@ struct object_info { unsigned long *sizep; off_t *disk_sizep; unsigned char *delta_base_sha1; - struct strbuf *typename; + struct strbuf *type_name; void **contentp; /* Response */ @@ -1789,6 +1779,14 @@ struct object_info { #define OBJECT_INFO_QUICK 8 extern int sha1_object_info_extended(const unsigned char *, struct object_info *, unsigned flags); +/* + * Set this to 0 to prevent sha1_object_info_extended() from fetching missing + * blobs. This has a difference only if extensions.partialClone is set. + * + * Its default value is 1. + */ +extern int fetch_if_missing; + /* Dumb servers support */ extern int update_server_info(int); diff --git a/ci/lib-travisci.sh b/ci/lib-travisci.sh index 07f27c7270..109ef280da 100755 --- a/ci/lib-travisci.sh +++ b/ci/lib-travisci.sh @@ -21,8 +21,6 @@ skip_branch_tip_with_tag () { fi } -good_trees_file="$HOME/travis-cache/good-trees" - # Save some info about the current commit's tree, so we can skip the build # job if we encounter the same tree again and can provide a useful info # message. @@ -83,7 +81,10 @@ check_unignored_build_artifacts () # and installing dependencies. set -ex -mkdir -p "$HOME/travis-cache" +cache_dir="$HOME/travis-cache" +good_trees_file="$cache_dir/good-trees" + +mkdir -p "$cache_dir" skip_branch_tip_with_tag skip_good_tree @@ -96,7 +97,7 @@ fi export DEVELOPER=1 export DEFAULT_TEST_TARGET=prove export GIT_PROVE_OPTS="--timer --jobs 3 --state=failed,slow,save" -export GIT_TEST_OPTS="--verbose-log" +export GIT_TEST_OPTS="--verbose-log -x" export GIT_TEST_CLONE_2GB=YesPlease case "$jobname" in diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh index 3e23e65f9e..3735ce413f 100755 --- a/ci/run-build-and-tests.sh +++ b/ci/run-build-and-tests.sh @@ -5,7 +5,7 @@ . 
${0%/*}/lib-travisci.sh -ln -s $HOME/travis-cache/.prove t/.prove +ln -s "$cache_dir/.prove" t/.prove make --jobs=2 make --quiet test diff --git a/ci/run-linux32-build.sh b/ci/run-linux32-build.sh index c19c50c1c9..2c60d2e70a 100755 --- a/ci/run-linux32-build.sh +++ b/ci/run-linux32-build.sh @@ -3,31 +3,58 @@ # Build and test Git in a 32-bit environment # # Usage: -# run-linux32-build.sh [host-user-id] +# run-linux32-build.sh <host-user-id> # -set -x +set -ex + +if test $# -ne 1 || test -z "$1" +then + echo >&2 "usage: run-linux32-build.sh <host-user-id>" + exit 1 +fi # Update packages to the latest available versions linux32 --32bit i386 sh -c ' apt update >/dev/null && apt install -y build-essential libcurl4-openssl-dev libssl-dev \ libexpat-dev gettext python >/dev/null -' && +' # If this script runs inside a docker container, then all commands are # usually executed as root. Consequently, the host user might not be # able to access the test output files. -# If a host user id is given, then create a user "ci" with the host user -# id to make everything accessible to the host user. -HOST_UID=$1 && -CI_USER=$USER && -test -z $HOST_UID || (CI_USER="ci" && useradd -u $HOST_UID $CI_USER) && +# If a non 0 host user id is given, then create a user "ci" with that +# user id to make everything accessible to the host user. +HOST_UID=$1 +if test $HOST_UID -eq 0 +then + # Just in case someone does want to run the test suite as root. + CI_USER=root +else + CI_USER=ci + if test "$(id -u $CI_USER 2>/dev/null)" = $HOST_UID + then + echo "user '$CI_USER' already exists with the requested ID $HOST_UID" + else + useradd -u $HOST_UID $CI_USER + fi + + # Due to a bug the test suite was run as root in the past, so + # a prove state file created back then is only accessible by + # root. Now that bug is fixed, the test suite is run as a + # regular user, but the prove state file coming from Travis + # CI's cache might still be owned by root. + # Make sure that this user has rights to any cached files, + # including an existing prove state file. 
+ test -n "$cache_dir" && chown -R $HOST_UID:$HOST_UID "$cache_dir" +fi # Build and test linux32 --32bit i386 su -m -l $CI_USER -c ' - cd /usr/src/git && - ln -s /tmp/travis-cache/.prove t/.prove && - make --jobs=2 && - make --quiet test + set -ex + cd /usr/src/git + test -n "$cache_dir" && ln -s "$cache_dir/.prove" t/.prove + make --jobs=2 + make --quiet test ' diff --git a/ci/run-linux32-docker.sh b/ci/run-linux32-docker.sh index 4f191c5bb1..21637903ce 100755 --- a/ci/run-linux32-docker.sh +++ b/ci/run-linux32-docker.sh @@ -9,7 +9,9 @@ docker pull daald/ubuntu32:xenial # Use the following command to debug the docker build locally: # $ docker run -itv "${PWD}:/usr/src/git" --entrypoint /bin/bash daald/ubuntu32:xenial -# root@container:/# /usr/src/git/ci/run-linux32-build.sh +# root@container:/# /usr/src/git/ci/run-linux32-build.sh <host-user-id> + +container_cache_dir=/tmp/travis-cache docker run \ --interactive \ @@ -18,8 +20,9 @@ docker run \ --env GIT_PROVE_OPTS \ --env GIT_TEST_OPTS \ --env GIT_TEST_CLONE_2GB \ + --env cache_dir="$container_cache_dir" \ --volume "${PWD}:/usr/src/git" \ - --volume "${HOME}/travis-cache:/tmp/travis-cache" \ + --volume "$cache_dir:$container_cache_dir" \ daald/ubuntu32:xenial \ /usr/src/git/ci/run-linux32-build.sh $(id -u $USER) @@ -161,11 +161,6 @@ int color_parse(const char *value, char *dst) return color_parse_mem(value, strlen(value), dst); } -void color_set(char *dst, const char *color_bytes) -{ - xsnprintf(dst, COLOR_MAXLEN, "%s", color_bytes); -} - /* * Write the ANSI color codes for "c" to "out"; the string should * already have the ANSI escape code in it. "out" should have enough @@ -399,8 +394,6 @@ static int color_vfprintf(FILE *fp, const char *color, const char *fmt, return r; } - - int color_fprintf(FILE *fp, const char *color, const char *fmt, ...) { va_list args; @@ -76,22 +76,46 @@ int git_color_config(const char *var, const char *value, void *cb); int git_color_default_config(const char *var, const char *value, void *cb); /* - * Set the color buffer (which must be COLOR_MAXLEN bytes) - * to the raw color bytes; this is useful for initializing - * default color variables. + * Parse a config option, which can be a boolean or one of + * "never", "auto", "always". Return a constant of + * GIT_COLOR_NEVER for "never" or negative boolean, + * GIT_COLOR_ALWAYS for "always" or a positive boolean, + * and GIT_COLOR_AUTO for "auto". */ -void color_set(char *dst, const char *color_bytes); - int git_config_colorbool(const char *var, const char *value); + +/* + * Return a boolean whether to use color, where the argument 'var' is + * one of GIT_COLOR_UNKNOWN, GIT_COLOR_NEVER, GIT_COLOR_ALWAYS, GIT_COLOR_AUTO. + */ int want_color(int var); + +/* + * Translate a Git color from 'value' into a string that the terminal can + * interpret and store it into 'dst'. The Git color values are of the form + * "foreground [background] [attr]" where fore- and background can be a color + * name ("red"), a RGB code (#0xFF0000) or a 256-color-mode from the terminal. + */ int color_parse(const char *value, char *dst); int color_parse_mem(const char *value, int len, char *dst); + +/* + * Output the formatted string in the specified color (and then reset to normal + * color so subsequent output is uncolored). Omits the color encapsulation if + * `color` is NULL. The `color_fprintf_ln` prints a new line after resetting + * the color. The `color_print_strbuf` prints the contents of the given + * strbuf (BUG: but only up to its first NUL character). 
+ */ __attribute__((format (printf, 3, 4))) int color_fprintf(FILE *fp, const char *color, const char *fmt, ...); __attribute__((format (printf, 3, 4))) int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...); void color_print_strbuf(FILE *fp, const char *color, const struct strbuf *sb); +/* + * Check if the given color is GIT_COLOR_NIL that means "no color selected". + * The caller needs to replace the color with the actual desired color. + */ int color_is_nil(const char *color); #endif /* COLOR_H */ diff --git a/combine-diff.c b/combine-diff.c index 2505de119a..1ec9af1f81 100644 --- a/combine-diff.c +++ b/combine-diff.c @@ -162,7 +162,7 @@ enum coalesce_direction { MATCH, BASE, NEW }; /* Coalesce new lines into base by finding LCS */ static struct lline *coalesce_lines(struct lline *base, int *lenbase, - struct lline *new, int lennew, + struct lline *newline, int lennew, unsigned long parent, long flags) { int **lcs; @@ -170,12 +170,12 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase, struct lline *baseend, *newend = NULL; int i, j, origbaselen = *lenbase; - if (new == NULL) + if (newline == NULL) return base; if (base == NULL) { *lenbase = lennew; - return new; + return newline; } /* @@ -200,7 +200,7 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase, directions[0][j] = NEW; for (i = 1, baseend = base; i < origbaselen + 1; i++) { - for (j = 1, newend = new; j < lennew + 1; j++) { + for (j = 1, newend = newline; j < lennew + 1; j++) { if (match_string_spaces(baseend->line, baseend->len, newend->line, newend->len, flags)) { lcs[i][j] = lcs[i - 1][j - 1] + 1; @@ -241,7 +241,7 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase, if (lline->prev) lline->prev->next = lline->next; else - new = lline->next; + newline = lline->next; if (lline->next) lline->next->prev = lline->prev; @@ -270,7 +270,7 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase, } } - newend = new; + newend = newline; while (newend) { struct lline *lline = newend; newend = newend->next; @@ -1053,7 +1053,7 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent, if (is_file) { struct strbuf buf = STRBUF_INIT; - if (convert_to_git(&the_index, elem->path, result, len, &buf, safe_crlf)) { + if (convert_to_git(&the_index, elem->path, result, len, &buf, global_conv_flags_eol)) { free(result); result = strbuf_detach(&buf, &len); result_size = len; @@ -1438,7 +1438,7 @@ void diff_tree_combined(const struct object_id *oid, opt->flags.follow_renames || opt->break_opt != -1 || opt->detect_rename || - opt->pickaxe || + (opt->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK) || opt->filter; @@ -126,10 +126,8 @@ int register_commit_graft(struct commit_graft *graft, int ignore_dups) ALLOC_GROW(commit_graft, commit_graft_nr + 1, commit_graft_alloc); commit_graft_nr++; if (pos < commit_graft_nr) - memmove(commit_graft + pos + 1, - commit_graft + pos, - (commit_graft_nr - pos - 1) * - sizeof(*commit_graft)); + MOVE_ARRAY(commit_graft + pos + 1, commit_graft + pos, + commit_graft_nr - pos - 1); commit_graft[pos] = graft; return 0; } @@ -274,7 +272,7 @@ const void *get_commit_buffer(const struct commit *commit, unsigned long *sizep) oid_to_hex(&commit->object.oid)); if (type != OBJ_COMMIT) die("expected commit for %s, got %s", - oid_to_hex(&commit->object.oid), typename(type)); + oid_to_hex(&commit->object.oid), type_name(type)); if (sizep) *sizep = size; } @@ -861,19 +859,19 @@ struct commit_list *get_octopus_merge_bases(struct 
commit_list *in) commit_list_insert(in->item, &ret); for (i = in->next; i; i = i->next) { - struct commit_list *new = NULL, *end = NULL; + struct commit_list *new_commits = NULL, *end = NULL; for (j = ret; j; j = j->next) { struct commit_list *bases; bases = get_merge_bases(i->item, j->item); - if (!new) - new = bases; + if (!new_commits) + new_commits = bases; else end->next = bases; for (k = bases; k; k = k->next) end = k; } - ret = new; + ret = new_commits; } return ret; } @@ -1380,9 +1378,8 @@ void free_commit_extra_headers(struct commit_extra_header *extra) } } -int commit_tree(const char *msg, size_t msg_len, - const unsigned char *tree, - struct commit_list *parents, unsigned char *ret, +int commit_tree(const char *msg, size_t msg_len, const struct object_id *tree, + struct commit_list *parents, struct object_id *ret, const char *author, const char *sign_commit) { struct commit_extra_header *extra = NULL, **tail = &extra; @@ -1511,8 +1508,8 @@ N_("Warning: commit message did not conform to UTF-8.\n" "variable i18n.commitencoding to the encoding your project uses.\n"); int commit_tree_extended(const char *msg, size_t msg_len, - const unsigned char *tree, - struct commit_list *parents, unsigned char *ret, + const struct object_id *tree, + struct commit_list *parents, struct object_id *ret, const char *author, const char *sign_commit, struct commit_extra_header *extra) { @@ -1520,7 +1517,7 @@ int commit_tree_extended(const char *msg, size_t msg_len, int encoding_is_utf8; struct strbuf buffer; - assert_sha1_type(tree, OBJ_TREE); + assert_sha1_type(tree->hash, OBJ_TREE); if (memchr(msg, '\0', msg_len)) return error("a NUL byte in commit log message not allowed."); @@ -1529,7 +1526,7 @@ int commit_tree_extended(const char *msg, size_t msg_len, encoding_is_utf8 = is_encoding_utf8(git_commit_encoding); strbuf_init(&buffer, 8192); /* should avoid reallocs for the headers */ - strbuf_addf(&buffer, "tree %s\n", sha1_to_hex(tree)); + strbuf_addf(&buffer, "tree %s\n", oid_to_hex(tree)); /* * NOTE! 
This ordering means that the same exact tree merged with a @@ -1568,7 +1565,7 @@ int commit_tree_extended(const char *msg, size_t msg_len, goto out; } - result = write_sha1_file(buffer.buf, buffer.len, commit_type, ret); + result = write_object_file(buffer.buf, buffer.len, commit_type, ret); out: strbuf_release(&buffer); return result; @@ -1617,11 +1614,11 @@ struct commit *get_merge_parent(const char *name) struct commit_list **commit_list_append(struct commit *commit, struct commit_list **next) { - struct commit_list *new = xmalloc(sizeof(struct commit_list)); - new->item = commit; - *next = new; - new->next = NULL; - return &new->next; + struct commit_list *new_commit = xmalloc(sizeof(struct commit_list)); + new_commit->item = commit; + *next = new_commit; + new_commit->next = NULL; + return &new_commit->next; } const char *find_commit_header(const char *msg, const char *key, size_t *out_len) @@ -262,14 +262,15 @@ extern void append_merge_tag_headers(struct commit_list *parents, struct commit_extra_header ***tail); extern int commit_tree(const char *msg, size_t msg_len, - const unsigned char *tree, - struct commit_list *parents, unsigned char *ret, + const struct object_id *tree, + struct commit_list *parents, struct object_id *ret, const char *author, const char *sign_commit); extern int commit_tree_extended(const char *msg, size_t msg_len, - const unsigned char *tree, - struct commit_list *parents, unsigned char *ret, - const char *author, const char *sign_commit, + const struct object_id *tree, + struct commit_list *parents, + struct object_id *ret, const char *author, + const char *sign_commit, struct commit_extra_header *); extern struct commit_extra_header *read_commit_extra_headers(struct commit *, const char **); diff --git a/compat/mingw.c b/compat/mingw.c index 2d44d21aca..a67872babf 100644 --- a/compat/mingw.c +++ b/compat/mingw.c @@ -761,6 +761,17 @@ revert_attrs: return rc; } +#undef strftime +size_t mingw_strftime(char *s, size_t max, + const char *format, const struct tm *tm) +{ + size_t ret = strftime(s, max, format, tm); + + if (!ret && errno == EINVAL) + die("invalid strftime format: '%s'", format); + return ret; +} + unsigned int sleep (unsigned int seconds) { Sleep(seconds*1000); diff --git a/compat/mingw.h b/compat/mingw.h index e03aecfe2e..571019d0bd 100644 --- a/compat/mingw.h +++ b/compat/mingw.h @@ -361,6 +361,9 @@ int mingw_fstat(int fd, struct stat *buf); int mingw_utime(const char *file_name, const struct utimbuf *times); #define utime mingw_utime +size_t mingw_strftime(char *s, size_t max, + const char *format, const struct tm *tm); +#define strftime mingw_strftime pid_t mingw_spawnvpe(const char *cmd, const char **argv, char **env, const char *dir, @@ -1149,11 +1149,14 @@ static int git_default_core_config(const char *var, const char *value) } if (!strcmp(var, "core.safecrlf")) { + int eol_rndtrp_die; if (value && !strcasecmp(value, "warn")) { - safe_crlf = SAFE_CRLF_WARN; + global_conv_flags_eol = CONV_EOL_RNDTRP_WARN; return 0; } - safe_crlf = git_config_bool(var, value); + eol_rndtrp_die = git_config_bool(var, value); + global_conv_flags_eol = eol_rndtrp_die ? + CONV_EOL_RNDTRP_DIE : CONV_EOL_RNDTRP_WARN; return 0; } @@ -1251,6 +1254,11 @@ static int git_default_core_config(const char *var, const char *value) return 0; } + if (!strcmp(var, "core.partialclonefilter")) { + return git_config_string(&core_partial_clone_filter_default, + var, value); + } + /* Add other config variables here and to Documentation/config.txt. 
*/ return 0; } diff --git a/connected.c b/connected.c index 4a47f33270..91feb78815 100644 --- a/connected.c +++ b/connected.c @@ -56,6 +56,8 @@ int check_connected(oid_iterate_fn fn, void *cb_data, argv_array_push(&rev_list.args,"rev-list"); argv_array_push(&rev_list.args, "--objects"); argv_array_push(&rev_list.args, "--stdin"); + if (repository_format_partial_clone) + argv_array_push(&rev_list.args, "--exclude-promisor-objects"); argv_array_push(&rev_list.args, "--not"); argv_array_push(&rev_list.args, "--all"); argv_array_push(&rev_list.args, "--quiet"); diff --git a/contrib/coccinelle/strbuf.cocci b/contrib/coccinelle/strbuf.cocci index ce2e92c6e9..e34eada1ad 100644 --- a/contrib/coccinelle/strbuf.cocci +++ b/contrib/coccinelle/strbuf.cocci @@ -14,8 +14,9 @@ constant fmt !~ "%"; @@ expression E1, E2; +format F =~ "s"; @@ -- strbuf_addf(E1, "%s", E2); +- strbuf_addf(E1, "%@F@", E2); + strbuf_addstr(E1, E2); @@ diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash index 88813e9124..c7957f0a90 100644 --- a/contrib/completion/git-completion.bash +++ b/contrib/completion/git-completion.bash @@ -280,6 +280,39 @@ __gitcomp () esac } +# This function is equivalent to +# +# __gitcomp "$(git xxx --git-completion-helper) ..." +# +# except that the output is cached. Accept 1-3 arguments: +# 1: the git command to execute, this is also the cache key +# 2: extra options to be added on top (e.g. negative forms) +# 3: options to be excluded +__gitcomp_builtin () +{ + # spaces must be replaced with underscore for multi-word + # commands, e.g. "git remote add" becomes remote_add. + local cmd="$1" + local incl="$2" + local excl="$3" + + local var=__gitcomp_builtin_"${cmd/-/_}" + local options + eval "options=\$$var" + + if [ -z "$options" ]; then + # leading and trailing spaces are significant to make + # option removal work correctly. + options=" $(__git ${cmd/_/ } --git-completion-helper) $incl " + for i in $excl; do + options="${options/ $i / }" + done + eval "$var=\"$options\"" + fi + + __gitcomp "$options" +} + # Variation of __gitcomp_nl () that appends to the existing list of # completion candidates, COMPREPLY. 
__gitcomp_nl_append () @@ -439,7 +472,7 @@ __git_refs () track="" ;; *) - for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD; do + for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD REBASE_HEAD; do case "$i" in $match*) if [ -e "$dir/$i" ]; then @@ -1072,12 +1105,13 @@ __git_count_arguments () } __git_whitespacelist="nowarn warn error error-all fix" +__git_am_inprogress_options="--skip --continue --resolved --abort --quit --show-current-patch" _git_am () { __git_find_repo_path if [ -d "$__git_repo_path"/rebase-apply ]; then - __gitcomp "--skip --continue --resolved --abort" + __gitcomp "$__git_am_inprogress_options" return fi case "$cur" in @@ -1086,12 +1120,8 @@ _git_am () return ;; --*) - __gitcomp " - --3way --committer-date-is-author-date --ignore-date - --ignore-whitespace --ignore-space-change - --interactive --keep --no-utf8 --signoff --utf8 - --whitespace= --scissors - " + __gitcomp_builtin am "--no-utf8" \ + "$__git_am_inprogress_options" return esac } @@ -1104,14 +1134,7 @@ _git_apply () return ;; --*) - __gitcomp " - --stat --numstat --summary --check --index - --cached --index-info --reverse --reject --unidiff-zero - --apply --no-add --exclude= - --ignore-whitespace --ignore-space-change - --whitespace= --inaccurate-eof --verbose - --recount --directory= - " + __gitcomp_builtin apply return esac } @@ -1120,10 +1143,7 @@ _git_add () { case "$cur" in --*) - __gitcomp " - --interactive --refresh --patch --update --dry-run - --ignore-errors --intent-to-add --force --edit --chmod= - " + __gitcomp_builtin add return esac @@ -1200,12 +1220,8 @@ _git_branch () __git_complete_refs --cur="${cur##--set-upstream-to=}" ;; --*) - __gitcomp " - --color --no-color --verbose --abbrev= --no-abbrev - --track --no-track --contains --no-contains --merged --no-merged - --set-upstream-to= --edit-description --list - --unset-upstream --delete --move --copy --remotes - --column --no-column --sort= --points-at + __gitcomp_builtin branch "--no-color --no-abbrev + --no-track --no-column " ;; *) @@ -1247,11 +1263,7 @@ _git_checkout () __gitcomp "diff3 merge" "" "${cur##--conflict=}" ;; --*) - __gitcomp " - --quiet --ours --theirs --track --no-track --merge - --conflict= --orphan --patch --detach --ignore-skip-worktree-bits - --recurse-submodules --no-recurse-submodules - " + __gitcomp_builtin checkout "--no-track --no-recurse-submodules" ;; *) # check if --track, --no-track, or --no-guess was specified @@ -1271,16 +1283,19 @@ _git_cherry () __git_complete_refs } +__git_cherry_pick_inprogress_options="--continue --quit --abort" + _git_cherry_pick () { __git_find_repo_path if [ -f "$__git_repo_path"/CHERRY_PICK_HEAD ]; then - __gitcomp "--continue --quit --abort" + __gitcomp "$__git_cherry_pick_inprogress_options" return fi case "$cur" in --*) - __gitcomp "--edit --no-commit --signoff --strategy= --mainline" + __gitcomp_builtin cherry-pick "" \ + "$__git_cherry_pick_inprogress_options" ;; *) __git_complete_refs @@ -1292,7 +1307,7 @@ _git_clean () { case "$cur" in --*) - __gitcomp "--dry-run --quiet" + __gitcomp_builtin clean return ;; esac @@ -1305,26 +1320,7 @@ _git_clone () { case "$cur" in --*) - __gitcomp " - --local - --no-hardlinks - --shared - --reference - --quiet - --no-checkout - --bare - --mirror - --origin - --upload-pack - --template= - --depth - --single-branch - --no-tags - --branch - --recurse-submodules - --no-single-branch - --shallow-submodules - " + __gitcomp_builtin clone "--no-single-branch" return ;; esac @@ -1357,16 +1353,7 @@ _git_commit () return ;; --*) - __gitcomp " - --all --author= 
--signoff --verify --no-verify - --edit --no-edit - --amend --include --only --interactive - --dry-run --reuse-message= --reedit-message= - --reset-author --file= --message= --template= - --cleanup= --untracked-files --untracked-files= - --verbose --quiet --fixup= --squash= - --patch --short --date --allow-empty - " + __gitcomp_builtin commit "--no-edit --verify" return esac @@ -1382,11 +1369,7 @@ _git_describe () { case "$cur" in --*) - __gitcomp " - --all --tags --contains --abbrev= --candidates= - --exact-match --debug --long --match --always --first-parent - --exclude --dirty --broken - " + __gitcomp_builtin describe return esac __git_complete_refs @@ -1411,7 +1394,7 @@ __git_diff_common_options="--stat --numstat --shortstat --summary --dirstat --dirstat= --dirstat-by-file --dirstat-by-file= --cumulative --diff-algorithm= - --submodule --submodule= + --submodule --submodule= --ignore-submodules " _git_diff () @@ -1452,11 +1435,11 @@ _git_difftool () return ;; --*) - __gitcomp "--cached --staged --pickaxe-all --pickaxe-regex - --base --ours --theirs - --no-renames --diff-filter= --find-copies-harder - --relative --ignore-submodules - --tool=" + __gitcomp_builtin difftool "$__git_diff_common_options + --base --cached --ours --theirs + --pickaxe-all --pickaxe-regex + --relative --staged + " return ;; esac @@ -1465,12 +1448,6 @@ _git_difftool () __git_fetch_recurse_submodules="yes on-demand no" -__git_fetch_options=" - --quiet --verbose --append --upload-pack --force --keep --depth= - --tags --no-tags --all --prune --dry-run --recurse-submodules= - --unshallow --update-shallow -" - _git_fetch () { case "$cur" in @@ -1479,7 +1456,7 @@ _git_fetch () return ;; --*) - __gitcomp "$__git_fetch_options" + __gitcomp_builtin fetch "--no-tags" return ;; esac @@ -1516,10 +1493,7 @@ _git_fsck () { case "$cur" in --*) - __gitcomp " - --tags --root --unreachable --cache --no-reflogs --full - --strict --verbose --lost-found --name-objects - " + __gitcomp_builtin fsck "--no-reflogs" return ;; esac @@ -1529,7 +1503,7 @@ _git_gc () { case "$cur" in --*) - __gitcomp "--prune --aggressive" + __gitcomp_builtin gc return ;; esac @@ -1585,21 +1559,7 @@ _git_grep () case "$cur" in --*) - __gitcomp " - --cached - --text --ignore-case --word-regexp --invert-match - --full-name --line-number - --extended-regexp --basic-regexp --fixed-strings - --perl-regexp - --threads - --files-with-matches --name-only - --files-without-match - --max-depth - --count - --and --or --not --all-match - --break --heading --show-function --function-context - --untracked --no-index - " + __gitcomp_builtin grep return ;; esac @@ -1617,7 +1577,7 @@ _git_help () { case "$cur" in --*) - __gitcomp "--all --guides --info --man --web" + __gitcomp_builtin help return ;; esac @@ -1640,7 +1600,7 @@ _git_init () return ;; --*) - __gitcomp "--quiet --bare --template= --shared --shared=" + __gitcomp_builtin init return ;; esac @@ -1650,13 +1610,7 @@ _git_ls_files () { case "$cur" in --*) - __gitcomp "--cached --deleted --modified --others --ignored - --stage --directory --no-empty-directory --unmerged - --killed --exclude= --exclude-from= - --exclude-per-directory= --exclude-standard - --error-unmatch --with-tree= --full-name - --abbrev --ignored --exclude-per-directory - " + __gitcomp_builtin ls-files "--no-empty-directory" return ;; esac @@ -1670,7 +1624,7 @@ _git_ls_remote () { case "$cur" in --*) - __gitcomp "--heads --tags --refs --get-url --symref" + __gitcomp_builtin ls-remote return ;; esac @@ -1794,22 +1748,18 @@ _git_log () 
__git_complete_revlist } -# Common merge options shared by git-merge(1) and git-pull(1). -__git_merge_options=" - --no-commit --no-stat --log --no-log --squash --strategy - --commit --stat --no-squash --ff --no-ff --ff-only --edit --no-edit - --verify-signatures --no-verify-signatures --gpg-sign - --quiet --verbose --progress --no-progress -" - _git_merge () { __git_complete_strategy && return case "$cur" in --*) - __gitcomp "$__git_merge_options - --rerere-autoupdate --no-rerere-autoupdate --abort --continue" + __gitcomp_builtin merge "--no-rerere-autoupdate + --no-commit --no-edit --no-ff + --no-log --no-progress + --no-squash --no-stat + --no-verify-signatures + " return esac __git_complete_refs @@ -1833,7 +1783,7 @@ _git_merge_base () { case "$cur" in --*) - __gitcomp "--octopus --independent --is-ancestor --fork-point" + __gitcomp_builtin merge-base return ;; esac @@ -1844,7 +1794,7 @@ _git_mv () { case "$cur" in --*) - __gitcomp "--dry-run" + __gitcomp_builtin mv return ;; esac @@ -1860,17 +1810,17 @@ _git_mv () _git_name_rev () { - __gitcomp "--tags --all --stdin" + __gitcomp_builtin name-rev } _git_notes () { - local subcommands='add append copy edit list prune remove show' + local subcommands='add append copy edit get-ref list merge prune remove show' local subcommand="$(__git_find_on_cmdline "$subcommands")" case "$subcommand,$cur" in ,--*) - __gitcomp '--ref' + __gitcomp_builtin notes ;; ,*) case "$prev" in @@ -1882,21 +1832,14 @@ _git_notes () ;; esac ;; - add,--reuse-message=*|append,--reuse-message=*|\ - add,--reedit-message=*|append,--reedit-message=*) + *,--reuse-message=*|*,--reedit-message=*) __git_complete_refs --cur="${cur#*=}" ;; - add,--*|append,--*) - __gitcomp '--file= --message= --reedit-message= - --reuse-message=' + *,--*) + __gitcomp_builtin notes_$subcommand ;; - copy,--*) - __gitcomp '--stdin' - ;; - prune,--*) - __gitcomp '--dry-run --verbose' - ;; - prune,*) + prune,*|get-ref,*) + # this command does not take a ref, do not complete it ;; *) case "$prev" in @@ -1920,12 +1863,11 @@ _git_pull () return ;; --*) - __gitcomp " - --rebase --no-rebase - --autostash --no-autostash - $__git_merge_options - $__git_fetch_options - " + __gitcomp_builtin pull "--no-autostash --no-commit --no-edit + --no-ff --no-log --no-progress --no-rebase + --no-squash --no-stat --no-tags + --no-verify-signatures" + return ;; esac @@ -1976,12 +1918,7 @@ _git_push () return ;; --*) - __gitcomp " - --all --mirror --tags --dry-run --force --verbose - --quiet --prune --delete --follow-tags - --receive-pack= --repo= --set-upstream - --force-with-lease --force-with-lease= --recurse-submodules= - " + __gitcomp_builtin push return ;; esac @@ -1992,11 +1929,11 @@ _git_rebase () { __git_find_repo_path if [ -f "$__git_repo_path"/rebase-merge/interactive ]; then - __gitcomp "--continue --skip --abort --quit --edit-todo" + __gitcomp "--continue --skip --abort --quit --edit-todo --show-current-patch" return elif [ -d "$__git_repo_path"/rebase-apply ] || \ [ -d "$__git_repo_path"/rebase-merge ]; then - __gitcomp "--continue --skip --abort --quit" + __gitcomp "--continue --skip --abort --quit --show-current-patch" return fi __git_complete_strategy && return @@ -2016,6 +1953,7 @@ _git_rebase () --autostash --no-autostash --verify --no-verify --keep-empty --root --force-rebase --no-ff + --rerere-autoupdate --exec " @@ -2081,7 +2019,7 @@ _git_send_email () --compose --confirm= --dry-run --envelope-sender --from --identity --in-reply-to --no-chain-reply-to --no-signed-off-by-cc - --no-suppress-from 
--no-thread --quiet + --no-suppress-from --no-thread --quiet --reply-to --signed-off-by-cc --smtp-pass --smtp-server --smtp-server-port --smtp-encryption= --smtp-user --subject --suppress-cc= --suppress-from --thread --to @@ -2119,11 +2057,7 @@ _git_status () return ;; --*) - __gitcomp " - --short --branch --porcelain --long --verbose - --untracked-files= --ignore-submodules= --ignored - --column= --no-column - " + __gitcomp_builtin status "--no-column" return ;; esac @@ -2265,14 +2199,7 @@ _git_config () esac case "$cur" in --*) - __gitcomp " - --system --global --local --file= - --list --replace-all - --get --get-all --get-regexp - --add --unset --unset-all - --remove-section --rename-section - --name-only - " + __gitcomp_builtin config return ;; branch.*.*) @@ -2672,7 +2599,7 @@ _git_remote () if [ -z "$subcommand" ]; then case "$cur" in --*) - __gitcomp "--verbose" + __gitcomp_builtin remote ;; *) __gitcomp "$subcommands" @@ -2683,33 +2610,33 @@ _git_remote () case "$subcommand,$cur" in add,--*) - __gitcomp "--track --master --fetch --tags --no-tags --mirror=" + __gitcomp_builtin remote_add "--no-tags" ;; add,*) ;; set-head,--*) - __gitcomp "--auto --delete" + __gitcomp_builtin remote_set-head ;; set-branches,--*) - __gitcomp "--add" + __gitcomp_builtin remote_set-branches ;; set-head,*|set-branches,*) __git_complete_remote_or_refspec ;; update,--*) - __gitcomp "--prune" + __gitcomp_builtin remote_update ;; update,*) __gitcomp "$(__git_get_config_variables "remotes")" ;; set-url,--*) - __gitcomp "--push --add --delete" + __gitcomp_builtin remote_set-url ;; get-url,--*) - __gitcomp "--push --all" + __gitcomp_builtin remote_get-url ;; prune,--*) - __gitcomp "--dry-run" + __gitcomp_builtin remote_prune ;; *) __gitcomp_nl "$(__git_remotes)" @@ -2721,7 +2648,7 @@ _git_replace () { case "$cur" in --*) - __gitcomp "--edit --graft --format= --list --delete" + __gitcomp_builtin replace return ;; esac @@ -2745,26 +2672,26 @@ _git_reset () case "$cur" in --*) - __gitcomp "--merge --mixed --hard --soft --patch --keep" + __gitcomp_builtin reset return ;; esac __git_complete_refs } +__git_revert_inprogress_options="--continue --quit --abort" + _git_revert () { __git_find_repo_path if [ -f "$__git_repo_path"/REVERT_HEAD ]; then - __gitcomp "--continue --quit --abort" + __gitcomp "$__git_revert_inprogress_options" return fi case "$cur" in --*) - __gitcomp " - --edit --mainline --no-edit --no-commit --signoff - --strategy= --strategy-option= - " + __gitcomp_builtin revert "--no-edit" \ + "$__git_revert_inprogress_options" return ;; esac @@ -2775,7 +2702,7 @@ _git_rm () { case "$cur" in --*) - __gitcomp "--cached --dry-run --ignore-unmatch --quiet" + __gitcomp_builtin rm return ;; esac @@ -2833,12 +2760,7 @@ _git_show_branch () { case "$cur" in --*) - __gitcomp " - --all --remotes --topo-order --date-order --current --more= - --list --independent --merge-base --no-name - --color --no-color - --sha1-name --sparse --topics --reflog - " + __gitcomp_builtin show-branch "--no-color" return ;; esac @@ -3045,7 +2967,7 @@ _git_tag () while [ $c -lt $cword ]; do i="${words[c]}" case "$i" in - -d|-v) + -d|--delete|-v|--verify) __gitcomp_direct "$(__git_tags "" "$cur" " ")" return ;; @@ -3071,11 +2993,7 @@ _git_tag () case "$cur" in --*) - __gitcomp " - --list --delete --verify --annotate --message --file - --sign --cleanup --local-user --force --column --sort= - --contains --no-contains --points-at --merged --no-merged --create-reflog - " + __gitcomp_builtin tag ;; esac } @@ -3087,23 +3005,26 @@ _git_whatchanged 
() _git_worktree () { - local subcommands="add list lock prune unlock" + local subcommands="add list lock move prune remove unlock" local subcommand="$(__git_find_on_cmdline "$subcommands")" if [ -z "$subcommand" ]; then __gitcomp "$subcommands" else case "$subcommand,$cur" in add,--*) - __gitcomp "--detach" + __gitcomp_builtin worktree_add ;; list,--*) - __gitcomp "--porcelain" + __gitcomp_builtin worktree_list ;; lock,--*) - __gitcomp "--reason" + __gitcomp_builtin worktree_lock ;; prune,--*) - __gitcomp "--dry-run --expire --verbose" + __gitcomp_builtin worktree_prune + ;; + remove,--*) + __gitcomp "--force" ;; *) ;; diff --git a/contrib/examples/builtin-fetch--tool.c b/contrib/examples/builtin-fetch--tool.c index a3eb19de04..22648c3afb 100644 --- a/contrib/examples/builtin-fetch--tool.c +++ b/contrib/examples/builtin-fetch--tool.c @@ -15,7 +15,7 @@ static char *get_stdin(void) static void show_new(enum object_type type, unsigned char *sha1_new) { - fprintf(stderr, " %s: %s\n", typename(type), + fprintf(stderr, " %s: %s\n", type_name(type), find_unique_abbrev(sha1_new, DEFAULT_ABBREV)); } diff --git a/contrib/examples/git-difftool.perl b/contrib/examples/git-difftool.perl index df59bdfe97..b2ea80f9ed 100755 --- a/contrib/examples/git-difftool.perl +++ b/contrib/examples/git-difftool.perl @@ -13,7 +13,7 @@ use 5.008; use strict; use warnings; -use Error qw(:try); +use Git::LoadCPAN::Error qw(:try); use File::Basename qw(dirname); use File::Copy; use File::Find; diff --git a/contrib/examples/git-svnimport.perl b/contrib/examples/git-svnimport.perl index c414f0d9c7..75a43e23b6 100755 --- a/contrib/examples/git-svnimport.perl +++ b/contrib/examples/git-svnimport.perl @@ -238,7 +238,7 @@ sub pdate($) { my($d) = @_; $d =~ m#(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)# or die "Unparseable date: $d\n"; - my $y=$1; $y-=1900 if $y>1900; + my $y=$1; $y+=1900 if $y<1000; return timegm($6||0,$5,$4,$3,$2-1,$y); } diff --git a/contrib/hooks/pre-auto-gc-battery b/contrib/hooks/pre-auto-gc-battery index 6a2cdebdb7..7ba78c4dff 100755 --- a/contrib/hooks/pre-auto-gc-battery +++ b/contrib/hooks/pre-auto-gc-battery @@ -17,7 +17,7 @@ # ln -sf /usr/share/git-core/contrib/hooks/pre-auto-gc-battery \ # hooks/pre-auto-gc -if test -x /sbin/on_ac_power && /sbin/on_ac_power +if test -x /sbin/on_ac_power && (/sbin/on_ac_power;test $? 
-ne 1) then exit 0 elif test "$(cat /sys/class/power_supply/AC/online 2>/dev/null)" = 1 diff --git a/contrib/subtree/git-subtree.sh b/contrib/subtree/git-subtree.sh index dec085a235..d3f39a862a 100755 --- a/contrib/subtree/git-subtree.sh +++ b/contrib/subtree/git-subtree.sh @@ -297,7 +297,7 @@ find_latest_squash () { main= sub= git log --grep="^git-subtree-dir: $dir/*\$" \ - --pretty=format:'START %H%n%s%n%n%b%nEND%n' HEAD | + --no-show-signature --pretty=format:'START %H%n%s%n%n%b%nEND%n' HEAD | while read a b junk do debug "$a $b $junk" @@ -341,7 +341,7 @@ find_existing_splits () { main= sub= git log --grep="^git-subtree-dir: $dir/*\$" \ - --pretty=format:'START %H%n%s%n%n%b%nEND%n' $revs | + --no-show-signature --pretty=format:'START %H%n%s%n%n%b%nEND%n' $revs | while read a b junk do case "$a" in @@ -382,7 +382,7 @@ copy_commit () { # We're going to set some environment vars here, so # do it in a subshell to get rid of them safely later debug copy_commit "{$1}" "{$2}" "{$3}" - git log -1 --pretty=format:'%an%n%ae%n%aD%n%cn%n%ce%n%cD%n%B' "$1" | + git log -1 --no-show-signature --pretty=format:'%an%n%ae%n%aD%n%cn%n%ce%n%cD%n%B' "$1" | ( read GIT_AUTHOR_NAME read GIT_AUTHOR_EMAIL @@ -462,8 +462,8 @@ squash_msg () { oldsub_short=$(git rev-parse --short "$oldsub") echo "Squashed '$dir/' changes from $oldsub_short..$newsub_short" echo - git log --pretty=tformat:'%h %s' "$oldsub..$newsub" - git log --pretty=tformat:'REVERT: %h %s' "$newsub..$oldsub" + git log --no-show-signature --pretty=tformat:'%h %s' "$oldsub..$newsub" + git log --no-show-signature --pretty=tformat:'REVERT: %h %s' "$newsub..$oldsub" else echo "Squashed '$dir/' content from commit $newsub_short" fi @@ -475,7 +475,7 @@ squash_msg () { toptree_for_commit () { commit="$1" - git log -1 --pretty=format:'%T' "$commit" -- || exit $? + git rev-parse --verify "$commit^{tree}" || exit $? } subtree_for_commit () { @@ -193,30 +193,30 @@ static enum eol output_eol(enum crlf_action crlf_action) return core_eol; } -static void check_safe_crlf(const char *path, enum crlf_action crlf_action, +static void check_global_conv_flags_eol(const char *path, enum crlf_action crlf_action, struct text_stat *old_stats, struct text_stat *new_stats, - enum safe_crlf checksafe) + int conv_flags) { if (old_stats->crlf && !new_stats->crlf ) { /* * CRLFs would not be restored by checkout */ - if (checksafe == SAFE_CRLF_WARN) + if (conv_flags & CONV_EOL_RNDTRP_DIE) + die(_("CRLF would be replaced by LF in %s."), path); + else if (conv_flags & CONV_EOL_RNDTRP_WARN) warning(_("CRLF will be replaced by LF in %s.\n" "The file will have its original line" " endings in your working directory."), path); - else /* i.e. SAFE_CRLF_FAIL */ - die(_("CRLF would be replaced by LF in %s."), path); } else if (old_stats->lonelf && !new_stats->lonelf ) { /* * CRLFs would be added by checkout */ - if (checksafe == SAFE_CRLF_WARN) + if (conv_flags & CONV_EOL_RNDTRP_DIE) + die(_("LF would be replaced by CRLF in %s"), path); + else if (conv_flags & CONV_EOL_RNDTRP_WARN) warning(_("LF will be replaced by CRLF in %s.\n" "The file will have its original line" " endings in your working directory."), path); - else /* i.e. 
SAFE_CRLF_FAIL */ - die(_("LF would be replaced by CRLF in %s"), path); } } @@ -268,7 +268,7 @@ static int will_convert_lf_to_crlf(size_t len, struct text_stat *stats, static int crlf_to_git(const struct index_state *istate, const char *path, const char *src, size_t len, struct strbuf *buf, - enum crlf_action crlf_action, enum safe_crlf checksafe) + enum crlf_action crlf_action, int conv_flags) { struct text_stat stats; char *dst; @@ -298,12 +298,12 @@ static int crlf_to_git(const struct index_state *istate, * unless we want to renormalize in a merge or * cherry-pick. */ - if ((checksafe != SAFE_CRLF_RENORMALIZE) && + if ((!(conv_flags & CONV_EOL_RENORMALIZE)) && has_crlf_in_index(istate, path)) convert_crlf_into_lf = 0; } - if ((checksafe == SAFE_CRLF_WARN || - (checksafe == SAFE_CRLF_FAIL)) && len) { + if (((conv_flags & CONV_EOL_RNDTRP_WARN) || + ((conv_flags & CONV_EOL_RNDTRP_DIE) && len))) { struct text_stat new_stats; memcpy(&new_stats, &stats, sizeof(new_stats)); /* simulate "git add" */ @@ -316,7 +316,7 @@ static int crlf_to_git(const struct index_state *istate, new_stats.crlf += new_stats.lonelf; new_stats.lonelf = 0; } - check_safe_crlf(path, crlf_action, &stats, &new_stats, checksafe); + check_global_conv_flags_eol(path, crlf_action, &stats, &new_stats, conv_flags); } if (!convert_crlf_into_lf) return 0; @@ -898,7 +898,7 @@ static int ident_to_git(const char *path, const char *src, size_t len, static int ident_to_worktree(const char *path, const char *src, size_t len, struct strbuf *buf, int ident) { - unsigned char sha1[20]; + struct object_id oid; char *to_free = NULL, *dollar, *spc; int cnt; @@ -912,7 +912,7 @@ static int ident_to_worktree(const char *path, const char *src, size_t len, /* are we "faking" in place editing ? */ if (src == buf->buf) to_free = strbuf_detach(buf, NULL); - hash_sha1_file(src, len, "blob", sha1); + hash_object_file(src, len, "blob", &oid); strbuf_grow(buf, len + cnt * 43); for (;;) { @@ -969,7 +969,7 @@ static int ident_to_worktree(const char *path, const char *src, size_t len, /* step 4: substitute */ strbuf_addstr(buf, "Id: "); - strbuf_add(buf, sha1_to_hex(sha1), 40); + strbuf_addstr(buf, oid_to_hex(&oid)); strbuf_addstr(buf, " $"); } strbuf_add(buf, src, len); @@ -1129,7 +1129,7 @@ const char *get_convert_attr_ascii(const char *path) int convert_to_git(const struct index_state *istate, const char *path, const char *src, size_t len, - struct strbuf *dst, enum safe_crlf checksafe) + struct strbuf *dst, int conv_flags) { int ret = 0; struct conv_attrs ca; @@ -1144,8 +1144,8 @@ int convert_to_git(const struct index_state *istate, src = dst->buf; len = dst->len; } - if (checksafe != SAFE_CRLF_KEEP_CRLF) { - ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, checksafe); + if (!(conv_flags & CONV_EOL_KEEP_CRLF)) { + ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, conv_flags); if (ret && dst) { src = dst->buf; len = dst->len; @@ -1156,7 +1156,7 @@ int convert_to_git(const struct index_state *istate, void convert_to_git_filter_fd(const struct index_state *istate, const char *path, int fd, struct strbuf *dst, - enum safe_crlf checksafe) + int conv_flags) { struct conv_attrs ca; convert_attrs(&ca, path); @@ -1167,7 +1167,7 @@ void convert_to_git_filter_fd(const struct index_state *istate, if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL)) die("%s: clean filter '%s' failed", path, ca.drv->name); - crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, checksafe); + crlf_to_git(istate, path, dst->buf, 
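With the enum gone, the conversion entry points take a plain int of CONV_EOL_* flags (declared in the convert.h hunk below). A hedged sketch of a call site, assuming path, src and len describe a working-tree blob as in the existing callers:

	struct strbuf dst = STRBUF_INIT;

	/* warn, rather than die, if CRLF<->LF would not round-trip */
	if (convert_to_git(&the_index, path, src, len, &dst, CONV_EOL_RNDTRP_WARN)) {
		src = dst.buf;	/* conversion happened; use the converted buffer */
		len = dst.len;
	}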
dst->len, dst, ca.crlf_action, conv_flags); ident_to_git(path, dst->buf, dst->len, dst, ca.ident); } @@ -1226,7 +1226,7 @@ int renormalize_buffer(const struct index_state *istate, const char *path, src = dst->buf; len = dst->len; } - return ret | convert_to_git(istate, path, src, len, dst, SAFE_CRLF_RENORMALIZE); + return ret | convert_to_git(istate, path, src, len, dst, CONV_EOL_RENORMALIZE); } /***************************************************************** @@ -8,15 +8,12 @@ struct index_state; -enum safe_crlf { - SAFE_CRLF_FALSE = 0, - SAFE_CRLF_FAIL = 1, - SAFE_CRLF_WARN = 2, - SAFE_CRLF_RENORMALIZE = 3, - SAFE_CRLF_KEEP_CRLF = 4 -}; +#define CONV_EOL_RNDTRP_DIE (1<<0) /* Die if CRLF to LF to CRLF is different */ +#define CONV_EOL_RNDTRP_WARN (1<<1) /* Warn if CRLF to LF to CRLF is different */ +#define CONV_EOL_RENORMALIZE (1<<2) /* Convert CRLF to LF */ +#define CONV_EOL_KEEP_CRLF (1<<3) /* Keep CRLF line endings as is */ -extern enum safe_crlf safe_crlf; +extern int global_conv_flags_eol; enum auto_crlf { AUTO_CRLF_FALSE = 0, @@ -66,7 +63,7 @@ extern const char *get_convert_attr_ascii(const char *path); /* returns 1 if *dst was used */ extern int convert_to_git(const struct index_state *istate, const char *path, const char *src, size_t len, - struct strbuf *dst, enum safe_crlf checksafe); + struct strbuf *dst, int conv_flags); extern int convert_to_working_tree(const char *path, const char *src, size_t len, struct strbuf *dst); extern int async_convert_to_working_tree(const char *path, const char *src, @@ -85,7 +82,7 @@ static inline int would_convert_to_git(const struct index_state *istate, extern void convert_to_git_filter_fd(const struct index_state *istate, const char *path, int fd, struct strbuf *dst, - enum safe_crlf checksafe); + int conv_flags); extern int would_convert_to_git_filter_fd(const char *path); /***************************************************************** diff --git a/csum-file.c b/csum-file.c index 2adae04073..5eda7fb6af 100644 --- a/csum-file.c +++ b/csum-file.c @@ -11,7 +11,7 @@ #include "progress.h" #include "csum-file.h" -static void flush(struct sha1file *f, const void *buf, unsigned int count) +static void flush(struct hashfile *f, const void *buf, unsigned int count) { if (0 <= f->check_fd && count) { unsigned char check_buffer[8192]; @@ -42,28 +42,28 @@ static void flush(struct sha1file *f, const void *buf, unsigned int count) } } -void sha1flush(struct sha1file *f) +void hashflush(struct hashfile *f) { unsigned offset = f->offset; if (offset) { - git_SHA1_Update(&f->ctx, f->buffer, offset); + the_hash_algo->update_fn(&f->ctx, f->buffer, offset); flush(f, f->buffer, offset); f->offset = 0; } } -int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags) +int hashclose(struct hashfile *f, unsigned char *result, unsigned int flags) { int fd; - sha1flush(f); - git_SHA1_Final(f->buffer, &f->ctx); + hashflush(f); + the_hash_algo->final_fn(f->buffer, &f->ctx); if (result) hashcpy(result, f->buffer); if (flags & (CSUM_CLOSE | CSUM_FSYNC)) { /* write checksum and close fd */ - flush(f, f->buffer, 20); + flush(f, f->buffer, the_hash_algo->rawsz); if (flags & CSUM_FSYNC) fsync_or_die(f->fd, f->name); if (close(f->fd)) @@ -86,7 +86,7 @@ int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags) return fd; } -void sha1write(struct sha1file *f, const void *buf, unsigned int count) +void hashwrite(struct hashfile *f, const void *buf, unsigned int count) { while (count) { unsigned offset = f->offset; @@ -110,7 +110,7 @@ void 
sha1write(struct sha1file *f, const void *buf, unsigned int count) buf = (char *) buf + nr; left -= nr; if (!left) { - git_SHA1_Update(&f->ctx, data, offset); + the_hash_algo->update_fn(&f->ctx, data, offset); flush(f, data, offset); offset = 0; } @@ -118,15 +118,15 @@ void sha1write(struct sha1file *f, const void *buf, unsigned int count) } } -struct sha1file *sha1fd(int fd, const char *name) +struct hashfile *hashfd(int fd, const char *name) { - return sha1fd_throughput(fd, name, NULL); + return hashfd_throughput(fd, name, NULL); } -struct sha1file *sha1fd_check(const char *name) +struct hashfile *hashfd_check(const char *name) { int sink, check; - struct sha1file *f; + struct hashfile *f; sink = open("/dev/null", O_WRONLY); if (sink < 0) @@ -134,14 +134,14 @@ struct sha1file *sha1fd_check(const char *name) check = open(name, O_RDONLY); if (check < 0) die_errno("unable to open '%s'", name); - f = sha1fd(sink, name); + f = hashfd(sink, name); f->check_fd = check; return f; } -struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp) +struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp) { - struct sha1file *f = xmalloc(sizeof(*f)); + struct hashfile *f = xmalloc(sizeof(*f)); f->fd = fd; f->check_fd = -1; f->offset = 0; @@ -149,18 +149,18 @@ struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp f->tp = tp; f->name = name; f->do_crc = 0; - git_SHA1_Init(&f->ctx); + the_hash_algo->init_fn(&f->ctx); return f; } -void sha1file_checkpoint(struct sha1file *f, struct sha1file_checkpoint *checkpoint) +void hashfile_checkpoint(struct hashfile *f, struct hashfile_checkpoint *checkpoint) { - sha1flush(f); + hashflush(f); checkpoint->offset = f->total; checkpoint->ctx = f->ctx; } -int sha1file_truncate(struct sha1file *f, struct sha1file_checkpoint *checkpoint) +int hashfile_truncate(struct hashfile *f, struct hashfile_checkpoint *checkpoint) { off_t offset = checkpoint->offset; @@ -169,17 +169,17 @@ int sha1file_truncate(struct sha1file *f, struct sha1file_checkpoint *checkpoint return -1; f->total = offset; f->ctx = checkpoint->ctx; - f->offset = 0; /* sha1flush() was called in checkpoint */ + f->offset = 0; /* hashflush() was called in checkpoint */ return 0; } -void crc32_begin(struct sha1file *f) +void crc32_begin(struct hashfile *f) { f->crc32 = crc32(0, NULL, 0); f->do_crc = 1; } -uint32_t crc32_end(struct sha1file *f) +uint32_t crc32_end(struct hashfile *f) { f->do_crc = 0; return f->crc32; diff --git a/csum-file.h b/csum-file.h index 7530927d77..992e5c0141 100644 --- a/csum-file.h +++ b/csum-file.h @@ -4,11 +4,11 @@ struct progress; /* A SHA1-protected file */ -struct sha1file { +struct hashfile { int fd; int check_fd; unsigned int offset; - git_SHA_CTX ctx; + git_hash_ctx ctx; off_t total; struct progress *tp; const char *name; @@ -18,36 +18,36 @@ struct sha1file { }; /* Checkpoint */ -struct sha1file_checkpoint { +struct hashfile_checkpoint { off_t offset; - git_SHA_CTX ctx; + git_hash_ctx ctx; }; -extern void sha1file_checkpoint(struct sha1file *, struct sha1file_checkpoint *); -extern int sha1file_truncate(struct sha1file *, struct sha1file_checkpoint *); +extern void hashfile_checkpoint(struct hashfile *, struct hashfile_checkpoint *); +extern int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *); -/* sha1close flags */ +/* hashclose flags */ #define CSUM_CLOSE 1 #define CSUM_FSYNC 2 -extern struct sha1file *sha1fd(int fd, const char *name); -extern struct sha1file *sha1fd_check(const char 
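The csum-file rename above turns the SHA-1-specific sha1file API into the hash-agnostic hashfile one without changing its shape. A short usage sketch, assuming an already-open descriptor fd and a buffer header/header_len to be written; only functions declared in the csum-file.h hunk are used, and error handling is elided:

	struct hashfile *f = hashfd(fd, "output-file");
	unsigned char trailer[20];	/* SHA-1 sized today; the trailer is the_hash_algo->rawsz bytes */

	hashwrite(f, header, header_len);	/* buffered write that also feeds the hash */
	hashwrite_be32(f, 42);			/* convenience helper, network byte order */
	hashclose(f, trailer, CSUM_CLOSE);	/* append the checksum trailer and close fd */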
*name); -extern struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp); -extern int sha1close(struct sha1file *, unsigned char *, unsigned int); -extern void sha1write(struct sha1file *, const void *, unsigned int); -extern void sha1flush(struct sha1file *f); -extern void crc32_begin(struct sha1file *); -extern uint32_t crc32_end(struct sha1file *); +extern struct hashfile *hashfd(int fd, const char *name); +extern struct hashfile *hashfd_check(const char *name); +extern struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp); +extern int hashclose(struct hashfile *, unsigned char *, unsigned int); +extern void hashwrite(struct hashfile *, const void *, unsigned int); +extern void hashflush(struct hashfile *f); +extern void crc32_begin(struct hashfile *); +extern uint32_t crc32_end(struct hashfile *); -static inline void sha1write_u8(struct sha1file *f, uint8_t data) +static inline void hashwrite_u8(struct hashfile *f, uint8_t data) { - sha1write(f, &data, sizeof(data)); + hashwrite(f, &data, sizeof(data)); } -static inline void sha1write_be32(struct sha1file *f, uint32_t data) +static inline void hashwrite_be32(struct hashfile *f, uint32_t data) { data = htonl(data); - sha1write(f, &data, sizeof(data)); + hashwrite(f, &data, sizeof(data)); } #endif @@ -9,7 +9,12 @@ #define initgroups(x, y) (0) /* nothing */ #endif -static int log_syslog; +static enum log_destination { + LOG_DESTINATION_UNSET = -1, + LOG_DESTINATION_NONE = 0, + LOG_DESTINATION_STDERR = 1, + LOG_DESTINATION_SYSLOG = 2, +} log_destination = LOG_DESTINATION_UNSET; static int verbose; static int reuseaddr; static int informative_errors; @@ -25,6 +30,7 @@ static const char daemon_usage[] = " [--access-hook=<path>]\n" " [--inetd | [--listen=<host_or_ipaddr>] [--port=<n>]\n" " [--detach] [--user=<user> [--group=<group>]]\n" +" [--log-destination=(stderr|syslog|none)]\n" " [<directory>...]"; /* List of acceptable pathname prefixes */ @@ -74,11 +80,14 @@ static const char *get_ip_address(struct hostinfo *hi) static void logreport(int priority, const char *err, va_list params) { - if (log_syslog) { + switch (log_destination) { + case LOG_DESTINATION_SYSLOG: { char buf[1024]; vsnprintf(buf, sizeof(buf), err, params); syslog(priority, "%s", buf); - } else { + break; + } + case LOG_DESTINATION_STDERR: /* * Since stderr is set to buffered mode, the * logging of different processes will not overlap @@ -88,6 +97,11 @@ static void logreport(int priority, const char *err, va_list params) vfprintf(stderr, err, params); fputc('\n', stderr); fflush(stderr); + break; + case LOG_DESTINATION_NONE: + break; + case LOG_DESTINATION_UNSET: + BUG("log destination not initialized correctly"); } } @@ -1286,7 +1300,6 @@ int cmd_main(int argc, const char **argv) } if (!strcmp(arg, "--inetd")) { inetd_mode = 1; - log_syslog = 1; continue; } if (!strcmp(arg, "--verbose")) { @@ -1294,9 +1307,22 @@ int cmd_main(int argc, const char **argv) continue; } if (!strcmp(arg, "--syslog")) { - log_syslog = 1; + log_destination = LOG_DESTINATION_SYSLOG; continue; } + if (skip_prefix(arg, "--log-destination=", &v)) { + if (!strcmp(v, "syslog")) { + log_destination = LOG_DESTINATION_SYSLOG; + continue; + } else if (!strcmp(v, "stderr")) { + log_destination = LOG_DESTINATION_STDERR; + continue; + } else if (!strcmp(v, "none")) { + log_destination = LOG_DESTINATION_NONE; + continue; + } else + die("unknown log destination '%s'", v); + } if (!strcmp(arg, "--export-all")) { export_all_trees = 1; continue; @@ -1353,7 
+1379,6 @@ int cmd_main(int argc, const char **argv) } if (!strcmp(arg, "--detach")) { detach = 1; - log_syslog = 1; continue; } if (skip_prefix(arg, "--user=", &v)) { @@ -1399,7 +1424,14 @@ int cmd_main(int argc, const char **argv) usage(daemon_usage); } - if (log_syslog) { + if (log_destination == LOG_DESTINATION_UNSET) { + if (inetd_mode || detach) + log_destination = LOG_DESTINATION_SYSLOG; + else + log_destination = LOG_DESTINATION_STDERR; + } + + if (log_destination == LOG_DESTINATION_SYSLOG) { openlog("git-daemon", LOG_PID, LOG_DAEMON); set_die_routine(daemon_die); } else diff --git a/diff-lib.c b/diff-lib.c index 8104603a3b..104f954a25 100644 --- a/diff-lib.c +++ b/diff-lib.c @@ -92,6 +92,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option) int diff_unmerged_stage = revs->max_count; unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED) ? CE_MATCH_RACY_IS_DIRTY : 0); + uint64_t start = getnanotime(); diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/"); @@ -246,6 +247,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option) } diffcore_std(&revs->diffopt); diff_flush(&revs->diffopt); + trace_performance_since(start, "diff-files"); return 0; } @@ -302,7 +304,7 @@ static int get_stat_data(const struct cache_entry *ce, } static void show_new_file(struct rev_info *revs, - const struct cache_entry *new, + const struct cache_entry *new_file, int cached, int match_missing) { const struct object_id *oid; @@ -313,16 +315,16 @@ static void show_new_file(struct rev_info *revs, * New file in the index: it might actually be different in * the working tree. */ - if (get_stat_data(new, &oid, &mode, cached, match_missing, + if (get_stat_data(new_file, &oid, &mode, cached, match_missing, &dirty_submodule, &revs->diffopt) < 0) return; - diff_index_show_file(revs, "+", new, oid, !is_null_oid(oid), mode, dirty_submodule); + diff_index_show_file(revs, "+", new_file, oid, !is_null_oid(oid), mode, dirty_submodule); } static int show_modified(struct rev_info *revs, - const struct cache_entry *old, - const struct cache_entry *new, + const struct cache_entry *old_entry, + const struct cache_entry *new_entry, int report_missing, int cached, int match_missing) { @@ -330,47 +332,47 @@ static int show_modified(struct rev_info *revs, const struct object_id *oid; unsigned dirty_submodule = 0; - if (get_stat_data(new, &oid, &mode, cached, match_missing, + if (get_stat_data(new_entry, &oid, &mode, cached, match_missing, &dirty_submodule, &revs->diffopt) < 0) { if (report_missing) - diff_index_show_file(revs, "-", old, - &old->oid, 1, old->ce_mode, + diff_index_show_file(revs, "-", old_entry, + &old_entry->oid, 1, old_entry->ce_mode, 0); return -1; } if (revs->combine_merges && !cached && - (oidcmp(oid, &old->oid) || oidcmp(&old->oid, &new->oid))) { + (oidcmp(oid, &old_entry->oid) || oidcmp(&old_entry->oid, &new_entry->oid))) { struct combine_diff_path *p; - int pathlen = ce_namelen(new); + int pathlen = ce_namelen(new_entry); p = xmalloc(combine_diff_path_size(2, pathlen)); p->path = (char *) &p->parent[2]; p->next = NULL; - memcpy(p->path, new->name, pathlen); + memcpy(p->path, new_entry->name, pathlen); p->path[pathlen] = 0; p->mode = mode; oidclr(&p->oid); memset(p->parent, 0, 2 * sizeof(struct combine_diff_parent)); p->parent[0].status = DIFF_STATUS_MODIFIED; - p->parent[0].mode = new->ce_mode; - oidcpy(&p->parent[0].oid, &new->oid); + p->parent[0].mode = new_entry->ce_mode; + oidcpy(&p->parent[0].oid, &new_entry->oid); p->parent[1].status = DIFF_STATUS_MODIFIED; - p->parent[1].mode = 
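run_diff_files() and run_diff_index() above gain lightweight wall-clock instrumentation. The pattern is small enough to reuse as-is; in this sketch do_work() merely stands in for the measured code path, and the report is emitted when GIT_TRACE_PERFORMANCE is enabled:

	uint64_t start = getnanotime();

	do_work();
	trace_performance_since(start, "diff-files");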
old->ce_mode; - oidcpy(&p->parent[1].oid, &old->oid); + p->parent[1].mode = old_entry->ce_mode; + oidcpy(&p->parent[1].oid, &old_entry->oid); show_combined_diff(p, 2, revs->dense_combined_merges, revs); free(p); return 0; } - oldmode = old->ce_mode; - if (mode == oldmode && !oidcmp(oid, &old->oid) && !dirty_submodule && + oldmode = old_entry->ce_mode; + if (mode == oldmode && !oidcmp(oid, &old_entry->oid) && !dirty_submodule && !revs->diffopt.flags.find_copies_harder) return 0; diff_change(&revs->diffopt, oldmode, mode, - &old->oid, oid, 1, !is_null_oid(oid), - old->name, 0, dirty_submodule); + &old_entry->oid, oid, 1, !is_null_oid(oid), + old_entry->name, 0, dirty_submodule); return 0; } @@ -512,6 +514,7 @@ static int diff_cache(struct rev_info *revs, int run_diff_index(struct rev_info *revs, int cached) { struct object_array_entry *ent; + uint64_t start = getnanotime(); ent = revs->pending.objects; if (diff_cache(revs, &ent->item->oid, ent->name, cached)) @@ -521,6 +524,7 @@ int run_diff_index(struct rev_info *revs, int cached) diffcore_fix_diff_index(&revs->diffopt); diffcore_std(&revs->diffopt); diff_flush(&revs->diffopt); + trace_performance_since(start, "diff-index"); return 0; } @@ -1504,7 +1504,7 @@ struct diff_words_style_elem { struct diff_words_style { enum diff_words_type type; - struct diff_words_style_elem new, old, ctx; + struct diff_words_style_elem new_word, old_word, ctx; const char *newline; }; @@ -1655,12 +1655,12 @@ static void fn_out_diff_words_aux(void *priv, char *line, unsigned long len) } if (minus_begin != minus_end) { fn_out_diff_words_write_helper(diff_words->opt, - &style->old, style->newline, + &style->old_word, style->newline, minus_end - minus_begin, minus_begin); } if (plus_begin != plus_end) { fn_out_diff_words_write_helper(diff_words->opt, - &style->new, style->newline, + &style->new_word, style->newline, plus_end - plus_begin, plus_begin); } @@ -1758,7 +1758,7 @@ static void diff_words_show(struct diff_words_data *diff_words) emit_diff_symbol(diff_words->opt, DIFF_SYMBOL_WORD_DIFF, line_prefix, strlen(line_prefix), 0); fn_out_diff_words_write_helper(diff_words->opt, - &style->old, style->newline, + &style->old_word, style->newline, diff_words->minus.text.size, diff_words->minus.text.ptr); diff_words->minus.text.size = 0; @@ -1883,8 +1883,8 @@ static void init_diff_words_data(struct emit_callback *ecbdata, } if (want_color(o->use_color)) { struct diff_words_style *st = ecbdata->diff_words->style; - st->old.color = diff_get_color_opt(o, DIFF_FILE_OLD); - st->new.color = diff_get_color_opt(o, DIFF_FILE_NEW); + st->old_word.color = diff_get_color_opt(o, DIFF_FILE_OLD); + st->new_word.color = diff_get_color_opt(o, DIFF_FILE_NEW); st->ctx.color = diff_get_color_opt(o, DIFF_CONTEXT); } } @@ -2045,11 +2045,10 @@ static void fn_out_consume(void *priv, char *line, unsigned long len) } } -static char *pprint_rename(const char *a, const char *b) +static void pprint_rename(struct strbuf *name, const char *a, const char *b) { - const char *old = a; - const char *new = b; - struct strbuf name = STRBUF_INIT; + const char *old_name = a; + const char *new_name = b; int pfx_length, sfx_length; int pfx_adjust_for_slash; int len_a = strlen(a); @@ -2059,24 +2058,24 @@ static char *pprint_rename(const char *a, const char *b) int qlen_b = quote_c_style(b, NULL, NULL, 0); if (qlen_a || qlen_b) { - quote_c_style(a, &name, NULL, 0); - strbuf_addstr(&name, " => "); - quote_c_style(b, &name, NULL, 0); - return strbuf_detach(&name, NULL); + quote_c_style(a, name, NULL, 0); + 
strbuf_addstr(name, " => "); + quote_c_style(b, name, NULL, 0); + return; } /* Find common prefix */ pfx_length = 0; - while (*old && *new && *old == *new) { - if (*old == '/') - pfx_length = old - a + 1; - old++; - new++; + while (*old_name && *new_name && *old_name == *new_name) { + if (*old_name == '/') + pfx_length = old_name - a + 1; + old_name++; + new_name++; } /* Find common suffix */ - old = a + len_a; - new = b + len_b; + old_name = a + len_a; + new_name = b + len_b; sfx_length = 0; /* * If there is a common prefix, it must end in a slash. In @@ -2087,13 +2086,13 @@ static char *pprint_rename(const char *a, const char *b) * underrun the input strings. */ pfx_adjust_for_slash = (pfx_length ? 1 : 0); - while (a + pfx_length - pfx_adjust_for_slash <= old && - b + pfx_length - pfx_adjust_for_slash <= new && - *old == *new) { - if (*old == '/') - sfx_length = len_a - (old - a); - old--; - new--; + while (a + pfx_length - pfx_adjust_for_slash <= old_name && + b + pfx_length - pfx_adjust_for_slash <= new_name && + *old_name == *new_name) { + if (*old_name == '/') + sfx_length = len_a - (old_name - a); + old_name--; + new_name--; } /* @@ -2109,19 +2108,18 @@ static char *pprint_rename(const char *a, const char *b) if (b_midlen < 0) b_midlen = 0; - strbuf_grow(&name, pfx_length + a_midlen + b_midlen + sfx_length + 7); + strbuf_grow(name, pfx_length + a_midlen + b_midlen + sfx_length + 7); if (pfx_length + sfx_length) { - strbuf_add(&name, a, pfx_length); - strbuf_addch(&name, '{'); + strbuf_add(name, a, pfx_length); + strbuf_addch(name, '{'); } - strbuf_add(&name, a + pfx_length, a_midlen); - strbuf_addstr(&name, " => "); - strbuf_add(&name, b + pfx_length, b_midlen); + strbuf_add(name, a + pfx_length, a_midlen); + strbuf_addstr(name, " => "); + strbuf_add(name, b + pfx_length, b_midlen); if (pfx_length + sfx_length) { - strbuf_addch(&name, '}'); - strbuf_add(&name, a + len_a - sfx_length, sfx_length); + strbuf_addch(name, '}'); + strbuf_add(name, a + len_a - sfx_length, sfx_length); } - return strbuf_detach(&name, NULL); } struct diffstat_t { @@ -2131,6 +2129,7 @@ struct diffstat_t { char *from_name; char *name; char *print_name; + const char *comments; unsigned is_unmerged:1; unsigned is_binary:1; unsigned is_renamed:1; @@ -2197,23 +2196,20 @@ static void show_graph(struct strbuf *out, char ch, int cnt, static void fill_print_name(struct diffstat_file *file) { - char *pname; + struct strbuf pname = STRBUF_INIT; if (file->print_name) return; - if (!file->is_renamed) { - struct strbuf buf = STRBUF_INIT; - if (quote_c_style(file->name, &buf, NULL, 0)) { - pname = strbuf_detach(&buf, NULL); - } else { - pname = file->name; - strbuf_release(&buf); - } - } else { - pname = pprint_rename(file->from_name, file->name); - } - file->print_name = pname; + if (file->is_renamed) + pprint_rename(&pname, file->from_name, file->name); + else + quote_c_style(file->name, &pname, NULL, 0); + + if (file->comments) + strbuf_addf(&pname, " (%s)", file->comments); + + file->print_name = strbuf_detach(&pname, NULL); } static void print_stat_summary_inserts_deletes(struct diff_options *options, @@ -2594,14 +2590,14 @@ struct dirstat_dir { static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir, unsigned long changed, const char *base, int baselen) { - unsigned long this_dir = 0; + unsigned long sum_changes = 0; unsigned int sources = 0; const char *line_prefix = diff_line_prefix(opt); while (dir->nr) { struct dirstat_file *f = dir->files; int namelen = strlen(f->name); - unsigned long this; 
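A worked example for the rewritten pprint_rename() above may help. The function collapses a shared directory prefix and a shared suffix into a brace expression, so for the hypothetical pair

	a = "lib/output/formatter.c"
	b = "lib/render/formatter.c"

the strbuf ends up holding "lib/{output => render}/formatter.c"; with no common prefix or suffix it degrades to the plain "old => new" form.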
+ unsigned long changes; char *slash; if (namelen < baselen) @@ -2611,15 +2607,15 @@ static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir, slash = strchr(f->name + baselen, '/'); if (slash) { int newbaselen = slash + 1 - f->name; - this = gather_dirstat(opt, dir, changed, f->name, newbaselen); + changes = gather_dirstat(opt, dir, changed, f->name, newbaselen); sources++; } else { - this = f->changed; + changes = f->changed; dir->files++; dir->nr--; sources += 2; } - this_dir += this; + sum_changes += changes; } /* @@ -2629,8 +2625,8 @@ static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir, * under this directory (sources == 1). */ if (baselen && sources != 1) { - if (this_dir) { - int permille = this_dir * 1000 / changed; + if (sum_changes) { + int permille = sum_changes * 1000 / changed; if (permille >= dir->permille) { fprintf(opt->file, "%s%4d.%01d%% %.*s\n", line_prefix, permille / 10, permille % 10, baselen, base); @@ -2639,7 +2635,7 @@ static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir, } } } - return this_dir; + return sum_changes; } static int dirstat_compare(const void *_a, const void *_b) @@ -2797,8 +2793,7 @@ static void free_diffstat_info(struct diffstat_t *diffstat) int i; for (i = 0; i < diffstat->nr; i++) { struct diffstat_file *f = diffstat->files[i]; - if (f->name != f->print_name) - free(f->print_name); + free(f->print_name); free(f->name); free(f->from_name); free(f); @@ -3248,6 +3243,32 @@ static void builtin_diff(const char *name_a, return; } +static char *get_compact_summary(const struct diff_filepair *p, int is_renamed) +{ + if (!is_renamed) { + if (p->status == DIFF_STATUS_ADDED) { + if (S_ISLNK(p->two->mode)) + return "new +l"; + else if ((p->two->mode & 0777) == 0755) + return "new +x"; + else + return "new"; + } else if (p->status == DIFF_STATUS_DELETED) + return "gone"; + } + if (S_ISLNK(p->one->mode) && !S_ISLNK(p->two->mode)) + return "mode -l"; + else if (!S_ISLNK(p->one->mode) && S_ISLNK(p->two->mode)) + return "mode +l"; + else if ((p->one->mode & 0777) == 0644 && + (p->two->mode & 0777) == 0755) + return "mode +x"; + else if ((p->one->mode & 0777) == 0755 && + (p->two->mode & 0777) == 0644) + return "mode -x"; + return NULL; +} + static void builtin_diffstat(const char *name_a, const char *name_b, struct diff_filespec *one, struct diff_filespec *two, @@ -3267,6 +3288,8 @@ static void builtin_diffstat(const char *name_a, const char *name_b, data = diffstat_add(diffstat, name_a, name_b); data->is_interesting = p->status != DIFF_STATUS_UNKNOWN; + if (o->flags.stat_with_summary) + data->comments = get_compact_summary(p, data->is_renamed); if (!one || !two) { data->is_unmerged = 1; @@ -3520,13 +3543,13 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags) { int size_only = flags & CHECK_SIZE_ONLY; int err = 0; + int conv_flags = global_conv_flags_eol; /* * demote FAIL to WARN to allow inspecting the situation * instead of refusing. */ - enum safe_crlf crlf_warn = (safe_crlf == SAFE_CRLF_FAIL - ? 
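On the gather_dirstat() hunk above: the function accumulates changes per directory and reports concentration in permille, printed with one decimal place. With purely illustrative numbers, changed = 2000 lines in total and sum_changes = 314 under a directory give permille = 314 * 1000 / 2000 = 157, which the existing "%4d.%01d%%" format prints as "15.7%" (157 / 10 and 157 % 10).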
SAFE_CRLF_WARN - : safe_crlf); + if (conv_flags & CONV_EOL_RNDTRP_DIE) + conv_flags = CONV_EOL_RNDTRP_WARN; if (!DIFF_FILE_VALID(s)) die("internal error: asking to populate invalid file."); @@ -3603,7 +3626,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags) /* * Convert from working tree format to canonical git format */ - if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, crlf_warn)) { + if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, conv_flags)) { size_t size = 0; munmap(s->data, s->size); s->should_munmap = 0; @@ -3660,15 +3683,15 @@ static void prep_temp_blob(const char *path, struct diff_tempfile *temp, int mode) { struct strbuf buf = STRBUF_INIT; - struct strbuf template = STRBUF_INIT; + struct strbuf tempfile = STRBUF_INIT; char *path_dup = xstrdup(path); const char *base = basename(path_dup); /* Generate "XXXXXX_basename.ext" */ - strbuf_addstr(&template, "XXXXXX_"); - strbuf_addstr(&template, base); + strbuf_addstr(&tempfile, "XXXXXX_"); + strbuf_addstr(&tempfile, base); - temp->tempfile = mks_tempfile_ts(template.buf, strlen(base) + 1); + temp->tempfile = mks_tempfile_ts(tempfile.buf, strlen(base) + 1); if (!temp->tempfile) die_errno("unable to create temp-file"); if (convert_to_working_tree(path, @@ -3683,7 +3706,7 @@ static void prep_temp_blob(const char *path, struct diff_tempfile *temp, oid_to_hex_r(temp->hex, oid); xsnprintf(temp->mode, sizeof(temp->mode), "%06o", mode); strbuf_release(&buf); - strbuf_release(&template); + strbuf_release(&tempfile); free(path_dup); } @@ -4086,6 +4109,7 @@ void diff_setup(struct diff_options *options) options->interhunkcontext = diff_interhunk_context_default; options->ws_error_highlight = ws_error_highlight_default; options->flags.rename_empty = 1; + options->objfind = NULL; /* pathchange left =NULL by default */ options->change = diff_change; @@ -4110,22 +4134,20 @@ void diff_setup(struct diff_options *options) void diff_setup_done(struct diff_options *options) { - int count = 0; + unsigned check_mask = DIFF_FORMAT_NAME | + DIFF_FORMAT_NAME_STATUS | + DIFF_FORMAT_CHECKDIFF | + DIFF_FORMAT_NO_OUTPUT; if (options->set_default) options->set_default(options); - if (options->output_format & DIFF_FORMAT_NAME) - count++; - if (options->output_format & DIFF_FORMAT_NAME_STATUS) - count++; - if (options->output_format & DIFF_FORMAT_CHECKDIFF) - count++; - if (options->output_format & DIFF_FORMAT_NO_OUTPUT) - count++; - if (count > 1) + if (HAS_MULTI_BITS(options->output_format & check_mask)) die(_("--name-only, --name-status, --check and -s are mutually exclusive")); + if (HAS_MULTI_BITS(options->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK)) + die(_("-G, -S and --find-object are mutually exclusive")); + /* * Most of the time we can say "there are changes" * only by checking if there are changed paths, but @@ -4175,7 +4197,7 @@ void diff_setup_done(struct diff_options *options) /* * Also pickaxe would not work very well if you do not say recursive */ - if (options->pickaxe) + if (options->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK) options->flags.recursive = 1; /* * When patches are generated, submodules diffed against the work tree @@ -4489,6 +4511,23 @@ static int parse_ws_error_highlight_opt(struct diff_options *opt, const char *ar return 1; } +static int parse_objfind_opt(struct diff_options *opt, const char *arg) +{ + struct object_id oid; + + if (get_oid(arg, &oid)) + return error("unable to resolve '%s'", arg); + + if (!opt->objfind) + opt->objfind = xcalloc(1, sizeof(*opt->objfind)); + + 
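The --find-object plumbing above is spread over diff.c, diff.h and diffcore-pickaxe.c; condensed into one flow, and using only calls that appear in this patch (option wiring and error paths omitted), it looks roughly like this:

	struct object_id oid;

	if (get_oid(arg, &oid))
		return error("unable to resolve '%s'", arg);
	if (!opt->objfind)
		opt->objfind = xcalloc(1, sizeof(*opt->objfind));
	oidset_insert(opt->objfind, &oid);
	opt->pickaxe_opts |= DIFF_PICKAXE_KIND_OBJFIND;

	/* later, per file pair (the real check also requires DIFF_FILE_VALID): */
	int wanted = oidset_contains(opt->objfind, &p->one->oid) ||
		     oidset_contains(opt->objfind, &p->two->oid);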
opt->pickaxe_opts |= DIFF_PICKAXE_KIND_OBJFIND; + opt->flags.recursive = 1; + opt->flags.tree_in_recursive = 1; + oidset_insert(opt->objfind, &oid); + return 1; +} + int diff_opt_parse(struct diff_options *options, const char **av, int ac, const char *prefix) { @@ -4537,6 +4576,11 @@ int diff_opt_parse(struct diff_options *options, else if (starts_with(arg, "--stat")) /* --stat, --stat-width, --stat-name-width, or --stat-count */ return stat_opt(options, av); + else if (!strcmp(arg, "--compact-summary")) { + options->flags.stat_with_summary = 1; + options->output_format |= DIFF_FORMAT_DIFFSTAT; + } else if (!strcmp(arg, "--no-compact-summary")) + options->flags.stat_with_summary = 0; /* renames options */ else if (starts_with(arg, "-B") || @@ -4736,7 +4780,8 @@ int diff_opt_parse(struct diff_options *options, else if ((argcount = short_opt('O', av, &optarg))) { options->orderfile = prefix_filename(prefix, optarg); return argcount; - } + } else if (skip_prefix(arg, "--find-object=", &arg)) + return parse_objfind_opt(options, arg); else if ((argcount = parse_long_opt("diff-filter", av, &optarg))) { int offending = parse_diff_filter_opt(optarg, options); if (offending) @@ -5224,10 +5269,12 @@ static void show_rename_copy(struct diff_options *opt, const char *renamecopy, struct diff_filepair *p) { struct strbuf sb = STRBUF_INIT; - char *names = pprint_rename(p->one->path, p->two->path); + struct strbuf names = STRBUF_INIT; + + pprint_rename(&names, p->one->path, p->two->path); strbuf_addf(&sb, " %s %s (%d%%)\n", - renamecopy, names, similarity_index(p)); - free(names); + renamecopy, names.buf, similarity_index(p)); + strbuf_release(&names); emit_diff_symbol(opt, DIFF_SYMBOL_SUMMARY, sb.buf, sb.len, 0); show_mode_change(opt, p, 0); @@ -5784,7 +5831,7 @@ void diffcore_std(struct diff_options *options) if (options->break_opt != -1) diffcore_merge_broken(); } - if (options->pickaxe) + if (options->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK) diffcore_pickaxe(options); if (options->orderfile) diffcore_order(options->orderfile); @@ -7,6 +7,7 @@ #include "tree-walk.h" #include "pathspec.h" #include "object.h" +#include "oidset.h" struct rev_info; struct diff_options; @@ -91,8 +92,8 @@ struct diff_flags { unsigned override_submodule_config:1; unsigned dirstat_by_line:1; unsigned funccontext:1; - unsigned pickaxe_ignore_case:1; unsigned default_follow_renames:1; + unsigned stat_with_summary:1; }; static inline void diff_flags_or(struct diff_flags *a, @@ -146,7 +147,7 @@ struct diff_options { int skip_stat_unmatch; int line_termination; int output_format; - int pickaxe_opts; + unsigned pickaxe_opts; int rename_score; int rename_limit; int needed_rename_limit; @@ -178,6 +179,8 @@ struct diff_options { enum diff_words_type word_diff; enum diff_submodule_format submodule_format; + struct oidset *objfind; + /* this is set by diffcore for DIFF_FORMAT_PATCH */ int found_changes; @@ -330,6 +333,13 @@ extern void diff_setup_done(struct diff_options *); #define DIFF_PICKAXE_KIND_S 4 /* traditional plumbing counter */ #define DIFF_PICKAXE_KIND_G 8 /* grep in the patch */ +#define DIFF_PICKAXE_KIND_OBJFIND 16 /* specific object IDs */ + +#define DIFF_PICKAXE_KINDS_MASK (DIFF_PICKAXE_KIND_S | \ + DIFF_PICKAXE_KIND_G | \ + DIFF_PICKAXE_KIND_OBJFIND) + +#define DIFF_PICKAXE_IGNORE_CASE 32 extern void diffcore_std(struct diff_options *); extern void diffcore_fix_diff_index(struct diff_options *); diff --git a/diffcore-delta.c b/diffcore-delta.c index ebe70fb068..c83d45a047 100644 --- a/diffcore-delta.c +++ 
b/diffcore-delta.c @@ -48,16 +48,16 @@ struct spanhash_top { static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig) { - struct spanhash_top *new; + struct spanhash_top *new_spanhash; int i; int osz = 1 << orig->alloc_log2; int sz = osz << 1; - new = xmalloc(st_add(sizeof(*orig), + new_spanhash = xmalloc(st_add(sizeof(*orig), st_mult(sizeof(struct spanhash), sz))); - new->alloc_log2 = orig->alloc_log2 + 1; - new->free = INITIAL_FREE(new->alloc_log2); - memset(new->data, 0, sizeof(struct spanhash) * sz); + new_spanhash->alloc_log2 = orig->alloc_log2 + 1; + new_spanhash->free = INITIAL_FREE(new_spanhash->alloc_log2); + memset(new_spanhash->data, 0, sizeof(struct spanhash) * sz); for (i = 0; i < osz; i++) { struct spanhash *o = &(orig->data[i]); int bucket; @@ -65,11 +65,11 @@ static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig) continue; bucket = o->hashval & (sz - 1); while (1) { - struct spanhash *h = &(new->data[bucket++]); + struct spanhash *h = &(new_spanhash->data[bucket++]); if (!h->cnt) { h->hashval = o->hashval; h->cnt = o->cnt; - new->free--; + new_spanhash->free--; break; } if (sz <= bucket) @@ -77,7 +77,7 @@ static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig) } } free(orig); - return new; + return new_spanhash; } static struct spanhash_top *add_spanhash(struct spanhash_top *top, diff --git a/diffcore-pickaxe.c b/diffcore-pickaxe.c index 9476bd2108..239ce5122b 100644 --- a/diffcore-pickaxe.c +++ b/diffcore-pickaxe.c @@ -124,13 +124,20 @@ static int pickaxe_match(struct diff_filepair *p, struct diff_options *o, mmfile_t mf1, mf2; int ret; - if (!o->pickaxe[0]) - return 0; - /* ignore unmerged */ if (!DIFF_FILE_VALID(p->one) && !DIFF_FILE_VALID(p->two)) return 0; + if (o->objfind) { + return (DIFF_FILE_VALID(p->one) && + oidset_contains(o->objfind, &p->one->oid)) || + (DIFF_FILE_VALID(p->two) && + oidset_contains(o->objfind, &p->two->oid)); + } + + if (!o->pickaxe[0]) + return 0; + if (o->flags.allow_textconv) { textconv_one = get_textconv(p->one); textconv_two = get_textconv(p->two); @@ -222,33 +229,34 @@ void diffcore_pickaxe(struct diff_options *o) if (opts & (DIFF_PICKAXE_REGEX | DIFF_PICKAXE_KIND_G)) { int cflags = REG_EXTENDED | REG_NEWLINE; - if (o->flags.pickaxe_ignore_case) + if (o->pickaxe_opts & DIFF_PICKAXE_IGNORE_CASE) cflags |= REG_ICASE; regcomp_or_die(®ex, needle, cflags); regexp = ®ex; - } else if (o->flags.pickaxe_ignore_case && - has_non_ascii(needle)) { - struct strbuf sb = STRBUF_INIT; - int cflags = REG_NEWLINE | REG_ICASE; - - basic_regex_quote_buf(&sb, needle); - regcomp_or_die(®ex, sb.buf, cflags); - strbuf_release(&sb); - regexp = ®ex; - } else { - kws = kwsalloc(o->flags.pickaxe_ignore_case - ? tolower_trans_tbl : NULL); - kwsincr(kws, needle, strlen(needle)); - kwsprep(kws); + } else if (opts & DIFF_PICKAXE_KIND_S) { + if (o->pickaxe_opts & DIFF_PICKAXE_IGNORE_CASE && + has_non_ascii(needle)) { + struct strbuf sb = STRBUF_INIT; + int cflags = REG_NEWLINE | REG_ICASE; + + basic_regex_quote_buf(&sb, needle); + regcomp_or_die(®ex, sb.buf, cflags); + strbuf_release(&sb); + regexp = ®ex; + } else { + kws = kwsalloc(o->pickaxe_opts & DIFF_PICKAXE_IGNORE_CASE + ? tolower_trans_tbl : NULL); + kwsincr(kws, needle, strlen(needle)); + kwsprep(kws); + } } - /* Might want to warn when both S and G are on; I don't care... */ pickaxe(&diff_queued_diff, o, regexp, kws, (opts & DIFF_PICKAXE_KIND_G) ? 
diff_grep : has_changes); if (regexp) regfree(regexp); - else + if (kws) kwsfree(kws); return; } diff --git a/diffcore-rename.c b/diffcore-rename.c index 245e999fe5..0b7e4989a8 100644 --- a/diffcore-rename.c +++ b/diffcore-rename.c @@ -57,8 +57,8 @@ static int add_rename_dst(struct diff_filespec *two) ALLOC_GROW(rename_dst, rename_dst_nr + 1, rename_dst_alloc); rename_dst_nr++; if (first < rename_dst_nr) - memmove(rename_dst + first + 1, rename_dst + first, - (rename_dst_nr - first - 1) * sizeof(*rename_dst)); + MOVE_ARRAY(rename_dst + first + 1, rename_dst + first, + rename_dst_nr - first - 1); rename_dst[first].two = alloc_filespec(two->path); fill_filespec(rename_dst[first].two, &two->oid, two->oid_valid, two->mode); @@ -98,8 +98,8 @@ static struct diff_rename_src *register_rename_src(struct diff_filepair *p) ALLOC_GROW(rename_src, rename_src_nr + 1, rename_src_alloc); rename_src_nr++; if (first < rename_src_nr) - memmove(rename_src + first + 1, rename_src + first, - (rename_src_nr - first - 1) * sizeof(*rename_src)); + MOVE_ARRAY(rename_src + first + 1, rename_src + first, + rename_src_nr - first - 1); rename_src[first].p = p; rename_src[first].score = score; return &(rename_src[first]); @@ -260,8 +260,8 @@ static unsigned int hash_filespec(struct diff_filespec *filespec) if (!filespec->oid_valid) { if (diff_populate_filespec(filespec, 0)) return 0; - hash_sha1_file(filespec->data, filespec->size, "blob", - filespec->oid.hash); + hash_object_file(filespec->data, filespec->size, "blob", + &filespec->oid); } return sha1hash(filespec->oid.hash); } @@ -231,12 +231,10 @@ int within_depth(const char *name, int namelen, * 1 along with { data, size } of the (possibly augmented) buffer * when successful. * - * Optionally updates the given sha1_stat with the given OID (when valid). + * Optionally updates the given oid_stat with the given OID (when valid). 
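The "--find-object" machinery in the diff.c and diffcore-pickaxe.c hunks above keys every candidate filepair off an oidset of requested object IDs. A rough sketch of that pattern, using only the oidset_insert()/oidset_contains() calls already visible in the hunks; get_oid() is used here purely for illustration, and remember_object()/pair_is_interesting() are made-up names:

    #include "cache.h"
    #include "oidset.h"

    static struct oidset interesting = OIDSET_INIT;

    /* Record one object ID the user asked about (e.g. from --find-object). */
    static void remember_object(const char *hex)
    {
            struct object_id oid;

            if (!get_oid(hex, &oid))
                    oidset_insert(&interesting, &oid);
    }

    /* Mirrors the objfind check in pickaxe_match(): does either side match? */
    static int pair_is_interesting(const struct object_id *one,
                                   const struct object_id *two)
    {
            return oidset_contains(&interesting, one) ||
                   oidset_contains(&interesting, two);
    }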
*/ -static int do_read_blob(const struct object_id *oid, - struct sha1_stat *sha1_stat, - size_t *size_out, - char **data_out) +static int do_read_blob(const struct object_id *oid, struct oid_stat *oid_stat, + size_t *size_out, char **data_out) { enum object_type type; unsigned long sz; @@ -251,9 +249,9 @@ static int do_read_blob(const struct object_id *oid, return -1; } - if (sha1_stat) { - memset(&sha1_stat->stat, 0, sizeof(sha1_stat->stat)); - hashcpy(sha1_stat->sha1, oid->hash); + if (oid_stat) { + memset(&oid_stat->stat, 0, sizeof(oid_stat->stat)); + oidcpy(&oid_stat->oid, oid); } if (sz == 0) { @@ -654,9 +652,8 @@ void add_exclude(const char *string, const char *base, static int read_skip_worktree_file_from_index(const struct index_state *istate, const char *path, - size_t *size_out, - char **data_out, - struct sha1_stat *sha1_stat) + size_t *size_out, char **data_out, + struct oid_stat *oid_stat) { int pos, len; @@ -667,7 +664,7 @@ static int read_skip_worktree_file_from_index(const struct index_state *istate, if (!ce_skip_worktree(istate->cache[pos])) return -1; - return do_read_blob(&istate->cache[pos]->oid, sha1_stat, size_out, data_out); + return do_read_blob(&istate->cache[pos]->oid, oid_stat, size_out, data_out); } /* @@ -747,8 +744,8 @@ static struct untracked_cache_dir *lookup_untracked(struct untracked_cache *uc, FLEX_ALLOC_MEM(d, name, name, len); ALLOC_GROW(dir->dirs, dir->dirs_nr + 1, dir->dirs_alloc); - memmove(dir->dirs + first + 1, dir->dirs + first, - (dir->dirs_nr - first) * sizeof(*dir->dirs)); + MOVE_ARRAY(dir->dirs + first + 1, dir->dirs + first, + dir->dirs_nr - first); dir->dirs_nr++; dir->dirs[first] = d; return d; @@ -774,7 +771,16 @@ static void invalidate_directory(struct untracked_cache *uc, struct untracked_cache_dir *dir) { int i; - uc->dir_invalidated++; + + /* + * Invalidation increment here is just roughly correct. If + * untracked_nr or any of dirs[].recurse is non-zero, we + * should increment dir_invalidated too. But that's more + * expensive to do. + */ + if (dir->valid) + uc->dir_invalidated++; + dir->valid = 0; dir->untracked_nr = 0; for (i = 0; i < dir->dirs_nr; i++) @@ -795,9 +801,8 @@ static int add_excludes_from_buffer(char *buf, size_t size, * ss_valid is non-zero, "ss" must contain good value as input. 
*/ static int add_excludes(const char *fname, const char *base, int baselen, - struct exclude_list *el, - struct index_state *istate, - struct sha1_stat *sha1_stat) + struct exclude_list *el, struct index_state *istate, + struct oid_stat *oid_stat) { struct stat st; int r; @@ -815,16 +820,16 @@ static int add_excludes(const char *fname, const char *base, int baselen, return -1; r = read_skip_worktree_file_from_index(istate, fname, &size, &buf, - sha1_stat); + oid_stat); if (r != 1) return r; } else { size = xsize_t(st.st_size); if (size == 0) { - if (sha1_stat) { - fill_stat_data(&sha1_stat->stat, &st); - hashcpy(sha1_stat->sha1, EMPTY_BLOB_SHA1_BIN); - sha1_stat->valid = 1; + if (oid_stat) { + fill_stat_data(&oid_stat->stat, &st); + oidcpy(&oid_stat->oid, &empty_blob_oid); + oid_stat->valid = 1; } close(fd); return 0; @@ -837,22 +842,23 @@ static int add_excludes(const char *fname, const char *base, int baselen, } buf[size++] = '\n'; close(fd); - if (sha1_stat) { + if (oid_stat) { int pos; - if (sha1_stat->valid && - !match_stat_data_racy(istate, &sha1_stat->stat, &st)) + if (oid_stat->valid && + !match_stat_data_racy(istate, &oid_stat->stat, &st)) ; /* no content change, ss->sha1 still good */ else if (istate && (pos = index_name_pos(istate, fname, strlen(fname))) >= 0 && !ce_stage(istate->cache[pos]) && ce_uptodate(istate->cache[pos]) && !would_convert_to_git(istate, fname)) - hashcpy(sha1_stat->sha1, - istate->cache[pos]->oid.hash); + oidcpy(&oid_stat->oid, + &istate->cache[pos]->oid); else - hash_sha1_file(buf, size, "blob", sha1_stat->sha1); - fill_stat_data(&sha1_stat->stat, &st); - sha1_stat->valid = 1; + hash_object_file(buf, size, "blob", + &oid_stat->oid); + fill_stat_data(&oid_stat->stat, &st); + oid_stat->valid = 1; } } @@ -930,7 +936,7 @@ struct exclude_list *add_exclude_list(struct dir_struct *dir, * Used to set up core.excludesfile and .git/info/exclude lists. */ static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname, - struct sha1_stat *sha1_stat) + struct oid_stat *oid_stat) { struct exclude_list *el; /* @@ -941,7 +947,7 @@ static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname, if (!dir->untracked) dir->unmanaged_exclude_files++; el = add_exclude_list(dir, EXC_FILE, fname); - if (add_excludes(fname, "", 0, el, NULL, sha1_stat) < 0) + if (add_excludes(fname, "", 0, el, NULL, oid_stat) < 0) die("cannot use %s as an exclude file", fname); } @@ -1180,7 +1186,7 @@ static void prep_exclude(struct dir_struct *dir, while (current < baselen) { const char *cp; - struct sha1_stat sha1_stat; + struct oid_stat oid_stat; stk = xcalloc(1, sizeof(*stk)); if (current < 0) { @@ -1223,8 +1229,8 @@ static void prep_exclude(struct dir_struct *dir, } /* Try to read per-directory file */ - hashclr(sha1_stat.sha1); - sha1_stat.valid = 0; + oidclr(&oid_stat.oid); + oid_stat.valid = 0; if (dir->exclude_per_dir && /* * If we know that no files have been added in @@ -1252,7 +1258,7 @@ static void prep_exclude(struct dir_struct *dir, strbuf_addstr(&sb, dir->exclude_per_dir); el->src = strbuf_detach(&sb, NULL); add_excludes(el->src, el->src, stk->baselen, el, istate, - untracked ? &sha1_stat : NULL); + untracked ? &oid_stat : NULL); } /* * NEEDSWORK: when untracked cache is enabled, prep_exclude() @@ -1269,9 +1275,9 @@ static void prep_exclude(struct dir_struct *dir, * order, though, if you do that. 
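The memmove() to MOVE_ARRAY() conversions in the diffcore-rename.c and dir.c hunks above lean on a macro (from git-compat-util.h) that derives the element size from the pointers themselves. A minimal sketch of the two equivalent forms, with open_gap_memmove()/open_gap_move_array() as illustrative names; the macro additionally fails to compile if source and destination element types differ in size:

    #include "cache.h"   /* MOVE_ARRAY comes in via git-compat-util.h */

    /* Open a one-element gap at arr[first], spelling out the size by hand. */
    static void open_gap_memmove(int *arr, int nr, int first)
    {
            memmove(arr + first + 1, arr + first,
                    (nr - first - 1) * sizeof(*arr));
    }

    /* Same shift; the element size is inferred and type-checked. */
    static void open_gap_move_array(int *arr, int nr, int first)
    {
            MOVE_ARRAY(arr + first + 1, arr + first, nr - first - 1);
    }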
*/ if (untracked && - hashcmp(sha1_stat.sha1, untracked->exclude_sha1)) { + hashcmp(oid_stat.oid.hash, untracked->exclude_sha1)) { invalidate_gitignore(dir->untracked, untracked); - hashcpy(untracked->exclude_sha1, sha1_stat.sha1); + hashcpy(untracked->exclude_sha1, oid_stat.oid.hash); } dir->exclude_stack = stk; current = stk->baselen; @@ -1773,7 +1779,7 @@ static enum path_treatment treat_path(struct dir_struct *dir, if (!de) return treat_path_fast(dir, untracked, cdir, istate, path, baselen, pathspec); - if (is_dot_or_dotdot(de->d_name) || !strcmp(de->d_name, ".git")) + if (is_dot_or_dotdot(de->d_name) || !fspathcmp(de->d_name, ".git")) return path_none; strbuf_setlen(path, baselen); strbuf_addstr(path, de->d_name); @@ -1809,24 +1815,19 @@ static int valid_cached_dir(struct dir_struct *dir, */ refresh_fsmonitor(istate); if (!(dir->untracked->use_fsmonitor && untracked->valid)) { - if (stat(path->len ? path->buf : ".", &st)) { - invalidate_directory(dir->untracked, untracked); + if (lstat(path->len ? path->buf : ".", &st)) { memset(&untracked->stat_data, 0, sizeof(untracked->stat_data)); return 0; } if (!untracked->valid || match_stat_data_racy(istate, &untracked->stat_data, &st)) { - if (untracked->valid) - invalidate_directory(dir->untracked, untracked); fill_stat_data(&untracked->stat_data, &st); return 0; } } - if (untracked->check_only != !!check_only) { - invalidate_directory(dir->untracked, untracked); + if (untracked->check_only != !!check_only) return 0; - } /* * prep_exclude will be called eventually on this directory, @@ -1853,13 +1854,20 @@ static int open_cached_dir(struct cached_dir *cdir, struct strbuf *path, int check_only) { + const char *c_path; + memset(cdir, 0, sizeof(*cdir)); cdir->untracked = untracked; if (valid_cached_dir(dir, untracked, istate, path, check_only)) return 0; - cdir->fdir = opendir(path->len ? path->buf : "."); - if (dir->untracked) + c_path = path->len ? 
path->buf : "."; + cdir->fdir = opendir(c_path); + if (!cdir->fdir) + warning_errno(_("could not open directory '%s'"), c_path); + if (dir->untracked) { + invalidate_directory(dir->untracked, untracked); dir->untracked->dir_opened++; + } if (!cdir->fdir) return -1; return 0; @@ -2164,8 +2172,13 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d const struct pathspec *pathspec) { struct untracked_cache_dir *root; + static int untracked_cache_disabled = -1; - if (!dir->untracked || getenv("GIT_DISABLE_UNTRACKED_CACHE")) + if (!dir->untracked) + return NULL; + if (untracked_cache_disabled < 0) + untracked_cache_disabled = git_env_bool("GIT_DISABLE_UNTRACKED_CACHE", 0); + if (untracked_cache_disabled) return NULL; /* @@ -2228,13 +2241,13 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d /* Validate $GIT_DIR/info/exclude and core.excludesfile */ root = dir->untracked->root; - if (hashcmp(dir->ss_info_exclude.sha1, - dir->untracked->ss_info_exclude.sha1)) { + if (oidcmp(&dir->ss_info_exclude.oid, + &dir->untracked->ss_info_exclude.oid)) { invalidate_gitignore(dir->untracked, root); dir->untracked->ss_info_exclude = dir->ss_info_exclude; } - if (hashcmp(dir->ss_excludes_file.sha1, - dir->untracked->ss_excludes_file.sha1)) { + if (oidcmp(&dir->ss_excludes_file.oid, + &dir->untracked->ss_excludes_file.oid)) { invalidate_gitignore(dir->untracked, root); dir->untracked->ss_excludes_file = dir->ss_excludes_file; } @@ -2248,6 +2261,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate, const char *path, int len, const struct pathspec *pathspec) { struct untracked_cache_dir *untracked; + uint64_t start = getnanotime(); if (has_symlink_leading_path(path, len)) return dir->nr; @@ -2286,8 +2300,14 @@ int read_directory(struct dir_struct *dir, struct index_state *istate, dir->nr = i; } + trace_performance_since(start, "read directory %.*s", len, path); if (dir->untracked) { + static int force_untracked_cache = -1; static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS); + + if (force_untracked_cache < 0) + force_untracked_cache = + git_env_bool("GIT_FORCE_UNTRACKED_CACHE", 0); trace_printf_key(&trace_untracked_stats, "node creation: %u\n" "gitignore invalidation: %u\n" @@ -2297,7 +2317,8 @@ int read_directory(struct dir_struct *dir, struct index_state *istate, dir->untracked->gitignore_invalidated, dir->untracked->dir_invalidated, dir->untracked->dir_opened); - if (dir->untracked == istate->untracked && + if (force_untracked_cache && + dir->untracked == istate->untracked && (dir->untracked->dir_opened || dir->untracked->gitignore_invalidated || dir->untracked->dir_invalidated)) @@ -2638,8 +2659,8 @@ void write_untracked_extension(struct strbuf *out, struct untracked_cache *untra FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len); stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat); stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat); - hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.sha1); - hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.sha1); + hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.oid.hash); + hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.oid.hash); ouc->dir_flags = htonl(untracked->dir_flags); varint_len = encode_varint(untracked->ident.len, varbuf); @@ -2816,13 +2837,12 @@ static void read_sha1(size_t pos, void *cb) rd->data += 20; } -static void 
load_sha1_stat(struct sha1_stat *sha1_stat, - const unsigned char *data, - const unsigned char *sha1) +static void load_oid_stat(struct oid_stat *oid_stat, const unsigned char *data, + const unsigned char *sha1) { - stat_data_from_disk(&sha1_stat->stat, data); - hashcpy(sha1_stat->sha1, sha1); - sha1_stat->valid = 1; + stat_data_from_disk(&oid_stat->stat, data); + hashcpy(oid_stat->oid.hash, sha1); + oid_stat->valid = 1; } struct untracked_cache *read_untracked_extension(const void *data, unsigned long sz) @@ -2850,12 +2870,12 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long uc = xcalloc(1, sizeof(*uc)); strbuf_init(&uc->ident, ident_len); strbuf_add(&uc->ident, ident, ident_len); - load_sha1_stat(&uc->ss_info_exclude, - next + ouc_offset(info_exclude_stat), - next + ouc_offset(info_exclude_sha1)); - load_sha1_stat(&uc->ss_excludes_file, - next + ouc_offset(excludes_file_stat), - next + ouc_offset(excludes_file_sha1)); + load_oid_stat(&uc->ss_info_exclude, + next + ouc_offset(info_exclude_stat), + next + ouc_offset(info_exclude_sha1)); + load_oid_stat(&uc->ss_excludes_file, + next + ouc_offset(excludes_file_stat), + next + ouc_offset(excludes_file_sha1)); uc->dir_flags = get_be32(next + ouc_offset(dir_flags)); exclude_per_dir = (const char *)next + ouc_offset(exclude_per_dir); uc->exclude_per_dir = xstrdup(exclude_per_dir); @@ -2968,10 +2988,12 @@ static int invalidate_one_component(struct untracked_cache *uc, } void untracked_cache_invalidate_path(struct index_state *istate, - const char *path) + const char *path, int safe_path) { if (!istate->untracked || !istate->untracked->root) return; + if (!safe_path && !verify_path(path)) + return; invalidate_one_component(istate->untracked, istate->untracked->root, path, strlen(path)); } @@ -2979,13 +3001,13 @@ void untracked_cache_invalidate_path(struct index_state *istate, void untracked_cache_remove_from_index(struct index_state *istate, const char *path) { - untracked_cache_invalidate_path(istate, path); + untracked_cache_invalidate_path(istate, path, 1); } void untracked_cache_add_to_index(struct index_state *istate, const char *path) { - untracked_cache_invalidate_path(istate, path); + untracked_cache_invalidate_path(istate, path, 1); } /* Update gitfile and core.worktree setting to connect work tree and git dir */ @@ -74,9 +74,9 @@ struct exclude_list_group { struct exclude_list *el; }; -struct sha1_stat { +struct oid_stat { struct stat_data stat; - unsigned char sha1[20]; + struct object_id oid; int valid; }; @@ -124,8 +124,8 @@ struct untracked_cache_dir { }; struct untracked_cache { - struct sha1_stat ss_info_exclude; - struct sha1_stat ss_excludes_file; + struct oid_stat ss_info_exclude; + struct oid_stat ss_excludes_file; const char *exclude_per_dir; struct strbuf ident; /* @@ -195,8 +195,8 @@ struct dir_struct { /* Enable untracked file cache if set */ struct untracked_cache *untracked; - struct sha1_stat ss_info_exclude; - struct sha1_stat ss_excludes_file; + struct oid_stat ss_info_exclude; + struct oid_stat ss_excludes_file; unsigned unmanaged_exclude_files; }; @@ -350,7 +350,7 @@ static inline int dir_path_match(const struct dir_entry *ent, int cmp_dir_entry(const void *p1, const void *p2); int check_dir_entry_contains(const struct dir_entry *out, const struct dir_entry *in); -void untracked_cache_invalidate_path(struct index_state *, const char *); +void untracked_cache_invalidate_path(struct index_state *, const char *, int safe_path); void untracked_cache_remove_from_index(struct index_state 
*, const char *); void untracked_cache_add_to_index(struct index_state *, const char *); @@ -85,12 +85,12 @@ static int create_file(const char *path, unsigned int mode) static void *read_blob_entry(const struct cache_entry *ce, unsigned long *size) { enum object_type type; - void *new = read_sha1_file(ce->oid.hash, &type, size); + void *blob_data = read_sha1_file(ce->oid.hash, &type, size); - if (new) { + if (blob_data) { if (type == OBJ_BLOB) - return new; - free(new); + return blob_data; + free(blob_data); } return NULL; } @@ -256,7 +256,7 @@ static int write_entry(struct cache_entry *ce, unsigned int ce_mode_s_ifmt = ce->ce_mode & S_IFMT; struct delayed_checkout *dco = state->delayed_checkout; int fd, ret, fstat_done = 0; - char *new; + char *new_blob; struct strbuf buf = STRBUF_INIT; unsigned long size; ssize_t wrote; @@ -276,8 +276,8 @@ static int write_entry(struct cache_entry *ce, switch (ce_mode_s_ifmt) { case S_IFLNK: - new = read_blob_entry(ce, &size); - if (!new) + new_blob = read_blob_entry(ce, &size); + if (!new_blob) return error("unable to read sha1 file of %s (%s)", path, oid_to_hex(&ce->oid)); @@ -288,8 +288,8 @@ static int write_entry(struct cache_entry *ce, if (!has_symlinks || to_tempfile) goto write_file_entry; - ret = symlink(new, path); - free(new); + ret = symlink(new_blob, path); + free(new_blob); if (ret) return error_errno("unable to create symlink %s", path); break; @@ -300,11 +300,11 @@ static int write_entry(struct cache_entry *ce, * bother reading it at all. */ if (dco && dco->state == CE_RETRY) { - new = NULL; + new_blob = NULL; size = 0; } else { - new = read_blob_entry(ce, &size); - if (!new) + new_blob = read_blob_entry(ce, &size); + if (!new_blob) return error("unable to read sha1 file of %s (%s)", path, oid_to_hex(&ce->oid)); } @@ -313,18 +313,18 @@ static int write_entry(struct cache_entry *ce, * Convert from git internal format to working tree format */ if (dco && dco->state != CE_NO_DELAY) { - ret = async_convert_to_working_tree(ce->name, new, + ret = async_convert_to_working_tree(ce->name, new_blob, size, &buf, dco); if (ret && string_list_has_string(&dco->paths, ce->name)) { - free(new); + free(new_blob); goto delayed; } } else - ret = convert_to_working_tree(ce->name, new, size, &buf); + ret = convert_to_working_tree(ce->name, new_blob, size, &buf); if (ret) { - free(new); - new = strbuf_detach(&buf, &newsize); + free(new_blob); + new_blob = strbuf_detach(&buf, &newsize); size = newsize; } /* @@ -336,15 +336,15 @@ static int write_entry(struct cache_entry *ce, write_file_entry: fd = open_output_fd(path, ce, to_tempfile); if (fd < 0) { - free(new); + free(new_blob); return error_errno("unable to create file %s", path); } - wrote = write_in_full(fd, new, size); + wrote = write_in_full(fd, new_blob, size); if (!to_tempfile) fstat_done = fstat_output(fd, state, &st); close(fd); - free(new); + free(new_blob); if (wrote < 0) return error("unable to write file %s", path); break; diff --git a/environment.c b/environment.c index 63ac38a46f..d6dd64662c 100644 --- a/environment.c +++ b/environment.c @@ -27,6 +27,8 @@ int warn_ambiguous_refs = 1; int warn_on_object_refname_ambiguity = 1; int ref_paranoia = -1; int repository_format_precious_objects; +char *repository_format_partial_clone; +const char *core_partial_clone_filter_default; const char *git_commit_encoding; const char *git_log_output_encoding; const char *apply_default_whitespace; @@ -49,7 +51,7 @@ enum auto_crlf auto_crlf = AUTO_CRLF_FALSE; int check_replace_refs = 1; char *git_replace_ref_base; 
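The sha1_stat to oid_stat conversion in the dir.c/dir.h hunks above keeps the same caching idea: remember both the stat data and the content hash of an excludes file so a later read can be skipped when neither has changed. A sketch of that pattern, assuming only the helpers the hunks themselves call (fill_stat_data(), match_stat_data_racy(), hash_object_file()); freshen_excludes() is an illustrative wrapper, not an in-tree function:

    #include "cache.h"
    #include "dir.h"

    /*
     * Re-hash 'buf' only when the on-disk stat information says the file
     * may have changed since 'os' was last filled in.
     */
    static void freshen_excludes(struct index_state *istate,
                                 struct oid_stat *os,
                                 const char *buf, unsigned long size,
                                 struct stat *st)
    {
            if (os->valid && !match_stat_data_racy(istate, &os->stat, st))
                    return;         /* cached OID is still good */

            hash_object_file(buf, size, "blob", &os->oid);
            fill_stat_data(&os->stat, st);
            os->valid = 1;
    }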
enum eol core_eol = EOL_UNSET; -enum safe_crlf safe_crlf = SAFE_CRLF_WARN; +int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN; unsigned whitespace_rule_cfg = WS_DEFAULT_RULE; enum branch_track git_branch_track = BRANCH_TRACK_REMOTE; enum rebase_setup_type autorebase = AUTOREBASE_NEVER; @@ -98,7 +100,7 @@ int ignore_untracked_cache_config; /* This is set by setup_git_dir_gently() and/or git_default_config() */ char *git_work_tree_cfg; -static char *namespace; +static char *git_namespace; static const char *super_prefix; @@ -156,8 +158,8 @@ void setup_git_env(void) free(git_replace_ref_base); git_replace_ref_base = xstrdup(replace_ref_base ? replace_ref_base : "refs/replace/"); - free(namespace); - namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT)); + free(git_namespace); + git_namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT)); shallow_file = getenv(GIT_SHALLOW_FILE_ENVIRONMENT); if (shallow_file) set_alternate_shallow_file(shallow_file, 0); @@ -191,9 +193,9 @@ const char *get_git_common_dir(void) const char *get_git_namespace(void) { - if (!namespace) + if (!git_namespace) BUG("git environment hasn't been setup"); - return namespace; + return git_namespace; } const char *strip_namespace(const char *namespaced_ref) @@ -247,7 +249,7 @@ char *get_object_directory(void) return the_repository->objectdir; } -int odb_mkstemp(struct strbuf *template, const char *pattern) +int odb_mkstemp(struct strbuf *temp_filename, const char *pattern) { int fd; /* @@ -255,16 +257,16 @@ int odb_mkstemp(struct strbuf *template, const char *pattern) * restrictive except to remove write permission. */ int mode = 0444; - git_path_buf(template, "objects/%s", pattern); - fd = git_mkstemp_mode(template->buf, mode); + git_path_buf(temp_filename, "objects/%s", pattern); + fd = git_mkstemp_mode(temp_filename->buf, mode); if (0 <= fd) return fd; /* slow path */ - /* some mkstemp implementations erase template on failure */ - git_path_buf(template, "objects/%s", pattern); - safe_create_leading_directories(template->buf); - return xmkstemp_mode(template->buf, mode); + /* some mkstemp implementations erase temp_filename on failure */ + git_path_buf(temp_filename, "objects/%s", pattern); + safe_create_leading_directories(temp_filename->buf); + return xmkstemp_mode(temp_filename->buf, mode); } int odb_pack_keep(const char *name) diff --git a/fast-import.c b/fast-import.c index b70ac025e0..b5db5d20b1 100644 --- a/fast-import.c +++ b/fast-import.c @@ -316,7 +316,7 @@ static struct atom_str **atom_table; /* The .pack file being generated */ static struct pack_idx_option pack_idx_opts; static unsigned int pack_id; -static struct sha1file *pack_file; +static struct hashfile *pack_file; static struct packed_git *pack_data; static struct packed_git **all_packs; static off_t pack_size; @@ -905,12 +905,12 @@ static void start_packfile(void) p->pack_fd = pack_fd; p->do_not_close = 1; - pack_file = sha1fd(pack_fd, p->pack_name); + pack_file = hashfd(pack_fd, p->pack_name); hdr.hdr_signature = htonl(PACK_SIGNATURE); hdr.hdr_version = htonl(2); hdr.hdr_entries = 0; - sha1write(pack_file, &hdr, sizeof(hdr)); + hashwrite(pack_file, &hdr, sizeof(hdr)); pack_data = p; pack_size = sizeof(hdr); @@ -1016,7 +1016,7 @@ static void end_packfile(void) struct tag *t; close_pack_windows(pack_data); - sha1close(pack_file, cur_pack_oid.hash, 0); + hashclose(pack_file, cur_pack_oid.hash, 0); fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1, pack_data->pack_name, object_count, cur_pack_oid.hash, pack_size); @@ -1092,15 
+1092,15 @@ static int store_object( unsigned char hdr[96]; struct object_id oid; unsigned long hdrlen, deltalen; - git_SHA_CTX c; + git_hash_ctx c; git_zstream s; hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu", - typename(type), (unsigned long)dat->len) + 1; - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, hdrlen); - git_SHA1_Update(&c, dat->buf, dat->len); - git_SHA1_Final(oid.hash, &c); + type_name(type), (unsigned long)dat->len) + 1; + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, hdrlen); + the_hash_algo->update_fn(&c, dat->buf, dat->len); + the_hash_algo->final_fn(oid.hash, &c); if (oidout) oidcpy(oidout, &oid); @@ -1118,11 +1118,13 @@ static int store_object( return 1; } - if (last && last->data.buf && last->depth < max_depth && dat->len > 20) { + if (last && last->data.buf && last->depth < max_depth + && dat->len > the_hash_algo->rawsz) { + delta_count_attempts_by_type[type]++; delta = diff_delta(last->data.buf, last->data.len, dat->buf, dat->len, - &deltalen, dat->len - 20); + &deltalen, dat->len - the_hash_algo->rawsz); } else delta = NULL; @@ -1180,23 +1182,23 @@ static int store_object( hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr), OBJ_OFS_DELTA, deltalen); - sha1write(pack_file, hdr, hdrlen); + hashwrite(pack_file, hdr, hdrlen); pack_size += hdrlen; hdr[pos] = ofs & 127; while (ofs >>= 7) hdr[--pos] = 128 | (--ofs & 127); - sha1write(pack_file, hdr + pos, sizeof(hdr) - pos); + hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos); pack_size += sizeof(hdr) - pos; } else { e->depth = 0; hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr), type, dat->len); - sha1write(pack_file, hdr, hdrlen); + hashwrite(pack_file, hdr, hdrlen); pack_size += hdrlen; } - sha1write(pack_file, out, s.total_out); + hashwrite(pack_file, out, s.total_out); pack_size += s.total_out; e->idx.crc32 = crc32_end(pack_file); @@ -1215,9 +1217,9 @@ static int store_object( return 0; } -static void truncate_pack(struct sha1file_checkpoint *checkpoint) +static void truncate_pack(struct hashfile_checkpoint *checkpoint) { - if (sha1file_truncate(pack_file, checkpoint)) + if (hashfile_truncate(pack_file, checkpoint)) die_errno("cannot truncate pack to skip duplicate"); pack_size = checkpoint->offset; } @@ -1231,9 +1233,9 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark) struct object_id oid; unsigned long hdrlen; off_t offset; - git_SHA_CTX c; + git_hash_ctx c; git_zstream s; - struct sha1file_checkpoint checkpoint; + struct hashfile_checkpoint checkpoint; int status = Z_OK; /* Determine if we should auto-checkpoint. 
*/ @@ -1241,13 +1243,13 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark) || (pack_size + 60 + len) < pack_size) cycle_packfile(); - sha1file_checkpoint(pack_file, &checkpoint); + hashfile_checkpoint(pack_file, &checkpoint); offset = checkpoint.offset; hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1; - git_SHA1_Init(&c); - git_SHA1_Update(&c, out_buf, hdrlen); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, out_buf, hdrlen); crc32_begin(pack_file); @@ -1265,7 +1267,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark) if (!n && feof(stdin)) die("EOF in data (%" PRIuMAX " bytes remaining)", len); - git_SHA1_Update(&c, in_buf, n); + the_hash_algo->update_fn(&c, in_buf, n); s.next_in = in_buf; s.avail_in = n; len -= n; @@ -1275,7 +1277,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark) if (!s.avail_out || status == Z_STREAM_END) { size_t n = s.next_out - out_buf; - sha1write(pack_file, out_buf, n); + hashwrite(pack_file, out_buf, n); pack_size += n; s.next_out = out_buf; s.avail_out = out_sz; @@ -1291,7 +1293,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark) } } git_deflate_end(&s); - git_SHA1_Final(oid.hash, &c); + the_hash_algo->final_fn(oid.hash, &c); if (oidout) oidcpy(oidout, &oid); @@ -1350,25 +1352,25 @@ static void *gfi_unpack_entry( { enum object_type type; struct packed_git *p = all_packs[oe->pack_id]; - if (p == pack_data && p->pack_size < (pack_size + 20)) { + if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) { /* The object is stored in the packfile we are writing to * and we have modified it since the last time we scanned * back to read a previously written object. If an old - * window covered [p->pack_size, p->pack_size + 20) its + * window covered [p->pack_size, p->pack_size + rawsz) its * data is stale and is not valid. Closing all windows * and updating the packfile length ensures we can read * the newly written data. */ close_pack_windows(p); - sha1flush(pack_file); + hashflush(pack_file); - /* We have to offer 20 bytes additional on the end of + /* We have to offer rawsz bytes additional on the end of * the packfile as the core unpacker code assumes the * footer is present at the file end and must promise - * at least 20 bytes within any window it maps. But + * at least rawsz bytes within any window it maps. But * we don't actually create the footer here. 
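The fast-import hunks above route all hashing through the_hash_algo (and the sha1file to hashfile rename) instead of raw SHA-1 calls and hard-coded 20-byte constants. A minimal sketch of hashing an object header plus payload through that vtable, using only the init_fn/update_fn/final_fn and type_name() calls exercised above; hash_loose_header_and_body() is an illustrative name:

    #include "cache.h"

    /* Hash "<type> <len>\0<body>" the way store_object() above does. */
    static void hash_loose_header_and_body(enum object_type type,
                                           const void *body, unsigned long len,
                                           struct object_id *oid)
    {
            char hdr[32];
            int hdrlen;
            git_hash_ctx c;

            hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu",
                               type_name(type), len) + 1;
            the_hash_algo->init_fn(&c);
            the_hash_algo->update_fn(&c, hdr, hdrlen);
            the_hash_algo->update_fn(&c, body, len);
            the_hash_algo->final_fn(oid->hash, &c);   /* the_hash_algo->rawsz bytes */
    }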
*/ - p->pack_size = pack_size + 20; + p->pack_size = pack_size + the_hash_algo->rawsz; } return unpack_entry(p, oe->idx.offset, &type, sizep); } @@ -2204,7 +2206,7 @@ static void construct_path_with_fanout(const char *hex_sha1, unsigned char fanout, char *path) { unsigned int i = 0, j = 0; - if (fanout >= 20) + if (fanout >= the_hash_algo->rawsz) die("Too large fanout (%u)", fanout); while (fanout) { path[i++] = hex_sha1[j++]; @@ -2212,8 +2214,8 @@ static void construct_path_with_fanout(const char *hex_sha1, path[i++] = '/'; fanout--; } - memcpy(path + i, hex_sha1 + j, GIT_SHA1_HEXSZ - j); - path[i + GIT_SHA1_HEXSZ - j] = '\0'; + memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j); + path[i + the_hash_algo->hexsz - j] = '\0'; } static uintmax_t do_change_note_fanout( @@ -2421,7 +2423,7 @@ static void file_change_m(const char *p, struct branch *b) else if (oe) { if (oe->type != OBJ_COMMIT) die("Not a commit (actually a %s): %s", - typename(oe->type), command_buf.buf); + type_name(oe->type), command_buf.buf); } /* * Accept the sha1 without checking; it expected to be in @@ -2448,7 +2450,7 @@ static void file_change_m(const char *p, struct branch *b) command_buf.buf); if (type != expected) die("Not a %s (actually a %s): %s", - typename(expected), typename(type), + type_name(expected), type_name(type), command_buf.buf); } @@ -2599,14 +2601,14 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa } else if (oe) { if (oe->type != OBJ_BLOB) die("Not a blob (actually a %s): %s", - typename(oe->type), command_buf.buf); + type_name(oe->type), command_buf.buf); } else if (!is_null_oid(&oid)) { enum object_type type = sha1_object_info(oid.hash, NULL); if (type < 0) die("Blob not found: %s", command_buf.buf); if (type != OBJ_BLOB) die("Not a blob (actually a %s): %s", - typename(type), command_buf.buf); + type_name(type), command_buf.buf); } construct_path_with_fanout(oid_to_hex(&commit_oid), *old_fanout, path); @@ -2914,7 +2916,7 @@ static void parse_new_tag(const char *arg) "object %s\n" "type %s\n" "tag %s\n", - oid_to_hex(&oid), typename(type), t->name); + oid_to_hex(&oid), type_name(type), t->name); if (tagger) strbuf_addf(&new_data, "tagger %s\n", tagger); @@ -2985,10 +2987,10 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid) die("Can't read object %s", oid_to_hex(oid)); if (type != OBJ_BLOB) die("Object %s is a %s but a blob was expected.", - oid_to_hex(oid), typename(type)); + oid_to_hex(oid), type_name(type)); strbuf_reset(&line); strbuf_addf(&line, "%s %s %lu\n", oid_to_hex(oid), - typename(type), size); + type_name(type), size); cat_blob_write(line.buf, line.len); strbuf_release(&line); cat_blob_write(buf, size); @@ -3003,7 +3005,7 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid) static void parse_get_mark(const char *p) { - struct object_entry *oe = oe; + struct object_entry *oe; char output[GIT_MAX_HEXSZ + 2]; /* get-mark SP <object> LF */ @@ -3020,7 +3022,7 @@ static void parse_get_mark(const char *p) static void parse_cat_blob(const char *p) { - struct object_entry *oe = oe; + struct object_entry *oe; struct object_id oid; /* cat-blob SP <object> LF */ diff --git a/fetch-object.c b/fetch-object.c new file mode 100644 index 0000000000..853624f811 --- /dev/null +++ b/fetch-object.c @@ -0,0 +1,45 @@ +#include "cache.h" +#include "packfile.h" +#include "pkt-line.h" +#include "strbuf.h" +#include "transport.h" +#include "fetch-object.h" + +static void fetch_refs(const char *remote_name, struct ref *ref) +{ + 
struct remote *remote; + struct transport *transport; + int original_fetch_if_missing = fetch_if_missing; + + fetch_if_missing = 0; + remote = remote_get(remote_name); + if (!remote->url[0]) + die(_("Remote with no URL")); + transport = transport_get(remote, remote->url[0]); + + transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1"); + transport_set_option(transport, TRANS_OPT_NO_DEPENDENTS, "1"); + transport_fetch_refs(transport, ref); + fetch_if_missing = original_fetch_if_missing; +} + +void fetch_object(const char *remote_name, const unsigned char *sha1) +{ + struct ref *ref = alloc_ref(sha1_to_hex(sha1)); + hashcpy(ref->old_oid.hash, sha1); + fetch_refs(remote_name, ref); +} + +void fetch_objects(const char *remote_name, const struct oid_array *to_fetch) +{ + struct ref *ref = NULL; + int i; + + for (i = 0; i < to_fetch->nr; i++) { + struct ref *new_ref = alloc_ref(oid_to_hex(&to_fetch->oid[i])); + oidcpy(&new_ref->old_oid, &to_fetch->oid[i]); + new_ref->next = ref; + ref = new_ref; + } + fetch_refs(remote_name, ref); +} diff --git a/fetch-object.h b/fetch-object.h new file mode 100644 index 0000000000..4b269d07ed --- /dev/null +++ b/fetch-object.h @@ -0,0 +1,11 @@ +#ifndef FETCH_OBJECT_H +#define FETCH_OBJECT_H + +#include "sha1-array.h" + +extern void fetch_object(const char *remote_name, const unsigned char *sha1); + +extern void fetch_objects(const char *remote_name, + const struct oid_array *to_fetch); + +#endif diff --git a/fetch-pack.c b/fetch-pack.c index 9f6b07ad91..1d6117565c 100644 --- a/fetch-pack.c +++ b/fetch-pack.c @@ -29,6 +29,7 @@ static int deepen_not_ok; static int fetch_fsck_objects = -1; static int transfer_fsck_objects = -1; static int agent_supported; +static int server_supports_filtering; static struct lock_file shallow_lock; static const char *alternate_shallow_file; @@ -260,8 +261,8 @@ static enum ack_type get_ack(int fd, struct object_id *result_oid) char *line = packet_read_line(fd, &len); const char *arg; - if (!len) - die(_("git fetch-pack: expected ACK/NAK, got EOF")); + if (!line) + die(_("git fetch-pack: expected ACK/NAK, got a flush packet")); if (!strcmp(line, "NAK")) return NAK; if (skip_prefix(line, "ACK ", &arg)) { @@ -379,6 +380,8 @@ static int find_common(struct fetch_pack_args *args, if (deepen_not_ok) strbuf_addstr(&c, " deepen-not"); if (agent_supported) strbuf_addf(&c, " agent=%s", git_user_agent_sanitized()); + if (args->filter_options.choice) + strbuf_addstr(&c, " filter"); packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf); strbuf_release(&c); } else @@ -407,6 +410,9 @@ static int find_common(struct fetch_pack_args *args, packet_buf_write(&req_buf, "deepen-not %s", s->string); } } + if (server_supports_filtering && args->filter_options.choice) + packet_buf_write(&req_buf, "filter %s", + args->filter_options.filter_spec); packet_buf_flush(&req_buf); state_len = req_buf.len; @@ -450,6 +456,8 @@ static int find_common(struct fetch_pack_args *args, flushes = 0; retval = -1; + if (args->no_dependents) + goto done; while ((oid = get_rev())) { packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid)); print_verbose(args, "have %s", oid_to_hex(oid)); @@ -709,6 +717,7 @@ static int everything_local(struct fetch_pack_args *args, { struct ref *ref; int retval; + int old_save_commit_buffer = save_commit_buffer; timestamp_t cutoff = 0; save_commit_buffer = 0; @@ -735,29 +744,31 @@ static int everything_local(struct fetch_pack_args *args, } } - if (!args->deepen) { - for_each_ref(mark_complete_oid, NULL); - 
for_each_cached_alternate(mark_alternate_complete); - commit_list_sort_by_date(&complete); - if (cutoff) - mark_recent_complete_commits(args, cutoff); - } + if (!args->no_dependents) { + if (!args->deepen) { + for_each_ref(mark_complete_oid, NULL); + for_each_cached_alternate(mark_alternate_complete); + commit_list_sort_by_date(&complete); + if (cutoff) + mark_recent_complete_commits(args, cutoff); + } - /* - * Mark all complete remote refs as common refs. - * Don't mark them common yet; the server has to be told so first. - */ - for (ref = *refs; ref; ref = ref->next) { - struct object *o = deref_tag(lookup_object(ref->old_oid.hash), - NULL, 0); + /* + * Mark all complete remote refs as common refs. + * Don't mark them common yet; the server has to be told so first. + */ + for (ref = *refs; ref; ref = ref->next) { + struct object *o = deref_tag(lookup_object(ref->old_oid.hash), + NULL, 0); - if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE)) - continue; + if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE)) + continue; - if (!(o->flags & SEEN)) { - rev_list_push((struct commit *)o, COMMON_REF | SEEN); + if (!(o->flags & SEEN)) { + rev_list_push((struct commit *)o, COMMON_REF | SEEN); - mark_common((struct commit *)o, 1, 1); + mark_common((struct commit *)o, 1, 1); + } } } @@ -777,6 +788,9 @@ static int everything_local(struct fetch_pack_args *args, print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote), ref->name); } + + save_commit_buffer = old_save_commit_buffer; + return retval; } @@ -833,7 +847,7 @@ static int get_pack(struct fetch_pack_args *args, argv_array_push(&cmd.args, alternate_shallow_file); } - if (do_keep) { + if (do_keep || args->from_promisor) { if (pack_lockfile) cmd.out = -1; cmd_name = "index-pack"; @@ -843,7 +857,7 @@ static int get_pack(struct fetch_pack_args *args, argv_array_push(&cmd.args, "-v"); if (args->use_thin_pack) argv_array_push(&cmd.args, "--fix-thin"); - if (args->lock_pack || unpack_limit) { + if (do_keep && (args->lock_pack || unpack_limit)) { char hostname[HOST_NAME_MAX + 1]; if (xgethostname(hostname, sizeof(hostname))) xsnprintf(hostname, sizeof(hostname), "localhost"); @@ -853,6 +867,8 @@ static int get_pack(struct fetch_pack_args *args, } if (args->check_self_contained_and_connected) argv_array_push(&cmd.args, "--check-self-contained-and-connected"); + if (args->from_promisor) + argv_array_push(&cmd.args, "--promisor"); } else { cmd_name = "unpack-objects"; @@ -870,8 +886,17 @@ static int get_pack(struct fetch_pack_args *args, ? fetch_fsck_objects : transfer_fsck_objects >= 0 ? transfer_fsck_objects - : 0) - argv_array_push(&cmd.args, "--strict"); + : 0) { + if (args->from_promisor) + /* + * We cannot use --strict in index-pack because it + * checks both broken objects and links, but we only + * want to check for broken objects. 
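The new fetch-object.c above gives partial-clone code a small entry point for pulling a missing object from the promisor remote on demand. A sketch of how a caller might use it, assuming the fetch_object() declaration from fetch-object.h; note that in-tree callers normally rely on the object-reading layer doing this automatically when fetch_if_missing is set, so read_blob_or_fetch() below is only an illustration of the API:

    #include "cache.h"
    #include "fetch-object.h"

    /* Try to read a blob; if it is absent locally, ask the promisor and retry. */
    static void *read_blob_or_fetch(const char *remote_name,
                                    const struct object_id *oid,
                                    unsigned long *size)
    {
            enum object_type type;
            void *data = read_sha1_file(oid->hash, &type, size);

            if (!data) {
                    fetch_object(remote_name, oid->hash);
                    data = read_sha1_file(oid->hash, &type, size);
            }
            return data;
    }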
+ */ + argv_array_push(&cmd.args, "--fsck-objects"); + else + argv_array_push(&cmd.args, "--strict"); + } cmd.in = demux.out; cmd.git_cmd = 1; @@ -964,6 +989,13 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args, else prefer_ofs_delta = 0; + if (server_supports("filter")) { + server_supports_filtering = 1; + print_verbose(args, _("Server supports filter")); + } else if (args->filter_options.choice) { + warning("filtering not recognized by server, ignoring"); + } + if ((agent_feature = server_feature_value("agent", &agent_len))) { agent_supported = 1; if (agent_len) diff --git a/fetch-pack.h b/fetch-pack.h index b6aeb43a8e..3e224a1822 100644 --- a/fetch-pack.h +++ b/fetch-pack.h @@ -3,6 +3,7 @@ #include "string-list.h" #include "run-command.h" +#include "list-objects-filter-options.h" struct oid_array; @@ -12,6 +13,7 @@ struct fetch_pack_args { int depth; const char *deepen_since; const struct string_list *deepen_not; + struct list_objects_filter_options filter_options; unsigned deepen_relative:1; unsigned quiet:1; unsigned keep_pack:1; @@ -29,6 +31,14 @@ struct fetch_pack_args { unsigned cloning:1; unsigned update_shallow:1; unsigned deepen:1; + unsigned from_promisor:1; + + /* + * If 1, fetch_pack() will also not modify any object flags. + * This allows fetch_pack() to safely be called by any function, + * regardless of which object flags it uses (if any). + */ + unsigned no_dependents:1; }; /* @@ -821,7 +821,7 @@ static int fsck_tag_buffer(struct tag *tag, const char *data, ret = report(options, &tag->object, FSCK_MSG_TAG_OBJECT_NOT_TAG, "expected tag got %s", - typename(type)); + type_name(type)); goto done; } } diff --git a/fsmonitor.c b/fsmonitor.c index 0af7c4edba..6d7bcd5d0e 100644 --- a/fsmonitor.c +++ b/fsmonitor.c @@ -130,7 +130,7 @@ static void fsmonitor_refresh_callback(struct index_state *istate, const char *n * as it could be a new untracked file. */ trace_printf_key(&trace_fsmonitor, "fsmonitor_refresh_callback '%s'", name); - untracked_cache_invalidate_path(istate, name); + untracked_cache_invalidate_path(istate, name, 0); } void refresh_fsmonitor(struct index_state *istate) diff --git a/fsmonitor.h b/fsmonitor.h index cd3cc0ccf2..65f3743636 100644 --- a/fsmonitor.h +++ b/fsmonitor.h @@ -65,7 +65,7 @@ static inline void mark_fsmonitor_invalid(struct index_state *istate, struct cac { if (core_fsmonitor) { ce->ce_flags &= ~CE_FSMONITOR_VALID; - untracked_cache_invalidate_path(istate, ce->name); + untracked_cache_invalidate_path(istate, ce->name, 1); trace_printf_key(&trace_fsmonitor, "mark_fsmonitor_invalid '%s'", ce->name); } } diff --git a/git-add--interactive.perl b/git-add--interactive.perl index 964c3a7542..d190469cd8 100755 --- a/git-add--interactive.perl +++ b/git-add--interactive.perl @@ -677,7 +677,7 @@ sub add_untracked_cmd { sub run_git_apply { my $cmd = shift; my $fh; - open $fh, '| git ' . $cmd . " --recount --allow-overlap"; + open $fh, '| git ' . $cmd . 
" --allow-overlap"; print $fh @_; return close $fh; } @@ -705,6 +705,14 @@ sub parse_diff { } my (@hunk) = { TEXT => [], DISPLAY => [], TYPE => 'header' }; + if (@colored && @colored != @diff) { + print STDERR + "fatal: mismatched output from interactive.diffFilter\n", + "hint: Your filter must maintain a one-to-one correspondence\n", + "hint: between its input and output lines.\n"; + exit 1; + } + for (my $i = 0; $i < @diff; $i++) { if ($diff[$i] =~ /^@@ /) { push @hunk, { TEXT => [], DISPLAY => [], @@ -751,6 +759,15 @@ sub parse_hunk_header { return ($o_ofs, $o_cnt, $n_ofs, $n_cnt); } +sub format_hunk_header { + my ($o_ofs, $o_cnt, $n_ofs, $n_cnt) = @_; + return ("@@ -$o_ofs" . + (($o_cnt != 1) ? ",$o_cnt" : '') . + " +$n_ofs" . + (($n_cnt != 1) ? ",$n_cnt" : '') . + " @@\n"); +} + sub split_hunk { my ($text, $display) = @_; my @split = (); @@ -784,6 +801,11 @@ sub split_hunk { while (++$i < @$text) { my $line = $text->[$i]; my $display = $display->[$i]; + if ($line =~ /^\\/) { + push @{$this->{TEXT}}, $line; + push @{$this->{DISPLAY}}, $display; + next; + } if ($line =~ /^ /) { if ($this->{ADDDEL} && !defined $next_hunk_start) { @@ -838,11 +860,7 @@ sub split_hunk { my $o_cnt = $hunk->{OCNT}; my $n_cnt = $hunk->{NCNT}; - my $head = ("@@ -$o_ofs" . - (($o_cnt != 1) ? ",$o_cnt" : '') . - " +$n_ofs" . - (($n_cnt != 1) ? ",$n_cnt" : '') . - " @@\n"); + my $head = format_hunk_header($o_ofs, $o_cnt, $n_ofs, $n_cnt); my $display_head = $head; unshift @{$hunk->{TEXT}}, $head; if ($diff_use_color) { @@ -886,6 +904,9 @@ sub merge_hunk { $n_cnt++; push @line, $line; next; + } elsif ($line =~ /^\\/) { + push @line, $line; + next; } last if ($o1_ofs <= $ofs); @@ -904,6 +925,9 @@ sub merge_hunk { $n_cnt++; push @line, $line; next; + } elsif ($line =~ /^\\/) { + push @line, $line; + next; } $ofs++; $o_cnt++; @@ -912,11 +936,7 @@ sub merge_hunk { } push @line, $line; } - my $head = ("@@ -$o0_ofs" . - (($o_cnt != 1) ? ",$o_cnt" : '') . - " +$n0_ofs" . - (($n_cnt != 1) ? ",$n_cnt" : '') . - " @@\n"); + my $head = format_hunk_header($o0_ofs, $o_cnt, $n0_ofs, $n_cnt); @{$prev->{TEXT}} = ($head, @line); } @@ -925,14 +945,35 @@ sub coalesce_overlapping_hunks { my @out = (); my ($last_o_ctx, $last_was_dirty); + my $ofs_delta = 0; - for (grep { $_->{USE} } @in) { + for (@in) { if ($_->{TYPE} ne 'hunk') { push @out, $_; next; } my $text = $_->{TEXT}; - my ($o_ofs) = parse_hunk_header($text->[0]); + my ($o_ofs, $o_cnt, $n_ofs, $n_cnt) = + parse_hunk_header($text->[0]); + unless ($_->{USE}) { + $ofs_delta += $o_cnt - $n_cnt; + # If this hunk has been edited then subtract + # the delta that is due to the edit. + if ($_->{OFS_DELTA}) { + $ofs_delta -= $_->{OFS_DELTA}; + } + next; + } + if ($ofs_delta) { + $n_ofs += $ofs_delta; + $_->{TEXT}->[0] = format_hunk_header($o_ofs, $o_cnt, + $n_ofs, $n_cnt); + } + # If this hunk was edited then adjust the offset delta + # to reflect the edit. 
+ if ($_->{OFS_DELTA}) { + $ofs_delta += $_->{OFS_DELTA}; + } if (defined $last_o_ctx && $o_ofs <= $last_o_ctx && !$_->{DIRTY} && @@ -1004,6 +1045,30 @@ marked for discarding."), marked for applying."), ); +sub recount_edited_hunk { + local $_; + my ($oldtext, $newtext) = @_; + my ($o_cnt, $n_cnt) = (0, 0); + for (@{$newtext}[1..$#{$newtext}]) { + my $mode = substr($_, 0, 1); + if ($mode eq '-') { + $o_cnt++; + } elsif ($mode eq '+') { + $n_cnt++; + } elsif ($mode eq ' ') { + $o_cnt++; + $n_cnt++; + } + } + my ($o_ofs, undef, $n_ofs, undef) = + parse_hunk_header($newtext->[0]); + $newtext->[0] = format_hunk_header($o_ofs, $o_cnt, $n_ofs, $n_cnt); + my (undef, $orig_o_cnt, undef, $orig_n_cnt) = + parse_hunk_header($oldtext->[0]); + # Return the change in the number of lines inserted by this hunk + return $orig_o_cnt - $orig_n_cnt - $o_cnt + $n_cnt; +} + sub edit_hunk_manually { my ($oldtext) = @_; @@ -1102,25 +1167,32 @@ sub prompt_yesno { } sub edit_hunk_loop { - my ($head, $hunk, $ix) = @_; - my $text = $hunk->[$ix]->{TEXT}; + my ($head, $hunks, $ix) = @_; + my $hunk = $hunks->[$ix]; + my $text = $hunk->{TEXT}; while (1) { - $text = edit_hunk_manually($text); - if (!defined $text) { + my $newtext = edit_hunk_manually($text); + if (!defined $newtext) { return undef; } my $newhunk = { - TEXT => $text, - TYPE => $hunk->[$ix]->{TYPE}, + TEXT => $newtext, + TYPE => $hunk->{TYPE}, USE => 1, DIRTY => 1, }; + $newhunk->{OFS_DELTA} = recount_edited_hunk($text, $newtext); + # If this hunk has already been edited then add the + # offset delta of the previous edit to get the real + # delta from the original unedited hunk. + $hunk->{OFS_DELTA} and + $newhunk->{OFS_DELTA} += $hunk->{OFS_DELTA}; if (diff_applies($head, - @{$hunk}[0..$ix-1], + @{$hunks}[0..$ix-1], $newhunk, - @{$hunk}[$ix+1..$#{$hunk}])) { - $newhunk->{DISPLAY} = [color_diff(@{$text})]; + @{$hunks}[$ix+1..$#{$hunks}])) { + $newhunk->{DISPLAY} = [color_diff(@{$newtext})]; return $newhunk; } else { @@ -1184,7 +1256,13 @@ d - do not apply this hunk or any of the later hunks in the file"), ); sub help_patch_cmd { - print colored $help_color, __($help_patch_modes{$patch_mode}), "\n", __ <<EOF ; + local $_; + my $other = $_[0] . ",?"; + print colored $help_color, __($help_patch_modes{$patch_mode}), "\n", + map { "$_\n" } grep { + my $c = quotemeta(substr($_, 0, 1)); + $other =~ /,$c/ + } split "\n", __ <<EOF ; g - select a hunk to go to / - search for a hunk matching the given regex j - leave this hunk undecided, see next undecided hunk @@ -1302,39 +1380,39 @@ sub display_hunks { my %patch_update_prompt_modes = ( stage => { - mode => N__("Stage mode change [y,n,q,a,d,/%s,?]? "), - deletion => N__("Stage deletion [y,n,q,a,d,/%s,?]? "), - hunk => N__("Stage this hunk [y,n,q,a,d,/%s,?]? "), + mode => N__("Stage mode change [y,n,q,a,d%s,?]? "), + deletion => N__("Stage deletion [y,n,q,a,d%s,?]? "), + hunk => N__("Stage this hunk [y,n,q,a,d%s,?]? "), }, stash => { - mode => N__("Stash mode change [y,n,q,a,d,/%s,?]? "), - deletion => N__("Stash deletion [y,n,q,a,d,/%s,?]? "), - hunk => N__("Stash this hunk [y,n,q,a,d,/%s,?]? "), + mode => N__("Stash mode change [y,n,q,a,d%s,?]? "), + deletion => N__("Stash deletion [y,n,q,a,d%s,?]? "), + hunk => N__("Stash this hunk [y,n,q,a,d%s,?]? "), }, reset_head => { - mode => N__("Unstage mode change [y,n,q,a,d,/%s,?]? "), - deletion => N__("Unstage deletion [y,n,q,a,d,/%s,?]? "), - hunk => N__("Unstage this hunk [y,n,q,a,d,/%s,?]? "), + mode => N__("Unstage mode change [y,n,q,a,d%s,?]? 
"), + deletion => N__("Unstage deletion [y,n,q,a,d%s,?]? "), + hunk => N__("Unstage this hunk [y,n,q,a,d%s,?]? "), }, reset_nothead => { - mode => N__("Apply mode change to index [y,n,q,a,d,/%s,?]? "), - deletion => N__("Apply deletion to index [y,n,q,a,d,/%s,?]? "), - hunk => N__("Apply this hunk to index [y,n,q,a,d,/%s,?]? "), + mode => N__("Apply mode change to index [y,n,q,a,d%s,?]? "), + deletion => N__("Apply deletion to index [y,n,q,a,d%s,?]? "), + hunk => N__("Apply this hunk to index [y,n,q,a,d%s,?]? "), }, checkout_index => { - mode => N__("Discard mode change from worktree [y,n,q,a,d,/%s,?]? "), - deletion => N__("Discard deletion from worktree [y,n,q,a,d,/%s,?]? "), - hunk => N__("Discard this hunk from worktree [y,n,q,a,d,/%s,?]? "), + mode => N__("Discard mode change from worktree [y,n,q,a,d%s,?]? "), + deletion => N__("Discard deletion from worktree [y,n,q,a,d%s,?]? "), + hunk => N__("Discard this hunk from worktree [y,n,q,a,d%s,?]? "), }, checkout_head => { - mode => N__("Discard mode change from index and worktree [y,n,q,a,d,/%s,?]? "), - deletion => N__("Discard deletion from index and worktree [y,n,q,a,d,/%s,?]? "), - hunk => N__("Discard this hunk from index and worktree [y,n,q,a,d,/%s,?]? "), + mode => N__("Discard mode change from index and worktree [y,n,q,a,d%s,?]? "), + deletion => N__("Discard deletion from index and worktree [y,n,q,a,d%s,?]? "), + hunk => N__("Discard this hunk from index and worktree [y,n,q,a,d%s,?]? "), }, checkout_nothead => { - mode => N__("Apply mode change to index and worktree [y,n,q,a,d,/%s,?]? "), - deletion => N__("Apply deletion to index and worktree [y,n,q,a,d,/%s,?]? "), - hunk => N__("Apply this hunk to index and worktree [y,n,q,a,d,/%s,?]? "), + mode => N__("Apply mode change to index and worktree [y,n,q,a,d%s,?]? "), + deletion => N__("Apply deletion to index and worktree [y,n,q,a,d%s,?]? "), + hunk => N__("Apply this hunk to index and worktree [y,n,q,a,d%s,?]? "), }, ); @@ -1390,7 +1468,7 @@ sub patch_update_file { $other .= ',J'; } if ($num > 1) { - $other .= ',g'; + $other .= ',g,/'; } for ($i = 0; $i < $num; $i++) { if (!defined $hunk[$i]{USE}) { @@ -1431,8 +1509,12 @@ sub patch_update_file { } next; } - elsif ($other =~ /g/ && $line =~ /^g(.*)/) { + elsif ($line =~ /^g(.*)/) { my $response = $1; + unless ($other =~ /g/) { + error_msg __("No other hunks to goto\n"); + next; + } my $no = $ix > 10 ? $ix - 10 : 0; while ($response eq '') { $no = display_hunks(\@hunk, $no); @@ -1478,6 +1560,10 @@ sub patch_update_file { } elsif ($line =~ m|^/(.*)|) { my $regex = $1; + unless ($other =~ m|/|) { + error_msg __("No other hunks to search\n"); + next; + } if ($1 eq "") { print colored $prompt_color, __("search for regex? 
"); $regex = <STDIN>; @@ -1546,7 +1632,11 @@ sub patch_update_file { next; } } - elsif ($other =~ /s/ && $line =~ /^s/) { + elsif ($line =~ /^s/) { + unless ($other =~ /s/) { + error_msg __("Sorry, cannot split this hunk\n"); + next; + } my @split = split_hunk($hunk[$ix]{TEXT}, $hunk[$ix]{DISPLAY}); if (1 < @split) { print colored $header_color, sprintf( @@ -1558,7 +1648,11 @@ sub patch_update_file { $num = scalar @hunk; next; } - elsif ($other =~ /e/ && $line =~ /^e/) { + elsif ($line =~ /^e/) { + unless ($other =~ /e/) { + error_msg __("Sorry, cannot edit this hunk\n"); + next; + } my $newhunk = edit_hunk_loop($head, \@hunk, $ix); if (defined $newhunk) { splice @hunk, $ix, 1, $newhunk; diff --git a/git-compat-util.h b/git-compat-util.h index 68b2ad531e..07e383257b 100644 --- a/git-compat-util.h +++ b/git-compat-util.h @@ -826,8 +826,8 @@ extern ssize_t xpread(int fd, void *buf, size_t len, off_t offset); extern int xdup(int fd); extern FILE *xfopen(const char *path, const char *mode); extern FILE *xfdopen(int fd, const char *mode); -extern int xmkstemp(char *template); -extern int xmkstemp_mode(char *template, int mode); +extern int xmkstemp(char *temp_filename); +extern int xmkstemp_mode(char *temp_filename, int mode); extern char *xgetcwd(void); extern FILE *fopen_for_writing(const char *path); extern FILE *fopen_or_warn(const char *path, const char *mode); diff --git a/git-cvsimport.perl b/git-cvsimport.perl index 2d8df83172..b31613cb8a 100755 --- a/git-cvsimport.perl +++ b/git-cvsimport.perl @@ -601,7 +601,9 @@ sub pdate($) { my ($d) = @_; m#(\d{2,4})/(\d\d)/(\d\d)\s(\d\d):(\d\d)(?::(\d\d))?# or die "Unparseable date: $d\n"; - my $y=$1; $y-=1900 if $y>1900; + my $y=$1; + $y+=100 if $y<70; + $y+=1900 if $y<1000; return timegm($6||0,$5,$4,$3,$2-1,$y); } diff --git a/git-filter-branch.sh b/git-filter-branch.sh index 1b7e4b2cdb..98c76ec589 100755 --- a/git-filter-branch.sh +++ b/git-filter-branch.sh @@ -627,12 +627,12 @@ then print H "$_:$f\n" or die; } close(H) or die;' || die "Unable to save state") - state_tree=$(/bin/echo -e "100644 blob $state_blob\tfilter.map" | git mktree) + state_tree=$(printf '100644 blob %s\tfilter.map\n' "$state_blob" | git mktree) if test -n "$state_commit" then - state_commit=$(/bin/echo "Sync" | git commit-tree "$state_tree" -p "$state_commit") + state_commit=$(echo "Sync" | git commit-tree "$state_tree" -p "$state_commit") else - state_commit=$(/bin/echo "Sync" | git commit-tree "$state_tree" ) + state_commit=$(echo "Sync" | git commit-tree "$state_tree" ) fi git update-ref "$state_branch" "$state_commit" fi diff --git a/git-rebase--am.sh b/git-rebase--am.sh index 14c50782e0..be3f068922 100644 --- a/git-rebase--am.sh +++ b/git-rebase--am.sh @@ -27,6 +27,9 @@ skip) move_to_original_branch return ;; +show-current-patch) + exec git am --show-current-patch + ;; esac if test -z "$rebase_root" @@ -46,6 +49,7 @@ then # makes this easy git cherry-pick ${gpg_sign_opt:+"$gpg_sign_opt"} --allow-empty \ $allow_rerere_autoupdate --right-only "$revisions" \ + $allow_empty_message \ ${restrict_revision+^$restrict_revision} ret=$? 
else diff --git a/git-rebase--interactive.sh b/git-rebase--interactive.sh index d47bd29593..331c8dfeac 100644 --- a/git-rebase--interactive.sh +++ b/git-rebase--interactive.sh @@ -199,12 +199,14 @@ make_patch () { die_with_patch () { echo "$1" > "$state_dir"/stopped-sha + git update-ref REBASE_HEAD "$1" make_patch "$1" die "$2" } exit_with_patch () { echo "$1" > "$state_dir"/stopped-sha + git update-ref REBASE_HEAD "$1" make_patch $1 git rev-parse --verify HEAD > "$amend" gpg_sign_opt_quoted=${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} @@ -281,7 +283,7 @@ pick_one () { test -d "$rewritten" && pick_one_preserving_merges "$@" && return - output eval git cherry-pick $allow_rerere_autoupdate \ + output eval git cherry-pick $allow_rerere_autoupdate $allow_empty_message \ ${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \ "$strategy_args" $empty_args $ff "$@" @@ -396,7 +398,7 @@ pick_one_preserving_merges () { --sq-quote "$gpg_sign_opt")} \ $allow_rerere_autoupdate "$merge_args" \ "$strategy_args" \ - -m $(git rev-parse --sq-quote "$msg_content") \ + -m "$(git rev-parse --sq-quote "$msg_content")" \ "$new_parents" then printf "%s\n" "$msg_content" > "$GIT_DIR"/MERGE_MSG @@ -406,6 +408,7 @@ pick_one_preserving_merges () { ;; *) output eval git cherry-pick $allow_rerere_autoupdate \ + $allow_empty_message \ ${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \ "$strategy_args" "$@" || die_with_patch $sha1 "$(eval_gettext "Could not pick \$sha1")" @@ -559,7 +562,8 @@ do_next () { mark_action_done do_pick $sha1 "$rest" - git commit --amend --no-post-rewrite ${gpg_sign_opt:+"$gpg_sign_opt"} || { + git commit --amend --no-post-rewrite ${gpg_sign_opt:+"$gpg_sign_opt"} \ + $allow_empty_message || { warn "$(eval_gettext "\ Could not amend commit after successfully picking \$sha1... \$rest This is most likely due to an empty commit message, or the pre-commit hook @@ -607,7 +611,7 @@ you are able to reword the commit.")" # This is an intermediate commit; its message will only be # used in case of trouble. So use the long version: do_with_author output git commit --amend --no-verify -F "$squash_msg" \ - ${gpg_sign_opt:+"$gpg_sign_opt"} || + ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message || die_failed_squash $sha1 "$rest" ;; *) @@ -615,13 +619,13 @@ you are able to reword the commit.")" if test -f "$fixup_msg" then do_with_author git commit --amend --no-verify -F "$fixup_msg" \ - ${gpg_sign_opt:+"$gpg_sign_opt"} || + ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message || die_failed_squash $sha1 "$rest" else cp "$squash_msg" "$GIT_DIR"/SQUASH_MSG || exit rm -f "$GIT_DIR"/MERGE_MSG do_with_author git commit --amend --no-verify -F "$GIT_DIR"/SQUASH_MSG -e \ - ${gpg_sign_opt:+"$gpg_sign_opt"} || + ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message || die_failed_squash $sha1 "$rest" fi rm -f "$squash_msg" "$fixup_msg" @@ -754,7 +758,8 @@ case "$action" in continue) if test ! -d "$rewritten" then - exec git rebase--helper ${force_rebase:+--no-ff} --continue + exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \ + --continue fi # do we have anything to commit? if git diff-index --cached --quiet HEAD -- @@ -794,11 +799,11 @@ In both cases, once you're done, continue with: You have uncommitted changes in your working tree. 
Please commit them first and then run 'git rebase --continue' again.")" do_with_author git commit --amend --no-verify -F "$msg" -e \ - ${gpg_sign_opt:+"$gpg_sign_opt"} || + ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message || die "$(gettext "Could not commit staged changes.")" else do_with_author git commit --no-verify -F "$msg" -e \ - ${gpg_sign_opt:+"$gpg_sign_opt"} || + ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message || die "$(gettext "Could not commit staged changes.")" fi fi @@ -817,7 +822,8 @@ skip) if test ! -d "$rewritten" then - exec git rebase--helper ${force_rebase:+--no-ff} --continue + exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \ + --continue fi do_rest return 0 @@ -840,6 +846,9 @@ To continue rebase after editing, run: exit ;; +show-current-patch) + exec git show REBASE_HEAD -- + ;; esac comment_for_reflog start @@ -855,6 +864,7 @@ fi orig_head=$(git rev-parse --verify HEAD) || die "$(gettext "No HEAD?")" mkdir -p "$state_dir" || die "$(eval_gettext "Could not create temporary \$state_dir")" +rm -f "$(git rev-parse --git-path REBASE_HEAD)" : > "$state_dir"/interactive || die "$(gettext "Could not mark as interactive")" write_basic_state @@ -1016,7 +1026,8 @@ checkout_onto if test -z "$rebase_root" && test ! -d "$rewritten" then require_clean_work_tree "rebase" - exec git rebase--helper ${force_rebase:+--no-ff} --continue + exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \ + --continue fi do_rest diff --git a/git-rebase--merge.sh b/git-rebase--merge.sh index 06a4723d4d..ceb715453c 100644 --- a/git-rebase--merge.sh +++ b/git-rebase--merge.sh @@ -27,7 +27,8 @@ continue_merge () { cmt=$(cat "$state_dir/current") if ! git diff-index --quiet --ignore-submodules HEAD -- then - if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} --no-verify -C "$cmt" + if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message \ + --no-verify -C "$cmt" then echo "Commit failed, please do not call \"git commit\"" echo "directly, but instead do one of the following: " @@ -57,6 +58,7 @@ call_merge () { echo "$msgnum" >"$state_dir/msgnum" cmt="$(cat "$state_dir/cmt.$msgnum")" echo "$cmt" > "$state_dir/current" + git update-ref REBASE_HEAD "$cmt" hd=$(git rev-parse --verify HEAD) cmt_name=$(git symbolic-ref HEAD 2> /dev/null || echo HEAD) eval GITHEAD_$cmt='"${cmt_name##refs/heads/}~$(($end - $msgnum))"' @@ -137,11 +139,15 @@ skip) finish_rb_merge return ;; +show-current-patch) + exec git show REBASE_HEAD -- + ;; esac mkdir -p "$state_dir" echo "$onto_name" > "$state_dir/onto_name" write_basic_state +rm -f "$(git rev-parse --git-path REBASE_HEAD)" msgnum=0 for cmt in $(git rev-list --reverse --no-merges "$revisions") diff --git a/git-rebase.sh b/git-rebase.sh index fd72a35c65..a1f6e5de6a 100755 --- a/git-rebase.sh +++ b/git-rebase.sh @@ -24,6 +24,7 @@ m,merge! use merging strategies to rebase i,interactive! let the user edit the list of commits to rebase x,exec=! add exec lines after each commit of the editable list k,keep-empty preserve empty commits during rebase +allow-empty-message allow rebasing commits with empty messages f,force-rebase! force rebase even if branch is up to date X,strategy-option=! pass the argument through to the merge strategy stat! display a diffstat of what changed upstream @@ -45,6 +46,7 @@ abort! abort and check out the original branch skip! skip current patch and continue edit-todo! edit the todo list during an interactive rebase quit! abort but keep HEAD where it is +show-current-patch! 
show the patch file being applied or merged " . git-sh-setup set_reflog_action rebase @@ -89,6 +91,7 @@ action= preserve_merges= autosquash= keep_empty= +allow_empty_message= test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t case "$(git config --bool commit.gpgsign)" in true) gpg_sign_opt=-S ;; @@ -181,6 +184,7 @@ You can run "git stash pop" or "git stash drop" at any time. } finish_rebase () { + rm -f "$(git rev-parse --git-path REBASE_HEAD)" apply_autostash && { git gc --auto || true; } && rm -rf "$state_dir" @@ -245,7 +249,7 @@ do --verify) ok_to_skip_pre_rebase= ;; - --continue|--skip|--abort|--quit|--edit-todo) + --continue|--skip|--abort|--quit|--edit-todo|--show-current-patch) test $total_argc -eq 2 || usage action=${1##--} ;; @@ -262,6 +266,9 @@ do --keep-empty) keep_empty=yes ;; + --allow-empty-message) + allow_empty_message=--allow-empty-message + ;; --preserve-merges) preserve_merges=t test -z "$interactive_rebase" && interactive_rebase=implied @@ -412,6 +419,10 @@ quit) edit-todo) run_specific_rebase ;; +show-current-patch) + run_specific_rebase + die "BUG: run_specific_rebase is not supposed to return here" + ;; esac # Make sure no rebase is in progress diff --git a/git-send-email.perl b/git-send-email.perl index edcc6d3469..2fa7818ca9 100755 --- a/git-send-email.perl +++ b/git-send-email.perl @@ -26,10 +26,13 @@ use Text::ParseWords; use Term::ANSIColor; use File::Temp qw/ tempdir tempfile /; use File::Spec::Functions qw(catdir catfile); -use Error qw(:try); +use Git::LoadCPAN::Error qw(:try); use Cwd qw(abs_path cwd); use Git; use Git::I18N; +use Net::Domain (); +use Net::SMTP (); +use Git::LoadCPAN::Mail::Address; Getopt::Long::Configure qw/ pass_through /; @@ -56,6 +59,7 @@ git send-email --dump-aliases --[no-]cc <str> * Email Cc: --[no-]bcc <str> * Email Bcc: --subject <str> * Email "Subject:" + --reply-to <str> * Email "Reply-To:" --in-reply-to <str> * Email "In-Reply-To:" --[no-]xmailer * Add "X-Mailer:" header (default). --[no-]annotate * Review each patch that will be sent in an editor. @@ -166,13 +170,13 @@ my $re_encoded_word = qr/=\?($re_token)\?($re_token)\?($re_encoded_text)\?=/; # Variables we fill in automatically, or via prompting: my (@to,$no_to,@initial_to,@cc,$no_cc,@initial_cc,@bcclist,$no_bcc,@xh, - $initial_reply_to,$initial_subject,@files, + $initial_in_reply_to,$reply_to,$initial_subject,@files, $author,$sender,$smtp_authpass,$annotate,$use_xmailer,$compose,$time); my $envelope_sender; # Example reply to: -#$initial_reply_to = ''; #<20050203173208.GA23964@foobar.com>'; +#$initial_in_reply_to = ''; #<20050203173208.GA23964@foobar.com>'; my $repo = eval { Git->repository() }; my @repo = $repo ? ($repo) : (); @@ -314,7 +318,8 @@ die __("--dump-aliases incompatible with other options\n") if !$help and $dump_aliases and @ARGV; $rc = GetOptions( "sender|from=s" => \$sender, - "in-reply-to=s" => \$initial_reply_to, + "in-reply-to=s" => \$initial_in_reply_to, + "reply-to=s" => \$reply_to, "subject=s" => \$initial_subject, "to=s" => \@initial_to, "to-cmd=s" => \$to_cmd, @@ -378,6 +383,10 @@ unless ($rc) { die __("Cannot run git format-patch from outside a repository\n") if $format_patch and not $repo; +die __("`batch-size` and `relogin` must be specified together " . 
+ "(via command-line or configuration option)\n") + if defined $relogin_delay and not defined $batch_size; + # Now, let's fill any that aren't set in with defaults: sub read_config { @@ -489,7 +498,7 @@ my ($repoauthor, $repocommitter); ($repocommitter) = Git::ident_person(@repo, 'committer'); sub parse_address_line { - return Git::parse_mailboxes($_[0]); + return map { $_->format } Mail::Address->parse($_[0]); } sub split_addrs { @@ -676,7 +685,8 @@ if ($compose) { my $tpl_sender = $sender || $repoauthor || $repocommitter || ''; my $tpl_subject = $initial_subject || ''; - my $tpl_reply_to = $initial_reply_to || ''; + my $tpl_in_reply_to = $initial_in_reply_to || ''; + my $tpl_reply_to = $reply_to || ''; print $c <<EOT1, Git::prefix_lines("GIT: ", __ <<EOT2), <<EOT3; From $tpl_sender # This line is ignored. @@ -688,8 +698,9 @@ for the patch you are writing. Clear the body content if you don't wish to send a summary. EOT2 From: $tpl_sender +Reply-To: $tpl_reply_to Subject: $tpl_subject -In-Reply-To: $tpl_reply_to +In-Reply-To: $tpl_in_reply_to EOT3 for my $f (@files) { @@ -703,57 +714,73 @@ EOT3 do_edit($compose_filename); } - open my $c2, ">", $compose_filename . ".final" - or die sprintf(__("Failed to open %s.final: %s"), $compose_filename, $!); - open $c, "<", $compose_filename or die sprintf(__("Failed to open %s: %s"), $compose_filename, $!); - my $need_8bit_cte = file_has_nonascii($compose_filename); - my $in_body = 0; - my $summary_empty = 1; if (!defined $compose_encoding) { $compose_encoding = "UTF-8"; } - while(<$c>) { - next if m/^GIT:/; - if ($in_body) { - $summary_empty = 0 unless (/^\n$/); - } elsif (/^\n$/) { - $in_body = 1; - if ($need_8bit_cte) { - print $c2 "MIME-Version: 1.0\n", - "Content-Type: text/plain; ", - "charset=$compose_encoding\n", - "Content-Transfer-Encoding: 8bit\n"; - } - } elsif (/^MIME-Version:/i) { - $need_8bit_cte = 0; - } elsif (/^Subject:\s*(.+)\s*$/i) { - $initial_subject = $1; - my $subject = $initial_subject; - $_ = "Subject: " . - quote_subject($subject, $compose_encoding) . - "\n"; - } elsif (/^In-Reply-To:\s*(.+)\s*$/i) { - $initial_reply_to = $1; - next; - } elsif (/^From:\s*(.+)\s*$/i) { - $sender = $1; - next; - } elsif (/^(?:To|Cc|Bcc):/i) { - print __("To/Cc/Bcc fields are not interpreted yet, they have been ignored\n"); - next; + + my %parsed_email; + while (my $line = <$c>) { + next if $line =~ m/^GIT:/; + parse_header_line($line, \%parsed_email); + if ($line =~ /^$/) { + $parsed_email{'body'} = filter_body($c); } - print $c2 $_; } close $c; - close $c2; - if ($summary_empty) { + open my $c2, ">", $compose_filename . ".final" + or die sprintf(__("Failed to open %s.final: %s"), $compose_filename, $!); + + + if ($parsed_email{'From'}) { + $sender = delete($parsed_email{'From'}); + } + if ($parsed_email{'In-Reply-To'}) { + $initial_in_reply_to = delete($parsed_email{'In-Reply-To'}); + } + if ($parsed_email{'Reply-To'}) { + $reply_to = delete($parsed_email{'Reply-To'}); + } + if ($parsed_email{'Subject'}) { + $initial_subject = delete($parsed_email{'Subject'}); + print $c2 "Subject: " . + quote_subject($initial_subject, $compose_encoding) . 
+ "\n"; + } + + if ($parsed_email{'MIME-Version'}) { + print $c2 "MIME-Version: $parsed_email{'MIME-Version'}\n", + "Content-Type: $parsed_email{'Content-Type'};\n", + "Content-Transfer-Encoding: $parsed_email{'Content-Transfer-Encoding'}\n"; + delete($parsed_email{'MIME-Version'}); + delete($parsed_email{'Content-Type'}); + delete($parsed_email{'Content-Transfer-Encoding'}); + } elsif (file_has_nonascii($compose_filename)) { + my $content_type = (delete($parsed_email{'Content-Type'}) or + "text/plain; charset=$compose_encoding"); + print $c2 "MIME-Version: 1.0\n", + "Content-Type: $content_type\n", + "Content-Transfer-Encoding: 8bit\n"; + } + # Preserve unknown headers + foreach my $key (keys %parsed_email) { + next if $key eq 'body'; + print $c2 "$key: $parsed_email{$key}"; + } + + if ($parsed_email{'body'}) { + print $c2 "\n$parsed_email{'body'}\n"; + delete($parsed_email{'body'}); + } else { print __("Summary email is empty, skipping it\n"); $compose = -1; } + + close $c2; + } elsif ($annotate) { do_edit(@files); } @@ -792,6 +819,32 @@ sub ask { return; } +sub parse_header_line { + my $lines = shift; + my $parsed_line = shift; + my $addr_pat = join "|", qw(To Cc Bcc); + + foreach (split(/\n/, $lines)) { + if (/^($addr_pat):\s*(.+)$/i) { + $parsed_line->{$1} = [ parse_address_line($2) ]; + } elsif (/^([^:]*):\s*(.+)\s*$/i) { + $parsed_line->{$1} = $2; + } + } +} + +sub filter_body { + my $c = shift; + my $body = ""; + while (my $body_line = <$c>) { + if ($body_line !~ m/^GIT:/) { + $body .= $body_line; + } + } + return $body; +} + + my %broken_encoding; sub file_declares_8bit_cte { @@ -872,16 +925,22 @@ sub expand_one_alias { @initial_cc = process_address_list(@initial_cc); @bcclist = process_address_list(@bcclist); -if ($thread && !defined $initial_reply_to && $prompting) { - $initial_reply_to = ask( +if ($thread && !defined $initial_in_reply_to && $prompting) { + $initial_in_reply_to = ask( __("Message-ID to be used as In-Reply-To for the first email (if any)? 
"), default => "", valid_re => qr/\@.*\./, confirm_only => 1); } -if (defined $initial_reply_to) { - $initial_reply_to =~ s/^\s*<?//; - $initial_reply_to =~ s/>?\s*$//; - $initial_reply_to = "<$initial_reply_to>" if $initial_reply_to ne ''; +if (defined $initial_in_reply_to) { + $initial_in_reply_to =~ s/^\s*<?//; + $initial_in_reply_to =~ s/>?\s*$//; + $initial_in_reply_to = "<$initial_in_reply_to>" if $initial_in_reply_to ne ''; +} + +if (defined $reply_to) { + $reply_to =~ s/^\s+|\s+$//g; + ($reply_to) = expand_aliases($reply_to); + $reply_to = sanitize_address($reply_to); } if (!defined $smtp_server) { @@ -901,7 +960,7 @@ if ($compose && $compose > 0) { } # Variables we set as part of the loop over files -our ($message_id, %mail, $subject, $reply_to, $references, $message, +our ($message_id, %mail, $subject, $in_reply_to, $references, $message, $needs_confirm, $message_num, $ask_default); sub extract_valid_address { @@ -1142,10 +1201,8 @@ sub valid_fqdn { sub maildomain_net { my $maildomain; - if (eval { require Net::Domain; 1 }) { - my $domain = Net::Domain::domainname(); - $maildomain = $domain if valid_fqdn($domain); - } + my $domain = Net::Domain::domainname(); + $maildomain = $domain if valid_fqdn($domain); return $maildomain; } @@ -1153,17 +1210,15 @@ sub maildomain_net { sub maildomain_mta { my $maildomain; - if (eval { require Net::SMTP; 1 }) { - for my $host (qw(mailhost localhost)) { - my $smtp = Net::SMTP->new($host); - if (defined $smtp) { - my $domain = $smtp->domain; - $smtp->quit; + for my $host (qw(mailhost localhost)) { + my $smtp = Net::SMTP->new($host); + if (defined $smtp) { + my $domain = $smtp->domain; + $smtp->quit; - $maildomain = $domain if valid_fqdn($domain); + $maildomain = $domain if valid_fqdn($domain); - last if $maildomain; - } + last if $maildomain; } } @@ -1310,11 +1365,14 @@ Message-Id: $message_id if ($use_xmailer) { $header .= "X-Mailer: git-send-email $gitversion\n"; } - if ($reply_to) { + if ($in_reply_to) { - $header .= "In-Reply-To: $reply_to\n"; + $header .= "In-Reply-To: $in_reply_to\n"; $header .= "References: $references\n"; } + if ($reply_to) { + $header .= "Reply-To: $reply_to\n"; + } if (@xh) { $header .= join("\n", @xh) . "\n"; } @@ -1489,8 +1547,8 @@ EOF return 1; } -$reply_to = $initial_reply_to; -$references = $initial_reply_to || ''; +$in_reply_to = $initial_in_reply_to; +$references = $initial_in_reply_to || ''; $subject = $initial_subject; $message_num = 0; @@ -1700,9 +1758,9 @@ foreach my $t (@files) { # set up for the next message if ($thread && $message_was_sent && - ($chain_reply_to || !defined $reply_to || length($reply_to) == 0 || + ($chain_reply_to || !defined $in_reply_to || length($in_reply_to) == 0 || $message_num == 1)) { - $reply_to = $message_id; + $in_reply_to = $message_id; if (length $references > 0) { $references .= "\n $message_id"; } else { diff --git a/git-submodule.sh b/git-submodule.sh index 156255a9e5..24914963ca 100755 --- a/git-submodule.sh +++ b/git-submodule.sh @@ -428,60 +428,7 @@ cmd_deinit() shift done - if test -n "$deinit_all" && test "$#" -ne 0 - then - echo >&2 "$(eval_gettext "pathspec and --all are incompatible")" - usage - fi - if test $# = 0 && test -z "$deinit_all" - then - die "$(eval_gettext "Use '--all' if you really want to deinitialize all submodules")" - fi - - { - git submodule--helper list --prefix "$wt_prefix" "$@" || - echo "#unmatched" $? 
- } | - while read -r mode sha1 stage sm_path - do - die_if_unmatched "$mode" "$sha1" - name=$(git submodule--helper name "$sm_path") || exit - - displaypath=$(git submodule--helper relative-path "$sm_path" "$wt_prefix") - - # Remove the submodule work tree (unless the user already did it) - if test -d "$sm_path" - then - # Protect submodules containing a .git directory - if test -d "$sm_path/.git" - then - die "$(eval_gettext "\ -Submodule work tree '\$displaypath' contains a .git directory -(use 'rm -rf' if you really want to remove it including all of its history)")" - fi - - if test -z "$force" - then - git rm -qn "$sm_path" || - die "$(eval_gettext "Submodule work tree '\$displaypath' contains local modifications; use '-f' to discard them")" - fi - rm -rf "$sm_path" && - say "$(eval_gettext "Cleared directory '\$displaypath'")" || - say "$(eval_gettext "Could not remove submodule work tree '\$displaypath'")" - fi - - mkdir "$sm_path" || say "$(eval_gettext "Could not create empty submodule directory '\$displaypath'")" - - # Remove the .git/config entries (unless the user already did it) - if test -n "$(git config --get-regexp submodule."$name\.")" - then - # Remove the whole section so we have a clean state when - # the user later decides to init this submodule again - url=$(git config submodule."$name".url) - git config --remove-section submodule."$name" 2>/dev/null && - say "$(eval_gettext "Submodule '\$name' (\$url) unregistered for path '\$displaypath'")" - fi - done + git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${prefix:+--prefix "$prefix"} ${force:+--force} ${deinit_all:+--all} "$@" } is_tip_reachable () ( @@ -1036,63 +983,8 @@ cmd_sync() ;; esac done - cd_to_toplevel - { - git submodule--helper list --prefix "$wt_prefix" "$@" || - echo "#unmatched" $? - } | - while read -r mode sha1 stage sm_path - do - die_if_unmatched "$mode" "$sha1" - - # skip inactive submodules - if ! git submodule--helper is-active "$sm_path" - then - continue - fi - - name=$(git submodule--helper name "$sm_path") - url=$(git config -f .gitmodules --get submodule."$name".url) - - # Possibly a url relative to parent - case "$url" in - ./*|../*) - # rewrite foo/bar as ../.. 
to find path from - # submodule work tree to superproject work tree - up_path="$(printf '%s\n' "$sm_path" | sed "s/[^/][^/]*/../g")" && - # guarantee a trailing / - up_path=${up_path%/}/ && - # path from submodule work tree to submodule origin repo - sub_origin_url=$(git submodule--helper resolve-relative-url "$url" "$up_path") && - # path from superproject work tree to submodule origin repo - super_config_url=$(git submodule--helper resolve-relative-url "$url") || exit - ;; - *) - sub_origin_url="$url" - super_config_url="$url" - ;; - esac - - displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix") - say "$(eval_gettext "Synchronizing submodule url for '\$displaypath'")" - git config submodule."$name".url "$super_config_url" - - if test -e "$sm_path"/.git - then - ( - sanitize_submodule_env - cd "$sm_path" - remote=$(get_default_remote) - git config remote."$remote".url "$sub_origin_url" - if test -n "$recursive" - then - prefix="$prefix$sm_path/" - eval cmd_sync - fi - ) - fi - done + git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper sync ${GIT_QUIET:+--quiet} ${recursive:+--recursive} "$@" } cmd_absorbgitdirs() @@ -5,11 +5,11 @@ #include "run-command.h" const char git_usage_string[] = - "git [--version] [--help] [-C <path>] [-c name=value]\n" - " [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n" - " [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n" - " [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n" - " <command> [<args>]"; + N_("git [--version] [--help] [-C <path>] [-c <name>=<value>]\n" + " [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n" + " [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n" + " [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n" + " <command> [<args>]"); const char git_more_info_string[] = N_("'git help -a' and 'git help -g' list available subcommands and some\n" @@ -92,7 +92,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--git-dir")) { if (*argc < 2) { - fprintf(stderr, "No directory given for --git-dir.\n" ); + fprintf(stderr, _("no directory given for --git-dir\n" )); usage(git_usage_string); } setenv(GIT_DIR_ENVIRONMENT, (*argv)[1], 1); @@ -106,7 +106,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--namespace")) { if (*argc < 2) { - fprintf(stderr, "No namespace given for --namespace.\n" ); + fprintf(stderr, _("no namespace given for --namespace\n" )); usage(git_usage_string); } setenv(GIT_NAMESPACE_ENVIRONMENT, (*argv)[1], 1); @@ -120,7 +120,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--work-tree")) { if (*argc < 2) { - fprintf(stderr, "No directory given for --work-tree.\n" ); + fprintf(stderr, _("no directory given for --work-tree\n" )); usage(git_usage_string); } setenv(GIT_WORK_TREE_ENVIRONMENT, (*argv)[1], 1); @@ -134,7 +134,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--super-prefix")) { if (*argc < 2) { - fprintf(stderr, "No prefix given for --super-prefix.\n" ); + fprintf(stderr, _("no prefix given for --super-prefix\n" )); usage(git_usage_string); } setenv(GIT_SUPER_PREFIX_ENVIRONMENT, (*argv)[1], 1); @@ -156,7 +156,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) 
*envchanged = 1; } else if (!strcmp(cmd, "-c")) { if (*argc < 2) { - fprintf(stderr, "-c expects a configuration string\n" ); + fprintf(stderr, _("-c expects a configuration string\n" )); usage(git_usage_string); } git_config_push_parameter((*argv)[1]); @@ -194,12 +194,12 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) *envchanged = 1; } else if (!strcmp(cmd, "-C")) { if (*argc < 2) { - fprintf(stderr, "No directory given for -C.\n" ); + fprintf(stderr, _("no directory given for -C\n" )); usage(git_usage_string); } if ((*argv)[1][0]) { if (chdir((*argv)[1])) - die_errno("Cannot change to '%s'", (*argv)[1]); + die_errno("cannot change to '%s'", (*argv)[1]); if (envchanged) *envchanged = 1; } @@ -209,7 +209,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) list_builtins(); exit(0); } else { - fprintf(stderr, "Unknown option: %s\n", cmd); + fprintf(stderr, _("unknown option: %s\n"), cmd); usage(git_usage_string); } @@ -247,7 +247,7 @@ static int handle_alias(int *argcp, const char ***argv) if (ret >= 0) /* normal exit */ exit(ret); - die_errno("While expanding alias '%s': '%s'", + die_errno("while expanding alias '%s': '%s'", alias_command, alias_string + 1); } count = split_cmdline(alias_string, &new_argv); @@ -256,8 +256,8 @@ static int handle_alias(int *argcp, const char ***argv) split_cmdline_strerror(count)); option_count = handle_options(&new_argv, &count, &envchanged); if (envchanged) - die("alias '%s' changes environment variables\n" - "You can use '!git' in the alias to do this.", + die("alias '%s' changes environment variables.\n" + "You can use '!git' in the alias to do this", alias_command); memmove(new_argv - option_count, new_argv, count * sizeof(char *)); @@ -389,7 +389,7 @@ static struct cmd_struct commands[] = { { "column", cmd_column, RUN_SETUP_GENTLY }, { "commit", cmd_commit, RUN_SETUP | NEED_WORK_TREE }, { "commit-tree", cmd_commit_tree, RUN_SETUP }, - { "config", cmd_config, RUN_SETUP_GENTLY }, + { "config", cmd_config, RUN_SETUP_GENTLY | DELAY_PAGER_CONFIG }, { "count-objects", cmd_count_objects, RUN_SETUP }, { "credential", cmd_credential, RUN_SETUP_GENTLY }, { "describe", cmd_describe, RUN_SETUP }, @@ -684,8 +684,8 @@ int cmd_main(int argc, const char **argv) if (errno != ENOENT) break; if (was_alias) { - fprintf(stderr, "Expansion of alias '%s' failed; " - "'%s' is not a git command\n", + fprintf(stderr, _("expansion of alias '%s' failed; " + "'%s' is not a git command\n"), cmd, argv[0]); exit(1); } @@ -696,7 +696,7 @@ int cmd_main(int argc, const char **argv) break; } - fprintf(stderr, "Failed to run command '%s': %s\n", + fprintf(stderr, _("failed to run command '%s': %s\n"), cmd, strerror(errno)); return 1; diff --git a/gitweb/INSTALL b/gitweb/INSTALL index 408f2859d3..a58e6b3c44 100644 --- a/gitweb/INSTALL +++ b/gitweb/INSTALL @@ -29,12 +29,11 @@ Requirements ------------ - Core git tools - - Perl + - Perl 5.8 - Perl modules: CGI, Encode, Fcntl, File::Find, File::Basename. 
- web server The following optional Perl modules are required for extra features - - Digest::MD5 - for gravatar support - CGI::Fast and FCGI - for running gitweb as FastCGI script - HTML::TagCloud - for fancy tag cloud in project list view - HTTP::Date or Time::ParseDate - to support If-Modified-Since for feeds diff --git a/gitweb/gitweb.perl b/gitweb/gitweb.perl index 2417057f2b..2594a4badb 100755 --- a/gitweb/gitweb.perl +++ b/gitweb/gitweb.perl @@ -20,6 +20,8 @@ use Fcntl ':mode'; use File::Find qw(); use File::Basename qw(basename); use Time::HiRes qw(gettimeofday tv_interval); +use Digest::MD5 qw(md5_hex); + binmode STDOUT, ':utf8'; if (!defined($CGI::VERSION) || $CGI::VERSION < 4.08) { @@ -490,7 +492,6 @@ our %feature = ( # Currently available providers are gravatar and picon. # If an unknown provider is specified, the feature is disabled. - # Gravatar depends on Digest::MD5. # Picon currently relies on the indiana.edu database. # To enable system wide have in $GITWEB_CONFIG @@ -1166,18 +1167,8 @@ sub configure_gitweb_features { our @snapshot_fmts = gitweb_get_feature('snapshot'); @snapshot_fmts = filter_snapshot_fmts(@snapshot_fmts); - # check that the avatar feature is set to a known provider name, - # and for each provider check if the dependencies are satisfied. - # if the provider name is invalid or the dependencies are not met, - # reset $git_avatar to the empty string. our ($git_avatar) = gitweb_get_feature('avatar'); - if ($git_avatar eq 'gravatar') { - $git_avatar = '' unless (eval { require Digest::MD5; 1; }); - } elsif ($git_avatar eq 'picon') { - # no dependencies - } else { - $git_avatar = ''; - } + $git_avatar = '' unless $git_avatar =~ /^(?:gravatar|picon)$/s; our @extra_branch_refs = gitweb_get_feature('extra-branch-refs'); @extra_branch_refs = filter_and_validate_refs (@extra_branch_refs); @@ -2167,7 +2158,7 @@ sub gravatar_url { my $size = shift; $avatar_cache{$email} ||= "//www.gravatar.com/avatar/" . - Digest::MD5::md5_hex($email) . "?s="; + md5_hex($email) . "?s="; return $avatar_cache{$email} . $size; } @@ -18,6 +18,11 @@ static void std_output(struct grep_opt *opt, const void *buf, size_t size) fwrite(buf, size, 1, stdout); } +static void color_set(char *dst, const char *color_bytes) +{ + xsnprintf(dst, COLOR_MAXLEN, "%s", color_bytes); +} + /* * Initialize the grep_defaults template with hardcoded defaults. * We could let the compiler do this, but without C99 initializers @@ -15,6 +15,31 @@ #include "block-sha1/sha1.h" #endif +#ifndef platform_SHA_CTX +/* + * platform's underlying implementation of SHA-1; could be OpenSSL, + * blk_SHA, Apple CommonCrypto, etc... Note that the relevant + * SHA-1 header may have already defined platform_SHA_CTX for our + * own implementations like block-sha1 and ppc-sha1, so we list + * the default for OpenSSL compatible SHA-1 implementations here. 
+ */ +#define platform_SHA_CTX SHA_CTX +#define platform_SHA1_Init SHA1_Init +#define platform_SHA1_Update SHA1_Update +#define platform_SHA1_Final SHA1_Final +#endif + +#define git_SHA_CTX platform_SHA_CTX +#define git_SHA1_Init platform_SHA1_Init +#define git_SHA1_Update platform_SHA1_Update +#define git_SHA1_Final platform_SHA1_Final + +#ifdef SHA1_MAX_BLOCK_SIZE +#include "compat/sha1-chunked.h" +#undef git_SHA1_Update +#define git_SHA1_Update git_SHA1_Update_Chunked +#endif + /* * Note that these constants are suitable for indexing the hash_algos array and * comparing against each other, but are otherwise arbitrary, so they should not @@ -30,9 +55,15 @@ /* Number of algorithms supported (including unknown). */ #define GIT_HASH_NALGOS (GIT_HASH_SHA1 + 1) -typedef void (*git_hash_init_fn)(void *ctx); -typedef void (*git_hash_update_fn)(void *ctx, const void *in, size_t len); -typedef void (*git_hash_final_fn)(unsigned char *hash, void *ctx); +/* A suitably aligned type for stack allocations of hash contexts. */ +union git_hash_ctx { + git_SHA_CTX sha1; +}; +typedef union git_hash_ctx git_hash_ctx; + +typedef void (*git_hash_init_fn)(git_hash_ctx *ctx); +typedef void (*git_hash_update_fn)(git_hash_ctx *ctx, const void *in, size_t len); +typedef void (*git_hash_final_fn)(unsigned char *hash, git_hash_ctx *ctx); struct git_hash_algo { /* @@ -44,9 +75,6 @@ struct git_hash_algo { /* A four-byte version identifier, used in pack indices. */ uint32_t format_id; - /* The size of a hash context (e.g. git_SHA_CTX). */ - size_t ctxsz; - /* The length of the hash in binary. */ size_t rawsz; diff --git a/http-push.c b/http-push.c index 14435ab65d..7dcd9daf62 100644 --- a/http-push.c +++ b/http-push.c @@ -362,7 +362,7 @@ static void start_put(struct transfer_request *request) git_zstream stream; unpacked = read_sha1_file(request->obj->oid.hash, &type, &len); - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1; + hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1; /* Set it up */ git_deflate_init(&stream, zlib_compression_level); @@ -915,6 +915,10 @@ static struct remote_lock *lock_remote(const char *path, long timeout) lock->timeout = -1; } XML_ParserFree(parser); + } else { + fprintf(stderr, + "error: curl result=%d, HTTP code=%ld\n", + results.curl_result, results.http_code); } } else { fprintf(stderr, "Unable to start LOCK request\n"); diff --git a/http-walker.c b/http-walker.c index 1ae8363de2..07c2b1af82 100644 --- a/http-walker.c +++ b/http-walker.c @@ -544,8 +544,10 @@ static int fetch_object(struct walker *walker, unsigned char *sha1) } else if (hashcmp(obj_req->sha1, req->real_sha1)) { ret = error("File %s has bad hash", hex); } else if (req->rename < 0) { - ret = error("unable to write sha1 filename %s", - sha1_file_name(req->sha1)); + struct strbuf buf = STRBUF_INIT; + sha1_file_name(&buf, req->sha1); + ret = error("unable to write sha1 filename %s", buf.buf); + strbuf_release(&buf); } release_http_object_request(req); @@ -69,6 +69,9 @@ static const char *ssl_key; #if LIBCURL_VERSION_NUM >= 0x070908 static const char *ssl_capath; #endif +#if LIBCURL_VERSION_NUM >= 0x071304 +static const char *curl_no_proxy; +#endif #if LIBCURL_VERSION_NUM >= 0x072c00 static const char *ssl_pinnedkey; #endif @@ -77,7 +80,6 @@ static long curl_low_speed_limit = -1; static long curl_low_speed_time = -1; static int curl_ftp_no_epsv; static const char *curl_http_proxy; -static const char *curl_no_proxy; static const char *http_proxy_authmethod; static struct { const 
char *name; @@ -1260,14 +1262,14 @@ static struct fill_chain *fill_cfg; void add_fill_function(void *data, int (*fill)(void *)) { - struct fill_chain *new = xmalloc(sizeof(*new)); + struct fill_chain *new_fill = xmalloc(sizeof(*new_fill)); struct fill_chain **linkp = &fill_cfg; - new->data = data; - new->fill = fill; - new->next = NULL; + new_fill->data = data; + new_fill->fill = fill; + new_fill->next = NULL; while (*linkp) linkp = &(*linkp)->next; - *linkp = new; + *linkp = new_fill; } void fill_active_slots(void) @@ -2234,7 +2236,7 @@ struct http_object_request *new_http_object_request(const char *base_url, unsigned char *sha1) { char *hex = sha1_to_hex(sha1); - const char *filename; + struct strbuf filename = STRBUF_INIT; char prevfile[PATH_MAX]; int prevlocal; char prev_buf[PREV_BUF_SIZE]; @@ -2246,14 +2248,15 @@ struct http_object_request *new_http_object_request(const char *base_url, hashcpy(freq->sha1, sha1); freq->localfile = -1; - filename = sha1_file_name(sha1); + sha1_file_name(&filename, sha1); snprintf(freq->tmpfile, sizeof(freq->tmpfile), - "%s.temp", filename); + "%s.temp", filename.buf); - snprintf(prevfile, sizeof(prevfile), "%s.prev", filename); + snprintf(prevfile, sizeof(prevfile), "%s.prev", filename.buf); unlink_or_warn(prevfile); rename(freq->tmpfile, prevfile); unlink_or_warn(freq->tmpfile); + strbuf_release(&filename); if (freq->localfile != -1) error("fd leakage in start: %d", freq->localfile); @@ -2368,6 +2371,7 @@ void process_http_object_request(struct http_object_request *freq) int finish_http_object_request(struct http_object_request *freq) { struct stat st; + struct strbuf filename = STRBUF_INIT; close(freq->localfile); freq->localfile = -1; @@ -2393,8 +2397,10 @@ int finish_http_object_request(struct http_object_request *freq) unlink_or_warn(freq->tmpfile); return -1; } - freq->rename = - finalize_object_file(freq->tmpfile, sha1_file_name(freq->sha1)); + + sha1_file_name(&filename, freq->sha1); + freq->rename = finalize_object_file(freq->tmpfile, filename.buf); + strbuf_release(&filename); return freq->rename; } diff --git a/imap-send.c b/imap-send.c index 36c7c1b4f6..ffb0a6eca8 100644 --- a/imap-send.c +++ b/imap-send.c @@ -1189,11 +1189,11 @@ bail: */ static void lf_to_crlf(struct strbuf *msg) { - char *new; + char *new_msg; size_t i, j; char lastc; - /* First pass: tally, in j, the size of the new string: */ + /* First pass: tally, in j, the size of the new_msg string: */ for (i = j = 0, lastc = '\0'; i < msg->len; i++) { if (msg->buf[i] == '\n' && lastc != '\r') j++; /* a CR will need to be added here */ @@ -1201,18 +1201,18 @@ static void lf_to_crlf(struct strbuf *msg) j++; } - new = xmallocz(j); + new_msg = xmallocz(j); /* - * Second pass: write the new string. Note that this loop is + * Second pass: write the new_msg string. Note that this loop is * otherwise identical to the first pass. 
*/ for (i = j = 0, lastc = '\0'; i < msg->len; i++) { if (msg->buf[i] == '\n' && lastc != '\r') - new[j++] = '\r'; - lastc = new[j++] = msg->buf[i]; + new_msg[j++] = '\r'; + lastc = new_msg[j++] = msg->buf[i]; } - strbuf_attach(msg, new, j, j + 1); + strbuf_attach(msg, new_msg, j, j + 1); } /* diff --git a/line-log.c b/line-log.c index 545ad0f28b..cdc2257db5 100644 --- a/line-log.c +++ b/line-log.c @@ -151,29 +151,29 @@ static void range_set_union(struct range_set *out, assert(out->nr == 0); while (i < a->nr || j < b->nr) { - struct range *new; + struct range *new_range; if (i < a->nr && j < b->nr) { if (ra[i].start < rb[j].start) - new = &ra[i++]; + new_range = &ra[i++]; else if (ra[i].start > rb[j].start) - new = &rb[j++]; + new_range = &rb[j++]; else if (ra[i].end < rb[j].end) - new = &ra[i++]; + new_range = &ra[i++]; else - new = &rb[j++]; + new_range = &rb[j++]; } else if (i < a->nr) /* b exhausted */ - new = &ra[i++]; + new_range = &ra[i++]; else /* a exhausted */ - new = &rb[j++]; - if (new->start == new->end) + new_range = &rb[j++]; + if (new_range->start == new_range->end) ; /* empty range */ - else if (!out->nr || out->ranges[out->nr-1].end < new->start) { + else if (!out->nr || out->ranges[out->nr-1].end < new_range->start) { range_set_grow(out, 1); - out->ranges[out->nr].start = new->start; - out->ranges[out->nr].end = new->end; + out->ranges[out->nr].start = new_range->start; + out->ranges[out->nr].end = new_range->end; out->nr++; - } else if (out->ranges[out->nr-1].end < new->end) { - out->ranges[out->nr-1].end = new->end; + } else if (out->ranges[out->nr-1].end < new_range->end) { + out->ranges[out->nr-1].end = new_range->end; } } } @@ -696,18 +696,18 @@ static struct line_log_data *line_log_data_merge(struct line_log_data *a, static void add_line_range(struct rev_info *revs, struct commit *commit, struct line_log_data *range) { - struct line_log_data *old = NULL; - struct line_log_data *new = NULL; + struct line_log_data *old_line = NULL; + struct line_log_data *new_line = NULL; - old = lookup_decoration(&revs->line_log_data, &commit->object); - if (old && range) { - new = line_log_data_merge(old, range); - free_line_log_data(old); + old_line = lookup_decoration(&revs->line_log_data, &commit->object); + if (old_line && range) { + new_line = line_log_data_merge(old_line, range); + free_line_log_data(old_line); } else if (range) - new = line_log_data_copy(range); + new_line = line_log_data_copy(range); - if (new) - add_decoration(&revs->line_log_data, &commit->object, new); + if (new_line) + add_decoration(&revs->line_log_data, &commit->object, new_line); } static void clear_commit_line_range(struct rev_info *revs, struct commit *commit) @@ -1042,12 +1042,12 @@ static int process_diff_filepair(struct rev_info *rev, static struct diff_filepair *diff_filepair_dup(struct diff_filepair *pair) { - struct diff_filepair *new = xmalloc(sizeof(struct diff_filepair)); - new->one = pair->one; - new->two = pair->two; - new->one->count++; - new->two->count++; - return new; + struct diff_filepair *new_filepair = xmalloc(sizeof(struct diff_filepair)); + new_filepair->one = pair->one; + new_filepair->two = pair->two; + new_filepair->one->count++; + new_filepair->two->count++; + return new_filepair; } static void free_diffqueues(int n, struct diff_queue_struct *dq) diff --git a/list-objects-filter-options.c b/list-objects-filter-options.c index 4c5b34e949..6a3cc985c4 100644 --- a/list-objects-filter-options.c +++ b/list-objects-filter-options.c @@ -21,29 +21,36 @@ * subordinate commands when 
necessary. We also "intern" the arg for * the convenience of the current command. */ -int parse_list_objects_filter(struct list_objects_filter_options *filter_options, - const char *arg) +static int gently_parse_list_objects_filter( + struct list_objects_filter_options *filter_options, + const char *arg, + struct strbuf *errbuf) { const char *v0; - if (filter_options->choice) - die(_("multiple object filter types cannot be combined")); + if (filter_options->choice) { + if (errbuf) { + strbuf_init(errbuf, 0); + strbuf_addstr( + errbuf, + _("multiple filter-specs cannot be combined")); + } + return 1; + } filter_options->filter_spec = strdup(arg); if (!strcmp(arg, "blob:none")) { filter_options->choice = LOFC_BLOB_NONE; return 0; - } - if (skip_prefix(arg, "blob:limit=", &v0)) { - if (!git_parse_ulong(v0, &filter_options->blob_limit_value)) - die(_("invalid filter-spec expression '%s'"), arg); - filter_options->choice = LOFC_BLOB_LIMIT; - return 0; - } + } else if (skip_prefix(arg, "blob:limit=", &v0)) { + if (git_parse_ulong(v0, &filter_options->blob_limit_value)) { + filter_options->choice = LOFC_BLOB_LIMIT; + return 0; + } - if (skip_prefix(arg, "sparse:oid=", &v0)) { + } else if (skip_prefix(arg, "sparse:oid=", &v0)) { struct object_context oc; struct object_id sparse_oid; @@ -57,15 +64,27 @@ int parse_list_objects_filter(struct list_objects_filter_options *filter_options filter_options->sparse_oid_value = oiddup(&sparse_oid); filter_options->choice = LOFC_SPARSE_OID; return 0; - } - if (skip_prefix(arg, "sparse:path=", &v0)) { + } else if (skip_prefix(arg, "sparse:path=", &v0)) { filter_options->choice = LOFC_SPARSE_PATH; filter_options->sparse_path_value = strdup(v0); return 0; } - die(_("invalid filter-spec expression '%s'"), arg); + if (errbuf) { + strbuf_init(errbuf, 0); + strbuf_addf(errbuf, "invalid filter-spec '%s'", arg); + } + memset(filter_options, 0, sizeof(*filter_options)); + return 1; +} + +int parse_list_objects_filter(struct list_objects_filter_options *filter_options, + const char *arg) +{ + struct strbuf buf = STRBUF_INIT; + if (gently_parse_list_objects_filter(filter_options, arg, &buf)) + die("%s", buf.buf); return 0; } @@ -75,7 +94,7 @@ int opt_parse_list_objects_filter(const struct option *opt, struct list_objects_filter_options *filter_options = opt->value; if (unset || !arg) { - list_objects_filter_release(filter_options); + list_objects_filter_set_no_filter(filter_options); return 0; } @@ -90,3 +109,44 @@ void list_objects_filter_release( free(filter_options->sparse_path_value); memset(filter_options, 0, sizeof(*filter_options)); } + +void partial_clone_register( + const char *remote, + const struct list_objects_filter_options *filter_options) +{ + /* + * Record the name of the partial clone remote in the + * config and in the global variable -- the latter is + * used throughout to indicate that partial clone is + * enabled and to expect missing objects. + */ + if (repository_format_partial_clone && + *repository_format_partial_clone && + strcmp(remote, repository_format_partial_clone)) + die(_("cannot change partial clone promisor remote")); + + git_config_set("core.repositoryformatversion", "1"); + git_config_set("extensions.partialclone", remote); + + repository_format_partial_clone = xstrdup(remote); + + /* + * Record the initial filter-spec in the config as + * the default for subsequent fetches from this remote. 
+ */ + core_partial_clone_filter_default = + xstrdup(filter_options->filter_spec); + git_config_set("core.partialclonefilter", + core_partial_clone_filter_default); +} + +void partial_clone_get_default_filter_spec( + struct list_objects_filter_options *filter_options) +{ + /* + * Parse default value, but silently ignore it if it is invalid. + */ + gently_parse_list_objects_filter(filter_options, + core_partial_clone_filter_default, + NULL); +} diff --git a/list-objects-filter-options.h b/list-objects-filter-options.h index eea44a1a51..0000a61f82 100644 --- a/list-objects-filter-options.h +++ b/list-objects-filter-options.h @@ -31,6 +31,11 @@ struct list_objects_filter_options { enum list_objects_filter_choice choice; /* + * Choice is LOFC_DISABLED because "--no-filter" was requested. + */ + unsigned int no_filter : 1; + + /* * Parsed values (fields) from within the filter-spec. These are * choice-specific; not all values will be defined for any given * choice. @@ -58,4 +63,17 @@ int opt_parse_list_objects_filter(const struct option *opt, void list_objects_filter_release( struct list_objects_filter_options *filter_options); +static inline void list_objects_filter_set_no_filter( + struct list_objects_filter_options *filter_options) +{ + list_objects_filter_release(filter_options); + filter_options->no_filter = 1; +} + +void partial_clone_register( + const char *remote, + const struct list_objects_filter_options *filter_options); +void partial_clone_get_default_filter_spec( + struct list_objects_filter_options *filter_options); + #endif /* LIST_OBJECTS_FILTER_OPTIONS_H */ diff --git a/list-objects.c b/list-objects.c index 0966cdc9fa..168bef688a 100644 --- a/list-objects.c +++ b/list-objects.c @@ -9,6 +9,7 @@ #include "list-objects.h" #include "list-objects-filter.h" #include "list-objects-filter-options.h" +#include "packfile.h" static void process_blob(struct rev_info *revs, struct blob *blob, @@ -30,6 +31,20 @@ static void process_blob(struct rev_info *revs, if (obj->flags & (UNINTERESTING | SEEN)) return; + /* + * Pre-filter known-missing objects when explicitly requested. + * Otherwise, a missing object error message may be reported + * later (depending on other filtering criteria). + * + * Note that this "--exclude-promisor-objects" pre-filtering + * may cause the actual filter to report an incomplete list + * of missing objects. + */ + if (revs->exclude_promisor_objects && + !has_object_file(&obj->oid) && + is_promisor_object(&obj->oid)) + return; + pathlen = path->len; strbuf_addstr(path, name); if (filter_fn) @@ -91,6 +106,8 @@ static void process_tree(struct rev_info *revs, all_entries_interesting: entry_not_interesting; int baselen = base->len; enum list_objects_filter_result r = LOFR_MARK_SEEN | LOFR_DO_SHOW; + int gently = revs->ignore_missing_links || + revs->exclude_promisor_objects; if (!revs->tree_objects) return; @@ -98,9 +115,19 @@ static void process_tree(struct rev_info *revs, die("bad tree object"); if (obj->flags & (UNINTERESTING | SEEN)) return; - if (parse_tree_gently(tree, revs->ignore_missing_links) < 0) { + if (parse_tree_gently(tree, gently) < 0) { if (revs->ignore_missing_links) return; + + /* + * Pre-filter known-missing tree objects when explicitly + * requested. This may cause the actual filter to report + * an incomplete list of missing objects. 
+ */ + if (revs->exclude_promisor_objects && + is_promisor_object(&obj->oid)) + return; + die("bad tree object %s", oid_to_hex(&obj->oid)); } diff --git a/log-tree.c b/log-tree.c index 5c6b09034c..bdf23c5f7b 100644 --- a/log-tree.c +++ b/log-tree.c @@ -499,7 +499,7 @@ static void show_one_mergetag(struct commit *commit, int status, nth; size_t payload_size, gpg_message_offset; - hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), oid.hash); + hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &oid); tag = lookup_tag(&oid); if (!tag) return; /* error message already given */ diff --git a/match-trees.c b/match-trees.c index 396b7338df..0ca99d5162 100644 --- a/match-trees.c +++ b/match-trees.c @@ -158,22 +158,20 @@ static void match_trees(const struct object_id *hash1, } /* - * A tree "hash1" has a subdirectory at "prefix". Come up with a - * tree object by replacing it with another tree "hash2". + * A tree "oid1" has a subdirectory at "prefix". Come up with a tree object by + * replacing it with another tree "oid2". */ -static int splice_tree(const unsigned char *hash1, - const char *prefix, - const unsigned char *hash2, - unsigned char *result) +static int splice_tree(const struct object_id *oid1, const char *prefix, + const struct object_id *oid2, struct object_id *result) { char *subpath; int toplen; char *buf; unsigned long sz; struct tree_desc desc; - unsigned char *rewrite_here; - const unsigned char *rewrite_with; - unsigned char subtree[20]; + struct object_id *rewrite_here; + const struct object_id *rewrite_with; + struct object_id subtree; enum object_type type; int status; @@ -182,9 +180,9 @@ static int splice_tree(const unsigned char *hash1, if (*subpath) subpath++; - buf = read_sha1_file(hash1, &type, &sz); + buf = read_sha1_file(oid1->hash, &type, &sz); if (!buf) - die("cannot read tree %s", sha1_to_hex(hash1)); + die("cannot read tree %s", oid_to_hex(oid1)); init_tree_desc(&desc, buf, sz); rewrite_here = NULL; @@ -197,26 +195,26 @@ static int splice_tree(const unsigned char *hash1, if (strlen(name) == toplen && !memcmp(name, prefix, toplen)) { if (!S_ISDIR(mode)) - die("entry %s in tree %s is not a tree", - name, sha1_to_hex(hash1)); - rewrite_here = (unsigned char *) oid->hash; + die("entry %s in tree %s is not a tree", name, + oid_to_hex(oid1)); + rewrite_here = (struct object_id *)oid; break; } update_tree_entry(&desc); } if (!rewrite_here) - die("entry %.*s not found in tree %s", - toplen, prefix, sha1_to_hex(hash1)); + die("entry %.*s not found in tree %s", toplen, prefix, + oid_to_hex(oid1)); if (*subpath) { - status = splice_tree(rewrite_here, subpath, hash2, subtree); + status = splice_tree(rewrite_here, subpath, oid2, &subtree); if (status) return status; - rewrite_with = subtree; + rewrite_with = &subtree; + } else { + rewrite_with = oid2; } - else - rewrite_with = hash2; - hashcpy(rewrite_here, rewrite_with); - status = write_sha1_file(buf, sz, tree_type, result); + oidcpy(rewrite_here, rewrite_with); + status = write_object_file(buf, sz, tree_type, result); free(buf); return status; } @@ -280,7 +278,7 @@ void shift_tree(const struct object_id *hash1, if (!*add_prefix) return; - splice_tree(hash1->hash, add_prefix, hash2->hash, shifted->hash); + splice_tree(hash1, add_prefix, hash2, shifted); } /* @@ -334,7 +332,7 @@ void shift_tree_by(const struct object_id *hash1, * shift tree2 down by adding shift_prefix above it * to match tree1. 
*/ - splice_tree(hash1->hash, shift_prefix, hash2->hash, shifted->hash); + splice_tree(hash1, shift_prefix, hash2, shifted); else /* * shift tree2 up by removing shift_prefix from it diff --git a/merge-recursive.c b/merge-recursive.c index cc5fa0a949..869092f7b9 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -513,6 +513,25 @@ static void record_df_conflict_files(struct merge_options *o, struct rename { struct diff_filepair *pair; + /* + * Purpose of src_entry and dst_entry: + * + * If 'before' is renamed to 'after' then src_entry will contain + * the versions of 'before' from the merge_base, HEAD, and MERGE in + * stages 1, 2, and 3; dst_entry will contain the respective + * versions of 'after' in corresponding locations. Thus, we have a + * total of six modes and oids, though some will be null. (Stage 0 + * is ignored; we're interested in handling conflicts.) + * + * Since we don't turn on break-rewrites by default, neither + * src_entry nor dst_entry can have all three of their stages have + * non-null oids, meaning at most four of the six will be non-null. + * Also, since this is a rename, both src_entry and dst_entry will + * have at least one non-null oid, meaning at least two will be + * non-null. Of the six oids, a typical rename will have three be + * non-null. Only two implies a rename/delete, and four implies a + * rename/add. + */ struct stage_data *src_entry; struct stage_data *dst_entry; unsigned processed:1; @@ -1009,8 +1028,9 @@ static int merge_file_1(struct merge_options *o, if ((merge_status < 0) || !result_buf.ptr) ret = err(o, _("Failed to execute internal merge")); - if (!ret && write_sha1_file(result_buf.ptr, result_buf.size, - blob_type, result->oid.hash)) + if (!ret && + write_object_file(result_buf.ptr, result_buf.size, + blob_type, &result->oid)) ret = err(o, _("Unable to add %s to database"), a->path); @@ -1998,10 +2018,10 @@ int merge_trees(struct merge_options *o, get_files_dirs(o, merge); entries = get_unmerged(); - record_df_conflict_files(o, entries); re_head = get_renames(o, head, common, head, merge, entries); re_merge = get_renames(o, merge, common, head, merge, entries); clean = process_renames(o, re_head, re_merge); + record_df_conflict_files(o, entries); if (clean < 0) goto cleanup; for (i = entries->nr-1; 0 <= i; i--) { @@ -2070,7 +2090,7 @@ int merge_recursive(struct merge_options *o, { struct commit_list *iter; struct commit *merged_common_ancestors; - struct tree *mrtree = mrtree; + struct tree *mrtree; int clean; if (show(o, 4)) { @@ -2198,11 +2218,13 @@ int merge_recursive_generic(struct merge_options *o, hold_locked_index(&lock, LOCK_DIE_ON_ERROR); clean = merge_recursive(o, head_commit, next_commit, ca, result); - if (clean < 0) + if (clean < 0) { + rollback_lock_file(&lock); return clean; + } - if (active_cache_changed && - write_locked_index(&the_index, &lock, COMMIT_LOCK)) + if (write_locked_index(&the_index, &lock, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) return err(o, _("Unable to write index.")); return clean ? 
0 : 1; @@ -113,17 +113,23 @@ int checkout_fast_forward(const struct object_id *head, setup_unpack_trees_porcelain(&opts, "merge"); trees[nr_trees] = parse_tree_indirect(head); - if (!trees[nr_trees++]) + if (!trees[nr_trees++]) { + rollback_lock_file(&lock_file); return -1; + } trees[nr_trees] = parse_tree_indirect(remote); - if (!trees[nr_trees++]) + if (!trees[nr_trees++]) { + rollback_lock_file(&lock_file); return -1; + } for (i = 0; i < nr_trees; i++) { parse_tree(trees[i]); init_tree_desc(t+i, trees[i]->buffer, trees[i]->size); } - if (unpack_trees(nr_trees, t, &opts)) + if (unpack_trees(nr_trees, t, &opts)) { + rollback_lock_file(&lock_file); return -1; + } if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK)) return error(_("unable to write new index file")); return 0; diff --git a/mru.c b/mru.c deleted file mode 100644 index 9dedae0287..0000000000 --- a/mru.c +++ /dev/null @@ -1,50 +0,0 @@ -#include "cache.h" -#include "mru.h" - -void mru_append(struct mru *mru, void *item) -{ - struct mru_entry *cur = xmalloc(sizeof(*cur)); - cur->item = item; - cur->prev = mru->tail; - cur->next = NULL; - - if (mru->tail) - mru->tail->next = cur; - else - mru->head = cur; - mru->tail = cur; -} - -void mru_mark(struct mru *mru, struct mru_entry *entry) -{ - /* If we're already at the front of the list, nothing to do */ - if (mru->head == entry) - return; - - /* Otherwise, remove us from our current slot... */ - if (entry->prev) - entry->prev->next = entry->next; - if (entry->next) - entry->next->prev = entry->prev; - else - mru->tail = entry->prev; - - /* And insert us at the beginning. */ - entry->prev = NULL; - entry->next = mru->head; - if (mru->head) - mru->head->prev = entry; - mru->head = entry; -} - -void mru_clear(struct mru *mru) -{ - struct mru_entry *p = mru->head; - - while (p) { - struct mru_entry *to_free = p; - p = p->next; - free(to_free); - } - mru->head = mru->tail = NULL; -} diff --git a/mru.h b/mru.h deleted file mode 100644 index 42e4aeaa10..0000000000 --- a/mru.h +++ /dev/null @@ -1,45 +0,0 @@ -#ifndef MRU_H -#define MRU_H - -/** - * A simple most-recently-used cache, backed by a doubly-linked list. - * - * Usage is roughly: - * - * // Create a list. Zero-initialization is required. - * static struct mru cache; - * mru_append(&cache, item); - * ... - * - * // Iterate in MRU order. - * struct mru_entry *p; - * for (p = cache.head; p; p = p->next) { - * if (matches(p->item)) - * break; - * } - * - * // Mark an item as used, moving it to the front of the list. - * mru_mark(&cache, p); - * - * // Reset the list to empty, cleaning up all resources. - * mru_clear(&cache); - * - * Note that you SHOULD NOT call mru_mark() and then continue traversing the - * list; it reorders the marked item to the front of the list, and therefore - * you will begin traversing the whole list again. 
- */ - -struct mru_entry { - void *item; - struct mru_entry *prev, *next; -}; - -struct mru { - struct mru_entry *head, *tail; -}; - -void mru_append(struct mru *mru, void *item); -void mru_mark(struct mru *mru, struct mru_entry *entry); -void mru_clear(struct mru *mru); - -#endif /* MRU_H */ diff --git a/name-hash.c b/name-hash.c index 45c98db0a0..163849831c 100644 --- a/name-hash.c +++ b/name-hash.c @@ -578,6 +578,8 @@ static void threaded_lazy_init_name_hash( static void lazy_init_name_hash(struct index_state *istate) { + uint64_t start = getnanotime(); + if (istate->name_hash_initialized) return; hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr); @@ -600,6 +602,7 @@ static void lazy_init_name_hash(struct index_state *istate) } istate->name_hash_initialized = 1; + trace_performance_since(start, "initialize name hash"); } /* @@ -696,12 +699,12 @@ void adjust_dirname_case(struct index_state *istate, char *name) if (*ptr == '/') { struct dir_entry *dir; - ptr++; - dir = find_dir_entry(istate, name, ptr - name + 1); + dir = find_dir_entry(istate, name, ptr - name); if (dir) { memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr); - startPtr = ptr; + startPtr = ptr + 1; } + ptr++; } } } diff --git a/notes-cache.c b/notes-cache.c index 17ee8602b3..398e61d5e9 100644 --- a/notes-cache.c +++ b/notes-cache.c @@ -54,10 +54,10 @@ int notes_cache_write(struct notes_cache *c) if (!c->tree.dirty) return 0; - if (write_notes_tree(&c->tree, tree_oid.hash)) + if (write_notes_tree(&c->tree, &tree_oid)) return -1; - if (commit_tree(c->validity, strlen(c->validity), tree_oid.hash, NULL, - commit_oid.hash, NULL, NULL) < 0) + if (commit_tree(c->validity, strlen(c->validity), &tree_oid, NULL, + &commit_oid, NULL, NULL) < 0) return -1; if (update_ref("update notes cache", c->tree.update_ref, &commit_oid, NULL, 0, UPDATE_REFS_QUIET_ON_ERR) < 0) @@ -88,7 +88,7 @@ int notes_cache_put(struct notes_cache *c, struct object_id *key_oid, { struct object_id value_oid; - if (write_sha1_file(data, size, "blob", value_oid.hash) < 0) + if (write_object_file(data, size, "blob", &value_oid) < 0) return -1; return add_note(&c->tree, key_oid, &value_oid, NULL); } diff --git a/notes-merge.c b/notes-merge.c index 0f6573cb17..c09c5e0e47 100644 --- a/notes-merge.c +++ b/notes-merge.c @@ -642,9 +642,8 @@ int notes_merge(struct notes_merge_options *o, struct commit_list *parents = NULL; commit_list_insert(remote, &parents); /* LIFO order */ commit_list_insert(local, &parents); - create_notes_commit(local_tree, parents, - o->commit_msg.buf, o->commit_msg.len, - result_oid->hash); + create_notes_commit(local_tree, parents, o->commit_msg.buf, + o->commit_msg.len, result_oid); } found_result: @@ -718,8 +717,8 @@ int notes_merge_commit(struct notes_merge_options *o, strbuf_setlen(&path, baselen); } - create_notes_commit(partial_tree, partial_commit->parents, - msg, strlen(msg), result_oid->hash); + create_notes_commit(partial_tree, partial_commit->parents, msg, + strlen(msg), result_oid); unuse_commit_buffer(partial_commit, buffer); if (o->verbosity >= 4) printf("Finalized notes merge commit: %s\n", diff --git a/notes-utils.c b/notes-utils.c index 5c8e70c98f..02407fe2a7 100644 --- a/notes-utils.c +++ b/notes-utils.c @@ -6,13 +6,13 @@ void create_notes_commit(struct notes_tree *t, struct commit_list *parents, const char *msg, size_t msg_len, - unsigned char *result_sha1) + struct object_id *result_oid) { struct object_id tree_oid; assert(t->initialized); - if (write_notes_tree(t, tree_oid.hash)) + 
if (write_notes_tree(t, &tree_oid)) die("Failed to write notes tree to database"); if (!parents) { @@ -27,7 +27,8 @@ void create_notes_commit(struct notes_tree *t, struct commit_list *parents, /* else: t->ref points to nothing, assume root/orphan commit */ } - if (commit_tree(msg, msg_len, tree_oid.hash, parents, result_sha1, NULL, NULL)) + if (commit_tree(msg, msg_len, &tree_oid, parents, result_oid, NULL, + NULL)) die("Failed to commit notes tree to database"); } @@ -47,7 +48,7 @@ void commit_notes(struct notes_tree *t, const char *msg) strbuf_addstr(&buf, msg); strbuf_complete_line(&buf); - create_notes_commit(t, NULL, buf.buf, buf.len, commit_oid.hash); + create_notes_commit(t, NULL, buf.buf, buf.len, &commit_oid); strbuf_insert(&buf, 0, "notes: ", 7); /* commit message starts at index 7 */ update_ref(buf.buf, t->update_ref, &commit_oid, NULL, 0, UPDATE_REFS_DIE_ON_ERR); diff --git a/notes-utils.h b/notes-utils.h index 1190578398..5d79cbef51 100644 --- a/notes-utils.h +++ b/notes-utils.h @@ -15,7 +15,8 @@ * The resulting commit SHA1 is stored in result_sha1. */ void create_notes_commit(struct notes_tree *t, struct commit_list *parents, - const char *msg, size_t msg_len, unsigned char *result_sha1); + const char *msg, size_t msg_len, + struct object_id *result_oid); void commit_notes(struct notes_tree *t, const char *msg); @@ -270,8 +270,8 @@ static int note_tree_insert(struct notes_tree *t, struct int_node *tree, if (!oidcmp(&l->val_oid, &entry->val_oid)) return 0; - ret = combine_notes(l->val_oid.hash, - entry->val_oid.hash); + ret = combine_notes(&l->val_oid, + &entry->val_oid); if (!ret && is_null_oid(&l->val_oid)) note_tree_remove(t, tree, n, entry); free(entry); @@ -667,7 +667,7 @@ static int tree_write_stack_finish_subtree(struct tree_write_stack *tws) ret = tree_write_stack_finish_subtree(n); if (ret) return ret; - ret = write_sha1_file(n->buf.buf, n->buf.len, tree_type, s.hash); + ret = write_object_file(n->buf.buf, n->buf.len, tree_type, &s); if (ret) return ret; strbuf_release(&n->buf); @@ -786,8 +786,8 @@ static int prune_notes_helper(const struct object_id *object_oid, return 0; } -int combine_notes_concatenate(unsigned char *cur_sha1, - const unsigned char *new_sha1) +int combine_notes_concatenate(struct object_id *cur_oid, + const struct object_id *new_oid) { char *cur_msg = NULL, *new_msg = NULL, *buf; unsigned long cur_len, new_len, buf_len; @@ -795,18 +795,18 @@ int combine_notes_concatenate(unsigned char *cur_sha1, int ret; /* read in both note blob objects */ - if (!is_null_sha1(new_sha1)) - new_msg = read_sha1_file(new_sha1, &new_type, &new_len); + if (!is_null_oid(new_oid)) + new_msg = read_sha1_file(new_oid->hash, &new_type, &new_len); if (!new_msg || !new_len || new_type != OBJ_BLOB) { free(new_msg); return 0; } - if (!is_null_sha1(cur_sha1)) - cur_msg = read_sha1_file(cur_sha1, &cur_type, &cur_len); + if (!is_null_oid(cur_oid)) + cur_msg = read_sha1_file(cur_oid->hash, &cur_type, &cur_len); if (!cur_msg || !cur_len || cur_type != OBJ_BLOB) { free(cur_msg); free(new_msg); - hashcpy(cur_sha1, new_sha1); + oidcpy(cur_oid, new_oid); return 0; } @@ -825,20 +825,20 @@ int combine_notes_concatenate(unsigned char *cur_sha1, free(new_msg); /* create a new blob object from buf */ - ret = write_sha1_file(buf, buf_len, blob_type, cur_sha1); + ret = write_object_file(buf, buf_len, blob_type, cur_oid); free(buf); return ret; } -int combine_notes_overwrite(unsigned char *cur_sha1, - const unsigned char *new_sha1) +int combine_notes_overwrite(struct object_id *cur_oid, + const 
struct object_id *new_oid) { - hashcpy(cur_sha1, new_sha1); + oidcpy(cur_oid, new_oid); return 0; } -int combine_notes_ignore(unsigned char *cur_sha1, - const unsigned char *new_sha1) +int combine_notes_ignore(struct object_id *cur_oid, + const struct object_id *new_oid) { return 0; } @@ -848,17 +848,17 @@ int combine_notes_ignore(unsigned char *cur_sha1, * newlines removed. */ static int string_list_add_note_lines(struct string_list *list, - const unsigned char *sha1) + const struct object_id *oid) { char *data; unsigned long len; enum object_type t; - if (is_null_sha1(sha1)) + if (is_null_oid(oid)) return 0; /* read_sha1_file NUL-terminates */ - data = read_sha1_file(sha1, &t, &len); + data = read_sha1_file(oid->hash, &t, &len); if (t != OBJ_BLOB || !data || !len) { free(data); return t != OBJ_BLOB || !data; @@ -884,17 +884,17 @@ static int string_list_join_lines_helper(struct string_list_item *item, return 0; } -int combine_notes_cat_sort_uniq(unsigned char *cur_sha1, - const unsigned char *new_sha1) +int combine_notes_cat_sort_uniq(struct object_id *cur_oid, + const struct object_id *new_oid) { struct string_list sort_uniq_list = STRING_LIST_INIT_DUP; struct strbuf buf = STRBUF_INIT; int ret = 1; /* read both note blob objects into unique_lines */ - if (string_list_add_note_lines(&sort_uniq_list, cur_sha1)) + if (string_list_add_note_lines(&sort_uniq_list, cur_oid)) goto out; - if (string_list_add_note_lines(&sort_uniq_list, new_sha1)) + if (string_list_add_note_lines(&sort_uniq_list, new_oid)) goto out; string_list_remove_empty_items(&sort_uniq_list, 0); string_list_sort(&sort_uniq_list); @@ -905,7 +905,7 @@ int combine_notes_cat_sort_uniq(unsigned char *cur_sha1, string_list_join_lines_helper, &buf)) goto out; - ret = write_sha1_file(buf.buf, buf.len, blob_type, cur_sha1); + ret = write_object_file(buf.buf, buf.len, blob_type, cur_oid); out: strbuf_release(&buf); @@ -1123,11 +1123,12 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn, return for_each_note_helper(t, t->root, 0, 0, flags, fn, cb_data); } -int write_notes_tree(struct notes_tree *t, unsigned char *result) +int write_notes_tree(struct notes_tree *t, struct object_id *result) { struct tree_write_stack root; struct write_each_note_data cb_data; int ret; + int flags; if (!t) t = &default_notes_tree; @@ -1141,12 +1142,12 @@ int write_notes_tree(struct notes_tree *t, unsigned char *result) cb_data.next_non_note = t->first_non_note; /* Write tree objects representing current notes tree */ - ret = for_each_note(t, FOR_EACH_NOTE_DONT_UNPACK_SUBTREES | - FOR_EACH_NOTE_YIELD_SUBTREES, - write_each_note, &cb_data) || - write_each_non_note_until(NULL, &cb_data) || - tree_write_stack_finish_subtree(&root) || - write_sha1_file(root.buf.buf, root.buf.len, tree_type, result); + flags = FOR_EACH_NOTE_DONT_UNPACK_SUBTREES | + FOR_EACH_NOTE_YIELD_SUBTREES; + ret = for_each_note(t, flags, write_each_note, &cb_data) || + write_each_non_note_until(NULL, &cb_data) || + tree_write_stack_finish_subtree(&root) || + write_object_file(root.buf.buf, root.buf.len, tree_type, result); strbuf_release(&root.buf); return ret; } @@ -9,27 +9,32 @@ * When adding a new note annotating the same object as an existing note, it is * up to the caller to decide how to combine the two notes. The decision is * made by passing in a function of the following form. The function accepts - * two SHA1s -- of the existing note and the new note, respectively. The + * two object_ids -- of the existing note and the new note, respectively. 
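
The write_sha1_file() calls in this file now go through write_object_file(), which takes a struct object_id instead of a bare unsigned char array. A minimal sketch of the new calling convention, assuming it is compiled inside git.git so that cache.h supplies struct object_id, blob_type and write_object_file(); the payload string is made up for illustration:

    #include "cache.h"

    /* Illustration only: store a small blob and report its object id. */
    static int store_example_blob(struct object_id *result)
    {
            const char *payload = "example note body\n";

            /*
             * write_object_file() hashes the buffer, writes the loose object
             * if it is not already present, and fills in *result.  A non-zero
             * return signals failure, as in the callers above.
             */
            if (write_object_file(payload, strlen(payload), blob_type, result))
                    return error("could not write example blob");
            return 0;
    }
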
The * function then combines the notes in whatever way it sees fit, and writes the - * resulting SHA1 into the first SHA1 argument (cur_sha1). A non-zero return + * resulting oid into the first argument (cur_oid). A non-zero return * value indicates failure. * - * The two given SHA1s shall both be non-NULL and different from each other. - * Either of them (but not both) may be == null_sha1, which indicates an - * empty/non-existent note. If the resulting SHA1 (cur_sha1) is == null_sha1, + * The two given object_ids shall both be non-NULL and different from each + * other. Either of them (but not both) may be == null_oid, which indicates an + * empty/non-existent note. If the resulting oid (cur_oid) is == null_oid, * the note will be removed from the notes tree. * * The default combine_notes function (you get this when passing NULL) is * combine_notes_concatenate(), which appends the contents of the new note to * the contents of the existing note. */ -typedef int (*combine_notes_fn)(unsigned char *cur_sha1, const unsigned char *new_sha1); +typedef int (*combine_notes_fn)(struct object_id *cur_oid, + const struct object_id *new_oid); /* Common notes combinators */ -int combine_notes_concatenate(unsigned char *cur_sha1, const unsigned char *new_sha1); -int combine_notes_overwrite(unsigned char *cur_sha1, const unsigned char *new_sha1); -int combine_notes_ignore(unsigned char *cur_sha1, const unsigned char *new_sha1); -int combine_notes_cat_sort_uniq(unsigned char *cur_sha1, const unsigned char *new_sha1); +int combine_notes_concatenate(struct object_id *cur_oid, + const struct object_id *new_oid); +int combine_notes_overwrite(struct object_id *cur_oid, + const struct object_id *new_oid); +int combine_notes_ignore(struct object_id *cur_oid, + const struct object_id *new_oid); +int combine_notes_cat_sort_uniq(struct object_id *cur_oid, + const struct object_id *new_oid); /* * Notes tree object @@ -212,7 +217,7 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn, * Write the given notes_tree structure to the object database * * Creates a new tree object encapsulating the current state of the given - * notes_tree, and stores its SHA1 into the 'result' argument. + * notes_tree, and stores its object id into the 'result' argument. * * Returns zero on success, non-zero on failure. * @@ -220,7 +225,7 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn, * this function has returned zero. Please also remember to create a * corresponding commit object, and update the appropriate notes ref. 
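
The contract spelled out above (both object_ids non-NULL, at most one equal to null_oid, writing null_oid into cur_oid removes the note, non-zero return means failure) is enough to write a custom combiner. A hedged sketch of one hypothetical policy, dropping the annotation whenever an old and a new note collide; only the typedef and helpers shown in this patch are relied on:

    #include "cache.h"
    #include "notes.h"

    /*
     * Hypothetical combine_notes_fn: if both the existing and the new note
     * are non-empty, remove the annotation by storing null_oid in *cur_oid;
     * otherwise keep whichever of the two is non-empty.
     */
    static int combine_notes_drop_on_conflict(struct object_id *cur_oid,
                                              const struct object_id *new_oid)
    {
            if (is_null_oid(cur_oid))
                    oidcpy(cur_oid, new_oid);  /* no existing note: take the new one */
            else if (!is_null_oid(new_oid))
                    oidclr(cur_oid);           /* collision: ask for removal */
            return 0;                          /* zero means success */
    }

It would be wired up like the built-in combinators, e.g. by passing it as the combine_notes argument of init_notes().
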
*/ -int write_notes_tree(struct notes_tree *t, unsigned char *result); +int write_notes_tree(struct notes_tree *t, struct object_id *result); /* Flags controlling the operation of prune */ #define NOTES_PRUNE_VERBOSE 1 @@ -26,7 +26,7 @@ static const char *object_type_strings[] = { "tag", /* OBJ_TAG = 4 */ }; -const char *typename(unsigned int type) +const char *type_name(unsigned int type) { if (type >= ARRAY_SIZE(object_type_strings)) return NULL; @@ -166,7 +166,7 @@ void *object_as_type(struct object *obj, enum object_type type, int quiet) if (!quiet) error("object %s is a %s, not a %s", oid_to_hex(&obj->oid), - typename(obj->type), typename(type)); + type_name(obj->type), type_name(type)); return NULL; } } @@ -252,7 +252,7 @@ struct object *parse_object(const struct object_id *oid) if (obj && obj->parsed) return obj; - if ((obj && obj->type == OBJ_BLOB) || + if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) || (!obj && has_object_file(oid) && sha1_object_info(oid->hash, NULL) == OBJ_BLOB)) { if (check_sha1_signature(repl, NULL, 0, NULL) < 0) { @@ -265,7 +265,7 @@ struct object *parse_object(const struct object_id *oid) buffer = read_sha1_file(oid->hash, &type, &size); if (buffer) { - if (check_sha1_signature(repl, buffer, size, typename(type)) < 0) { + if (check_sha1_signature(repl, buffer, size, type_name(type)) < 0) { free(buffer); error("sha1 mismatch %s", sha1_to_hex(repl)); return NULL; @@ -28,18 +28,22 @@ struct object_array { #define TYPE_BITS 3 /* * object flag allocation: - * revision.h: 0---------10 26 - * fetch-pack.c: 0---5 - * walker.c: 0-2 - * upload-pack.c: 4 11----------------19 - * builtin/blame.c: 12-13 - * bisect.c: 16 - * bundle.c: 16 - * http-push.c: 16-----19 - * commit.c: 16-----19 - * sha1_name.c: 20 - * list-objects-filter.c: 21 - * builtin/fsck.c: 0--3 + * revision.h: 0---------10 26 + * fetch-pack.c: 0----5 + * walker.c: 0-2 + * upload-pack.c: 4 11----------------19 + * builtin/blame.c: 12-13 + * bisect.c: 16 + * bundle.c: 16 + * http-push.c: 16-----19 + * commit.c: 16-----19 + * sha1_name.c: 20 + * list-objects-filter.c: 21 + * builtin/fsck.c: 0--3 + * builtin/index-pack.c: 2021 + * builtin/pack-objects.c: 20 + * builtin/reflog.c: 10--12 + * builtin/unpack-objects.c: 2021 */ #define FLAG_BITS 27 @@ -53,7 +57,7 @@ struct object { struct object_id oid; }; -extern const char *typename(unsigned int type); +extern const char *type_name(unsigned int type); extern int type_from_string_gently(const char *str, ssize_t, int gentle); #define type_from_string(str) type_from_string_gently(str, -1, 0) diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c index a8df5ce2ab..e01f992884 100644 --- a/pack-bitmap-write.c +++ b/pack-bitmap-write.c @@ -440,19 +440,19 @@ void bitmap_writer_select_commits(struct commit **indexed_commits, } -static int sha1write_ewah_helper(void *f, const void *buf, size_t len) +static int hashwrite_ewah_helper(void *f, const void *buf, size_t len) { - /* sha1write will die on error */ - sha1write(f, buf, len); + /* hashwrite will die on error */ + hashwrite(f, buf, len); return len; } /** * Write the bitmap index to disk */ -static inline void dump_bitmap(struct sha1file *f, struct ewah_bitmap *bitmap) +static inline void dump_bitmap(struct hashfile *f, struct ewah_bitmap *bitmap) { - if (ewah_serialize_to(bitmap, sha1write_ewah_helper, f) < 0) + if (ewah_serialize_to(bitmap, hashwrite_ewah_helper, f) < 0) die("Failed to write bitmap index"); } @@ -462,7 +462,7 @@ static const unsigned char *sha1_access(size_t pos, void *table) return 
index[pos]->oid.hash; } -static void write_selected_commits_v1(struct sha1file *f, +static void write_selected_commits_v1(struct hashfile *f, struct pack_idx_entry **index, uint32_t index_nr) { @@ -477,15 +477,15 @@ static void write_selected_commits_v1(struct sha1file *f, if (commit_pos < 0) die("BUG: trying to write commit not in index"); - sha1write_be32(f, commit_pos); - sha1write_u8(f, stored->xor_offset); - sha1write_u8(f, stored->flags); + hashwrite_be32(f, commit_pos); + hashwrite_u8(f, stored->xor_offset); + hashwrite_u8(f, stored->flags); dump_bitmap(f, stored->write_as); } } -static void write_hash_cache(struct sha1file *f, +static void write_hash_cache(struct hashfile *f, struct pack_idx_entry **index, uint32_t index_nr) { @@ -494,7 +494,7 @@ static void write_hash_cache(struct sha1file *f, for (i = 0; i < index_nr; ++i) { struct object_entry *entry = (struct object_entry *)index[i]; uint32_t hash_value = htonl(entry->hash); - sha1write(f, &hash_value, sizeof(hash_value)); + hashwrite(f, &hash_value, sizeof(hash_value)); } } @@ -511,13 +511,13 @@ void bitmap_writer_finish(struct pack_idx_entry **index, static uint16_t default_version = 1; static uint16_t flags = BITMAP_OPT_FULL_DAG; struct strbuf tmp_file = STRBUF_INIT; - struct sha1file *f; + struct hashfile *f; struct bitmap_disk_header header; int fd = odb_mkstemp(&tmp_file, "pack/tmp_bitmap_XXXXXX"); - f = sha1fd(fd, tmp_file.buf); + f = hashfd(fd, tmp_file.buf); memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)); header.version = htons(default_version); @@ -525,7 +525,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index, header.entry_count = htonl(writer.selected_nr); hashcpy(header.checksum, writer.pack_checksum); - sha1write(f, &header, sizeof(header)); + hashwrite(f, &header, sizeof(header)); dump_bitmap(f, writer.commits); dump_bitmap(f, writer.trees); dump_bitmap(f, writer.blobs); @@ -535,7 +535,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index, if (options & BITMAP_OPT_HASH_CACHE) write_hash_cache(f, index, index_nr); - sha1close(f, NULL, CSUM_FSYNC); + hashclose(f, NULL, CSUM_FSYNC); if (adjust_shared_perm(tmp_file.buf)) die_errno("unable to make temporary bitmap file readable"); diff --git a/pack-check.c b/pack-check.c index 073c1fbd46..8fc7dd1694 100644 --- a/pack-check.c +++ b/pack-check.c @@ -41,7 +41,7 @@ int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, } while (len); index_crc = p->index_data; - index_crc += 2 + 256 + p->num_objects * (20/4) + nr; + index_crc += 2 + 256 + p->num_objects * (the_hash_algo->rawsz/4) + nr; return data_crc != ntohl(*index_crc); } @@ -54,7 +54,7 @@ static int verify_packfile(struct packed_git *p, { off_t index_size = p->index_size; const unsigned char *index_base = p->index_data; - git_SHA_CTX ctx; + git_hash_ctx ctx; unsigned char hash[GIT_MAX_RAWSZ], *pack_sig; off_t offset = 0, pack_sig_ofs = 0; uint32_t nr_objects, i; @@ -64,24 +64,24 @@ static int verify_packfile(struct packed_git *p, if (!is_pack_valid(p)) return error("packfile %s cannot be accessed", p->pack_name); - git_SHA1_Init(&ctx); + the_hash_algo->init_fn(&ctx); do { unsigned long remaining; unsigned char *in = use_pack(p, w_curs, offset, &remaining); offset += remaining; if (!pack_sig_ofs) - pack_sig_ofs = p->pack_size - 20; + pack_sig_ofs = p->pack_size - the_hash_algo->rawsz; if (offset > pack_sig_ofs) remaining -= (unsigned int)(offset - pack_sig_ofs); - git_SHA1_Update(&ctx, in, remaining); + the_hash_algo->update_fn(&ctx, in, remaining); } while 
(offset < pack_sig_ofs); - git_SHA1_Final(hash, &ctx); + the_hash_algo->final_fn(hash, &ctx); pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL); if (hashcmp(hash, pack_sig)) - err = error("%s SHA1 checksum mismatch", + err = error("%s pack checksum mismatch", p->pack_name); - if (hashcmp(index_base + index_size - 40, pack_sig)) - err = error("%s SHA1 does not match its index", + if (hashcmp(index_base + index_size - the_hash_algo->hexsz, pack_sig)) + err = error("%s pack checksum does not match its index", p->pack_name); unuse_pack(w_curs); @@ -141,7 +141,7 @@ static int verify_packfile(struct packed_git *p, err = error("cannot unpack %s from %s at offset %"PRIuMAX"", oid_to_hex(entries[i].oid.oid), p->pack_name, (uintmax_t)entries[i].offset); - else if (check_sha1_signature(entries[i].oid.hash, data, size, typename(type))) + else if (check_sha1_signature(entries[i].oid.hash, data, size, type_name(type))) err = error("packed %s from %s is corrupt", oid_to_hex(entries[i].oid.oid), p->pack_name); else if (fn) { @@ -165,8 +165,8 @@ int verify_pack_index(struct packed_git *p) { off_t index_size; const unsigned char *index_base; - git_SHA_CTX ctx; - unsigned char sha1[20]; + git_hash_ctx ctx; + unsigned char hash[GIT_MAX_RAWSZ]; int err = 0; if (open_pack_index(p)) @@ -175,11 +175,11 @@ int verify_pack_index(struct packed_git *p) index_base = p->index_data; /* Verify SHA1 sum of the index file */ - git_SHA1_Init(&ctx); - git_SHA1_Update(&ctx, index_base, (unsigned int)(index_size - 20)); - git_SHA1_Final(sha1, &ctx); - if (hashcmp(sha1, index_base + index_size - 20)) - err = error("Packfile index for %s SHA1 mismatch", + the_hash_algo->init_fn(&ctx); + the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz)); + the_hash_algo->final_fn(hash, &ctx); + if (hashcmp(hash, index_base + index_size - the_hash_algo->rawsz)) + err = error("Packfile index for %s hash mismatch", p->pack_name); return err; } diff --git a/pack-revindex.c b/pack-revindex.c index 1b7ebd8d7e..ff5f62c033 100644 --- a/pack-revindex.c +++ b/pack-revindex.c @@ -134,10 +134,8 @@ static void create_pack_revindex(struct packed_git *p) if (!(off & 0x80000000)) { p->revindex[i].offset = off; } else { - p->revindex[i].offset = - ((uint64_t)ntohl(*off_64++)) << 32; - p->revindex[i].offset |= - ntohl(*off_64++); + p->revindex[i].offset = get_be64(off_64); + off_64 += 2; } p->revindex[i].nr = i; } diff --git a/pack-write.c b/pack-write.c index fea6284192..d775c7406d 100644 --- a/pack-write.c +++ b/pack-write.c @@ -46,7 +46,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec int nr_objects, const struct pack_idx_option *opts, const unsigned char *sha1) { - struct sha1file *f; + struct hashfile *f; struct pack_idx_entry **sorted_by_sha, **list, **last; off_t last_obj_offset = 0; uint32_t array[256]; @@ -68,7 +68,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec if (opts->flags & WRITE_IDX_VERIFY) { assert(index_name); - f = sha1fd_check(index_name); + f = hashfd_check(index_name); } else { if (!index_name) { struct strbuf tmp_file = STRBUF_INIT; @@ -80,7 +80,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec if (fd < 0) die_errno("unable to create '%s'", index_name); } - f = sha1fd(fd, index_name); + f = hashfd(fd, index_name); } /* if last object's offset is >= 2^31 we should use index V2 */ @@ -91,7 +91,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec struct 
pack_idx_header hdr; hdr.idx_signature = htonl(PACK_IDX_SIGNATURE); hdr.idx_version = htonl(index_version); - sha1write(f, &hdr, sizeof(hdr)); + hashwrite(f, &hdr, sizeof(hdr)); } /* @@ -110,7 +110,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec array[i] = htonl(next - sorted_by_sha); list = next; } - sha1write(f, array, 256 * 4); + hashwrite(f, array, 256 * 4); /* * Write the actual SHA1 entries.. @@ -120,9 +120,9 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec struct pack_idx_entry *obj = *list++; if (index_version < 2) { uint32_t offset = htonl(obj->offset); - sha1write(f, &offset, 4); + hashwrite(f, &offset, 4); } - sha1write(f, obj->oid.hash, 20); + hashwrite(f, obj->oid.hash, the_hash_algo->rawsz); if ((opts->flags & WRITE_IDX_STRICT) && (i && !oidcmp(&list[-2]->oid, &obj->oid))) die("The same object %s appears twice in the pack", @@ -137,7 +137,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec for (i = 0; i < nr_objects; i++) { struct pack_idx_entry *obj = *list++; uint32_t crc32_val = htonl(obj->crc32); - sha1write(f, &crc32_val, 4); + hashwrite(f, &crc32_val, 4); } /* write the 32-bit offset table */ @@ -150,7 +150,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec ? (0x80000000 | nr_large_offset++) : obj->offset); offset = htonl(offset); - sha1write(f, &offset, 4); + hashwrite(f, &offset, 4); } /* write the large offset table */ @@ -164,25 +164,25 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec continue; split[0] = htonl(offset >> 32); split[1] = htonl(offset & 0xffffffff); - sha1write(f, split, 8); + hashwrite(f, split, 8); nr_large_offset--; } } - sha1write(f, sha1, 20); - sha1close(f, NULL, ((opts->flags & WRITE_IDX_VERIFY) + hashwrite(f, sha1, the_hash_algo->rawsz); + hashclose(f, NULL, ((opts->flags & WRITE_IDX_VERIFY) ? CSUM_CLOSE : CSUM_FSYNC)); return index_name; } -off_t write_pack_header(struct sha1file *f, uint32_t nr_entries) +off_t write_pack_header(struct hashfile *f, uint32_t nr_entries) { struct pack_header hdr; hdr.hdr_signature = htonl(PACK_SIGNATURE); hdr.hdr_version = htonl(PACK_VERSION); hdr.hdr_entries = htonl(nr_entries); - sha1write(f, &hdr, sizeof(hdr)); + hashwrite(f, &hdr, sizeof(hdr)); return sizeof(hdr); } @@ -203,20 +203,20 @@ off_t write_pack_header(struct sha1file *f, uint32_t nr_entries) * interested in the resulting SHA1 of pack data above partial_pack_offset. 
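
The sha1file to hashfile rename above is mechanical and the write pattern is unchanged: open a descriptor, wrap it with hashfd(), stream data through hashwrite(), and let hashclose() append the trailing checksum. A condensed sketch of that pattern, assuming git.git's csum-file.h; the path and payload are placeholders:

    #include "cache.h"
    #include "csum-file.h"

    /* Write a small file whose trailing bytes are the hash of the rest. */
    static void write_checksummed_example(const char *path)
    {
            const char *payload = "example payload\n";
            struct hashfile *f;
            int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0666);

            if (fd < 0)
                    die_errno("unable to create '%s'", path);
            f = hashfd(fd, path);
            hashwrite(f, payload, strlen(payload));

            /*
             * CSUM_FSYNC appends the running hash and fsyncs before closing,
             * the same finish used by write_idx_file() and
             * bitmap_writer_finish() above.
             */
            hashclose(f, NULL, CSUM_FSYNC);
    }
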
*/ void fixup_pack_header_footer(int pack_fd, - unsigned char *new_pack_sha1, + unsigned char *new_pack_hash, const char *pack_name, uint32_t object_count, - unsigned char *partial_pack_sha1, + unsigned char *partial_pack_hash, off_t partial_pack_offset) { int aligned_sz, buf_sz = 8 * 1024; - git_SHA_CTX old_sha1_ctx, new_sha1_ctx; + git_hash_ctx old_hash_ctx, new_hash_ctx; struct pack_header hdr; char *buf; ssize_t read_result; - git_SHA1_Init(&old_sha1_ctx); - git_SHA1_Init(&new_sha1_ctx); + the_hash_algo->init_fn(&old_hash_ctx); + the_hash_algo->init_fn(&new_hash_ctx); if (lseek(pack_fd, 0, SEEK_SET) != 0) die_errno("Failed seeking to start of '%s'", pack_name); @@ -228,9 +228,9 @@ void fixup_pack_header_footer(int pack_fd, pack_name); if (lseek(pack_fd, 0, SEEK_SET) != 0) die_errno("Failed seeking to start of '%s'", pack_name); - git_SHA1_Update(&old_sha1_ctx, &hdr, sizeof(hdr)); + the_hash_algo->update_fn(&old_hash_ctx, &hdr, sizeof(hdr)); hdr.hdr_entries = htonl(object_count); - git_SHA1_Update(&new_sha1_ctx, &hdr, sizeof(hdr)); + the_hash_algo->update_fn(&new_hash_ctx, &hdr, sizeof(hdr)); write_or_die(pack_fd, &hdr, sizeof(hdr)); partial_pack_offset -= sizeof(hdr); @@ -238,28 +238,28 @@ void fixup_pack_header_footer(int pack_fd, aligned_sz = buf_sz - sizeof(hdr); for (;;) { ssize_t m, n; - m = (partial_pack_sha1 && partial_pack_offset < aligned_sz) ? + m = (partial_pack_hash && partial_pack_offset < aligned_sz) ? partial_pack_offset : aligned_sz; n = xread(pack_fd, buf, m); if (!n) break; if (n < 0) die_errno("Failed to checksum '%s'", pack_name); - git_SHA1_Update(&new_sha1_ctx, buf, n); + the_hash_algo->update_fn(&new_hash_ctx, buf, n); aligned_sz -= n; if (!aligned_sz) aligned_sz = buf_sz; - if (!partial_pack_sha1) + if (!partial_pack_hash) continue; - git_SHA1_Update(&old_sha1_ctx, buf, n); + the_hash_algo->update_fn(&old_hash_ctx, buf, n); partial_pack_offset -= n; if (partial_pack_offset == 0) { - unsigned char sha1[20]; - git_SHA1_Final(sha1, &old_sha1_ctx); - if (hashcmp(sha1, partial_pack_sha1) != 0) + unsigned char hash[GIT_MAX_RAWSZ]; + the_hash_algo->final_fn(hash, &old_hash_ctx); + if (hashcmp(hash, partial_pack_hash) != 0) die("Unexpected checksum for %s " "(disk corruption?)", pack_name); /* @@ -267,23 +267,24 @@ void fixup_pack_header_footer(int pack_fd, * pack, which also means making partial_pack_offset * big enough not to matter anymore. */ - git_SHA1_Init(&old_sha1_ctx); + the_hash_algo->init_fn(&old_hash_ctx); partial_pack_offset = ~partial_pack_offset; partial_pack_offset -= MSB(partial_pack_offset, 1); } } free(buf); - if (partial_pack_sha1) - git_SHA1_Final(partial_pack_sha1, &old_sha1_ctx); - git_SHA1_Final(new_pack_sha1, &new_sha1_ctx); - write_or_die(pack_fd, new_pack_sha1, 20); + if (partial_pack_hash) + the_hash_algo->final_fn(partial_pack_hash, &old_hash_ctx); + the_hash_algo->final_fn(new_pack_hash, &new_hash_ctx); + write_or_die(pack_fd, new_pack_hash, the_hash_algo->rawsz); fsync_or_die(pack_fd, pack_name); } char *index_pack_lockfile(int ip_out) { - char packname[46]; + char packname[GIT_MAX_HEXSZ + 6]; + const int len = the_hash_algo->hexsz + 6; /* * The first thing we expect from index-pack's output @@ -292,9 +293,9 @@ char *index_pack_lockfile(int ip_out) * case, we need it to remove the corresponding .keep file * later on. If we don't get that then tough luck with it. 
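
Replacing git_SHA1_Init/Update/Final with the function pointers on the_hash_algo is what lets this code serve a future non-SHA-1 hash without further change. The pattern in isolation, assuming git.git's cache.h (which provides git_hash_ctx, the_hash_algo and GIT_MAX_RAWSZ):

    #include "cache.h"

    /* Hash an in-memory buffer with whatever algorithm the repository uses. */
    static void hash_example_buffer(const void *buf, size_t len,
                                    unsigned char out[GIT_MAX_RAWSZ])
    {
            git_hash_ctx ctx;

            the_hash_algo->init_fn(&ctx);
            the_hash_algo->update_fn(&ctx, buf, len);
            the_hash_algo->final_fn(out, &ctx);
            /* Only the first the_hash_algo->rawsz bytes of 'out' are valid. */
    }
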
*/ - if (read_in_full(ip_out, packname, 46) == 46 && packname[45] == '\n') { + if (read_in_full(ip_out, packname, len) == len && packname[len-1] == '\n') { const char *name; - packname[45] = 0; + packname[len-1] = 0; if (skip_prefix(packname, "keep\t", &name)) return xstrfmt("%s/pack/pack-%s.keep", get_object_directory(), name); @@ -332,14 +333,14 @@ int encode_in_pack_object_header(unsigned char *hdr, int hdr_len, return n; } -struct sha1file *create_tmp_packfile(char **pack_tmp_name) +struct hashfile *create_tmp_packfile(char **pack_tmp_name) { struct strbuf tmpname = STRBUF_INIT; int fd; fd = odb_mkstemp(&tmpname, "pack/tmp_pack_XXXXXX"); *pack_tmp_name = strbuf_detach(&tmpname, NULL); - return sha1fd(fd, *pack_tmp_name); + return hashfd(fd, *pack_tmp_name); } void finish_tmp_packfile(struct strbuf *name_buffer, @@ -81,7 +81,7 @@ extern const char *write_idx_file(const char *index_name, struct pack_idx_entry extern int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr); extern int verify_pack_index(struct packed_git *); extern int verify_pack(struct packed_git *, verify_fn fn, struct progress *, uint32_t); -extern off_t write_pack_header(struct sha1file *f, uint32_t); +extern off_t write_pack_header(struct hashfile *f, uint32_t); extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t); extern char *index_pack_lockfile(int fd); @@ -98,7 +98,7 @@ extern int encode_in_pack_object_header(unsigned char *hdr, int hdr_len, #define PH_ERROR_PROTOCOL (-3) extern int read_pack_header(int fd, struct pack_header *); -extern struct sha1file *create_tmp_packfile(char **pack_tmp_name); +extern struct hashfile *create_tmp_packfile(char **pack_tmp_name); extern void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]); #endif diff --git a/packfile.c b/packfile.c index 4a5fe7ab18..7c1a2519fc 100644 --- a/packfile.c +++ b/packfile.c @@ -1,5 +1,5 @@ #include "cache.h" -#include "mru.h" +#include "list.h" #include "pack.h" #include "dir.h" #include "mergesort.h" @@ -8,6 +8,11 @@ #include "list.h" #include "streaming.h" #include "sha1-lookup.h" +#include "commit.h" +#include "object.h" +#include "tag.h" +#include "tree-walk.h" +#include "tree.h" char *odb_pack_name(struct strbuf *buf, const unsigned char *sha1, @@ -40,7 +45,7 @@ static unsigned int pack_max_fds; static size_t peak_pack_mapped; static size_t pack_mapped; struct packed_git *packed_git; -struct mru packed_git_mru; +LIST_HEAD(packed_git_mru); #define SZ_FMT PRIuMAX static inline uintmax_t sz_fmt(size_t s) { return s; } @@ -643,10 +648,10 @@ struct packed_git *add_packed_git(const char *path, size_t path_len, int local) return NULL; /* - * ".pack" is long enough to hold any suffix we're adding (and + * ".promisor" is long enough to hold any suffix we're adding (and * the use xsnprintf double-checks that) */ - alloc = st_add3(path_len, strlen(".pack"), 1); + alloc = st_add3(path_len, strlen(".promisor"), 1); p = alloc_packed_git(alloc); memcpy(p->pack_name, path, path_len); @@ -654,6 +659,10 @@ struct packed_git *add_packed_git(const char *path, size_t path_len, int local) if (!access(p->pack_name, F_OK)) p->pack_keep = 1; + xsnprintf(p->pack_name + path_len, alloc - path_len, ".promisor"); + if (!access(p->pack_name, F_OK)) + p->pack_promisor = 1; + xsnprintf(p->pack_name + path_len, alloc - path_len, 
".pack"); if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) { free(p); @@ -781,7 +790,8 @@ static void prepare_packed_git_one(char *objdir, int local) if (ends_with(de->d_name, ".idx") || ends_with(de->d_name, ".pack") || ends_with(de->d_name, ".bitmap") || - ends_with(de->d_name, ".keep")) + ends_with(de->d_name, ".keep") || + ends_with(de->d_name, ".promisor")) string_list_append(&garbage, path.buf); else report_garbage(PACKDIR_FILE_GARBAGE, path.buf); @@ -866,9 +876,10 @@ static void prepare_packed_git_mru(void) { struct packed_git *p; - mru_clear(&packed_git_mru); + INIT_LIST_HEAD(&packed_git_mru); + for (p = packed_git; p; p = p->next) - mru_append(&packed_git_mru, p); + list_add_tail(&p->mru, &packed_git_mru); } static int prepare_packed_git_run_once = 0; @@ -1350,16 +1361,16 @@ int packed_object_info(struct packed_git *p, off_t obj_offset, *oi->disk_sizep = revidx[1].offset - obj_offset; } - if (oi->typep || oi->typename) { + if (oi->typep || oi->type_name) { enum object_type ptot; ptot = packed_to_object_type(p, obj_offset, type, &w_curs, curpos); if (oi->typep) *oi->typep = ptot; - if (oi->typename) { - const char *tn = typename(ptot); + if (oi->type_name) { + const char *tn = type_name(ptot); if (tn) - strbuf_addstr(oi->typename, tn); + strbuf_addstr(oi->type_name, tn); } if (ptot < 0) { type = OBJ_BAD; @@ -1702,8 +1713,7 @@ off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n) return off; index += p->num_objects * 4 + (off & 0x7fffffff) * 8; check_pack_index_ptr(p, index); - return (((uint64_t)ntohl(*((uint32_t *)(index + 0)))) << 32) | - ntohl(*((uint32_t *)(index + 4))); + return get_be64(index); } } @@ -1712,11 +1722,8 @@ off_t find_pack_entry_one(const unsigned char *sha1, { const uint32_t *level1_ofs = p->index_data; const unsigned char *index = p->index_data; - unsigned hi, lo, stride; - static int debug_lookup = -1; - - if (debug_lookup < 0) - debug_lookup = !!getenv("GIT_DEBUG_LOOKUP"); + unsigned stride; + uint32_t result; if (!index) { if (open_pack_index(p)) @@ -1729,8 +1736,6 @@ off_t find_pack_entry_one(const unsigned char *sha1, index += 8; } index += 4 * 256; - hi = ntohl(level1_ofs[*sha1]); - lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1])); if (p->index_version > 1) { stride = 20; } else { @@ -1738,24 +1743,8 @@ off_t find_pack_entry_one(const unsigned char *sha1, index += 4; } - if (debug_lookup) - printf("%02x%02x%02x... 
lo %u hi %u nr %"PRIu32"\n", - sha1[0], sha1[1], sha1[2], lo, hi, p->num_objects); - - while (lo < hi) { - unsigned mi = lo + (hi - lo) / 2; - int cmp = hashcmp(index + mi * stride, sha1); - - if (debug_lookup) - printf("lo %u hi %u rg %u mi %u\n", - lo, hi, hi - lo, mi); - if (!cmp) - return nth_packed_object_offset(p, mi); - if (cmp > 0) - hi = mi; - else - lo = mi+1; - } + if (bsearch_hash(sha1, level1_ofs, index, stride, &result)) + return nth_packed_object_offset(p, result); return 0; } @@ -1831,15 +1820,16 @@ static int fill_pack_entry(const unsigned char *sha1, */ int find_pack_entry(const unsigned char *sha1, struct pack_entry *e) { - struct mru_entry *p; + struct list_head *pos; prepare_packed_git(); if (!packed_git) return 0; - for (p = packed_git_mru.head; p; p = p->next) { - if (fill_pack_entry(sha1, e, p->item)) { - mru_mark(&packed_git_mru, p); + list_for_each(pos, &packed_git_mru) { + struct packed_git *p = list_entry(pos, struct packed_git, mru); + if (fill_pack_entry(sha1, e, p)) { + list_move(&p->mru, &packed_git_mru); return 1; } } @@ -1889,6 +1879,9 @@ int for_each_packed_object(each_packed_object_fn cb, void *data, unsigned flags) for (p = packed_git; p; p = p->next) { if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local) continue; + if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) && + !p->pack_promisor) + continue; if (open_pack_index(p)) { pack_errors = 1; continue; @@ -1899,3 +1892,61 @@ int for_each_packed_object(each_packed_object_fn cb, void *data, unsigned flags) } return r ? r : pack_errors; } + +static int add_promisor_object(const struct object_id *oid, + struct packed_git *pack, + uint32_t pos, + void *set_) +{ + struct oidset *set = set_; + struct object *obj = parse_object(oid); + if (!obj) + return 1; + + oidset_insert(set, oid); + + /* + * If this is a tree, commit, or tag, the objects it refers + * to are also promisor objects. (Blobs refer to no objects.) + */ + if (obj->type == OBJ_TREE) { + struct tree *tree = (struct tree *)obj; + struct tree_desc desc; + struct name_entry entry; + if (init_tree_desc_gently(&desc, tree->buffer, tree->size)) + /* + * Error messages are given when packs are + * verified, so do not print any here. + */ + return 0; + while (tree_entry_gently(&desc, &entry)) + oidset_insert(set, entry.oid); + } else if (obj->type == OBJ_COMMIT) { + struct commit *commit = (struct commit *) obj; + struct commit_list *parents = commit->parents; + + oidset_insert(set, &commit->tree->object.oid); + for (; parents; parents = parents->next) + oidset_insert(set, &parents->item->object.oid); + } else if (obj->type == OBJ_TAG) { + struct tag *tag = (struct tag *) obj; + oidset_insert(set, &tag->tagged->oid); + } + return 0; +} + +int is_promisor_object(const struct object_id *oid) +{ + static struct oidset promisor_objects; + static int promisor_objects_prepared; + + if (!promisor_objects_prepared) { + if (repository_format_partial_clone) { + for_each_packed_object(add_promisor_object, + &promisor_objects, + FOR_EACH_OBJECT_PROMISOR_ONLY); + } + promisor_objects_prepared = 1; + } + return oidset_contains(&promisor_objects, oid); +} diff --git a/packfile.h b/packfile.h index 0cdeb54dcd..a7fca598d6 100644 --- a/packfile.h +++ b/packfile.h @@ -1,6 +1,8 @@ #ifndef PACKFILE_H #define PACKFILE_H +#include "oidset.h" + /* * Generate the filename to be used for a pack file with checksum "sha1" and * extension "ext". 
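
is_promisor_object() is the hook that lets gc and fsck tolerate missing objects in a partial clone: an object may be absent locally as long as a pack marked with a .promisor file refers to it. A hedged sketch of the intended call pattern; the helper itself is hypothetical, only is_promisor_object() and has_object_file() come from git.git:

    #include "cache.h"
    #include "packfile.h"

    /*
     * Hypothetical helper for a connectivity walk: decide whether a
     * missing object should be reported as corruption.
     */
    static int missing_object_is_fatal(const struct object_id *oid)
    {
            if (has_object_file(oid))
                    return 0;               /* not missing after all */
            if (is_promisor_object(oid))
                    return 0;               /* promised by the promisor remote */
            return 1;                       /* genuinely lost: report it */
    }
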
The result is written into the strbuf "buf", overwriting @@ -125,6 +127,11 @@ extern int has_sha1_pack(const unsigned char *sha1); extern int has_pack_index(const unsigned char *sha1); /* + * Only iterate over packs obtained from the promisor remote. + */ +#define FOR_EACH_OBJECT_PROMISOR_ONLY 2 + +/* * Iterate over packed objects in both the local * repository and any alternates repositories (unless the * FOR_EACH_OBJECT_LOCAL_ONLY flag, defined in cache.h, is set). @@ -135,4 +142,10 @@ typedef int each_packed_object_fn(const struct object_id *oid, void *data); extern int for_each_packed_object(each_packed_object_fn, void *, unsigned flags); +/* + * Return 1 if an object in a promisor packfile is or refers to the given + * object, 0 otherwise. + */ +extern int is_promisor_object(const struct object_id *oid); + #endif diff --git a/parse-options.c b/parse-options.c index fca7159646..125e84f984 100644 --- a/parse-options.c +++ b/parse-options.c @@ -425,6 +425,48 @@ void parse_options_start(struct parse_opt_ctx_t *ctx, parse_options_check(options); } +/* + * TODO: we are not completing the --no-XXX form yet because there are + * many options that do not suppress it properly. + */ +static int show_gitcomp(struct parse_opt_ctx_t *ctx, + const struct option *opts) +{ + for (; opts->type != OPTION_END; opts++) { + const char *suffix = ""; + + if (!opts->long_name) + continue; + if (opts->flags & (PARSE_OPT_HIDDEN | PARSE_OPT_NOCOMPLETE)) + continue; + + switch (opts->type) { + case OPTION_GROUP: + continue; + case OPTION_STRING: + case OPTION_FILENAME: + case OPTION_INTEGER: + case OPTION_MAGNITUDE: + case OPTION_CALLBACK: + if (opts->flags & PARSE_OPT_NOARG) + break; + if (opts->flags & PARSE_OPT_OPTARG) + break; + if (opts->flags & PARSE_OPT_LASTARG_DEFAULT) + break; + suffix = "="; + break; + default: + break; + } + if (opts->flags & PARSE_OPT_COMP_ARG) + suffix = "="; + printf(" --%s%s", opts->long_name, suffix); + } + fputc('\n', stdout); + exit(0); +} + static int usage_with_options_internal(struct parse_opt_ctx_t *, const char * const *, const struct option *, int, int); @@ -455,6 +497,10 @@ int parse_options_step(struct parse_opt_ctx_t *ctx, if (internal_help && ctx->total == 1 && !strcmp(arg + 1, "h")) goto show_usage; + /* lone --git-completion-helper is asked by git-completion.bash */ + if (ctx->total == 1 && !strcmp(arg + 1, "-git-completion-helper")) + return show_gitcomp(ctx, options); + if (arg[1] != '-') { ctx->opt = arg + 1; switch (parse_short_opt(ctx, options)) { @@ -525,7 +571,7 @@ unknown: int parse_options_end(struct parse_opt_ctx_t *ctx) { - memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out)); + MOVE_ARRAY(ctx->out + ctx->cpidx, ctx->argv, ctx->argc); ctx->out[ctx->cpidx + ctx->argc] = NULL; return ctx->cpidx + ctx->argc; } diff --git a/parse-options.h b/parse-options.h index af711227ae..ab1cc362bf 100644 --- a/parse-options.h +++ b/parse-options.h @@ -38,7 +38,9 @@ enum parse_opt_option_flags { PARSE_OPT_LASTARG_DEFAULT = 16, PARSE_OPT_NODASH = 32, PARSE_OPT_LITERAL_ARGHELP = 64, - PARSE_OPT_SHELL_EVAL = 256 + PARSE_OPT_SHELL_EVAL = 256, + PARSE_OPT_NOCOMPLETE = 512, + PARSE_OPT_COMP_ARG = 1024 }; struct option; @@ -89,6 +91,11 @@ typedef int parse_opt_ll_cb(struct parse_opt_ctx_t *ctx, * PARSE_OPT_LITERAL_ARGHELP: says that argh shouldn't be enclosed in brackets * (i.e. '<argh>') in the help message. * Useful for options with multiple parameters. + * PARSE_OPT_NOCOMPLETE: by default all visible options are completable + * by git-completion.bash. 
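
With show_gitcomp() in place, git-completion.bash only needs to run a builtin with --git-completion-helper to learn its long options, and PARSE_OPT_NOCOMPLETE keeps individual options out of that list. A sketch of how a builtin might use it, assuming it lives inside git.git; the option names are invented, and OPT_BOOL_F is one of the *_F macro variants added further down in this patch:

    #include "cache.h"
    #include "parse-options.h"

    static int dry_run, use_force;

    static struct option example_options[] = {
            /* offered by completion as "--dry-run" */
            OPT_BOOL('n', "dry-run", &dry_run,
                     N_("report what would be removed, but do not remove it")),
            /* hidden from completion: too destructive to offer by default */
            OPT_BOOL_F('f', "force", &use_force, N_("really remove it"),
                       PARSE_OPT_NOCOMPLETE),
            OPT_END()
    };
    /* "git <cmd> --git-completion-helper" would print " --dry-run" here. */
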
This option suppresses that. + * PARSE_OPT_COMP_ARG: this option forces to git-completion.bash to + * complete an option as --name= not --name even if + * the option takes optional argument. * * `callback`:: * pointer to the callback to use for OPTION_CALLBACK or @@ -112,19 +119,24 @@ struct option { intptr_t defval; }; +#define OPT_BIT_F(s, l, v, h, b, f) { OPTION_BIT, (s), (l), (v), NULL, (h), \ + PARSE_OPT_NOARG|(f), NULL, (b) } +#define OPT_COUNTUP_F(s, l, v, h, f) { OPTION_COUNTUP, (s), (l), (v), NULL, \ + (h), PARSE_OPT_NOARG|(f) } +#define OPT_SET_INT_F(s, l, v, h, i, f) { OPTION_SET_INT, (s), (l), (v), NULL, \ + (h), PARSE_OPT_NOARG | (f), NULL, (i) } +#define OPT_BOOL_F(s, l, v, h, f) OPT_SET_INT_F(s, l, v, h, 1, f) + #define OPT_END() { OPTION_END } #define OPT_ARGUMENT(l, h) { OPTION_ARGUMENT, 0, (l), NULL, NULL, \ (h), PARSE_OPT_NOARG} #define OPT_GROUP(h) { OPTION_GROUP, 0, NULL, NULL, NULL, (h) } -#define OPT_BIT(s, l, v, h, b) { OPTION_BIT, (s), (l), (v), NULL, (h), \ - PARSE_OPT_NOARG, NULL, (b) } +#define OPT_BIT(s, l, v, h, b) OPT_BIT_F(s, l, v, h, b, 0) #define OPT_NEGBIT(s, l, v, h, b) { OPTION_NEGBIT, (s), (l), (v), NULL, \ (h), PARSE_OPT_NOARG, NULL, (b) } -#define OPT_COUNTUP(s, l, v, h) { OPTION_COUNTUP, (s), (l), (v), NULL, \ - (h), PARSE_OPT_NOARG } -#define OPT_SET_INT(s, l, v, h, i) { OPTION_SET_INT, (s), (l), (v), NULL, \ - (h), PARSE_OPT_NOARG, NULL, (i) } -#define OPT_BOOL(s, l, v, h) OPT_SET_INT(s, l, v, h, 1) +#define OPT_COUNTUP(s, l, v, h) OPT_COUNTUP_F(s, l, v, h, 0) +#define OPT_SET_INT(s, l, v, h, i) OPT_SET_INT_F(s, l, v, h, i, 0) +#define OPT_BOOL(s, l, v, h) OPT_BOOL_F(s, l, v, h, 0) #define OPT_HIDDEN_BOOL(s, l, v, h) { OPTION_SET_INT, (s), (l), (v), NULL, \ (h), PARSE_OPT_NOARG | PARSE_OPT_HIDDEN, NULL, 1} #define OPT_CMDMODE(s, l, v, h, i) { OPTION_CMDMODE, (s), (l), (v), NULL, \ @@ -240,7 +252,7 @@ extern int parse_opt_passthru_argv(const struct option *, const char *, int); { OPTION_CALLBACK, 'q', "quiet", (var), NULL, N_("be more quiet"), \ PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 } #define OPT__DRY_RUN(var, h) OPT_BOOL('n', "dry-run", (var), (h)) -#define OPT__FORCE(var, h) OPT_COUNTUP('f', "force", (var), (h)) +#define OPT__FORCE(var, h, f) OPT_COUNTUP_F('f', "force", (var), (h), (f)) #define OPT__ABBREV(var) \ { OPTION_CALLBACK, 0, "abbrev", (var), N_("n"), \ N_("use <n> digits to display SHA-1s"), \ diff --git a/perl/.gitignore b/perl/.gitignore index 0f1fc27f86..84c048a73c 100644 --- a/perl/.gitignore +++ b/perl/.gitignore @@ -1,8 +1 @@ -perl.mak -perl.mak.old -MYMETA.json -MYMETA.yml -blib -blibdirs -pm_to_blib -PM.stamp +/build/ diff --git a/perl/FromCPAN/.gitattributes b/perl/FromCPAN/.gitattributes new file mode 100644 index 0000000000..8b64fc5e22 --- /dev/null +++ b/perl/FromCPAN/.gitattributes @@ -0,0 +1 @@ +/Error.pm whitespace=-blank-at-eof diff --git a/perl/private-Error.pm b/perl/FromCPAN/Error.pm index 6098135ae2..8b95e2d73d 100644 --- a/perl/private-Error.pm +++ b/perl/FromCPAN/Error.pm @@ -12,10 +12,12 @@ package Error; use strict; +use warnings; + use vars qw($VERSION); use 5.004; -$VERSION = "0.15009"; +$VERSION = "0.17025"; use overload ( '""' => 'stringify', @@ -32,21 +34,35 @@ $Error::THROWN = undef; # last error thrown, a workaround until die $ref works my $LAST; # Last error created my %ERROR; # Last error associated with package -sub throw_Error_Simple +sub _throw_Error_Simple { my $args = shift; return Error::Simple->new($args->{'text'}); } -$Error::ObjectifyCallback = \&throw_Error_Simple; 
+$Error::ObjectifyCallback = \&_throw_Error_Simple; # Exported subs are defined in Error::subs +use Scalar::Util (); + sub import { shift; + my @tags = @_; local $Exporter::ExportLevel = $Exporter::ExportLevel + 1; - Error::subs->import(@_); + + @tags = grep { + if( $_ eq ':warndie' ) { + Error::WarnDie->import(); + 0; + } + else { + 1; + } + } @tags; + + Error::subs->import(@tags); } # I really want to use last for the name of this method, but it is a keyword @@ -107,10 +123,6 @@ sub stacktrace { $text; } -# Allow error propagation, ie -# -# $ber->encode(...) or -# return Error->prior($ber)->associate($ldap); sub associate { my $err = shift; @@ -130,6 +142,7 @@ sub associate { return; } + sub new { my $self = shift; my($pkg,$file,$line) = caller($Error::Depth); @@ -246,6 +259,10 @@ sub value { package Error::Simple; +use vars qw($VERSION); + +$VERSION = "0.17025"; + @Error::Simple::ISA = qw(Error); sub new { @@ -288,14 +305,6 @@ use vars qw(@EXPORT_OK @ISA %EXPORT_TAGS); @ISA = qw(Exporter); - -sub blessed { - my $item = shift; - local $@; # don't kill an outer $@ - ref $item and eval { $item->can('can') }; -} - - sub run_clauses ($$$\@) { my($clauses,$err,$wantarray,$result) = @_; my $code = undef; @@ -314,16 +323,17 @@ sub run_clauses ($$$\@) { my $pkg = $catch->[$i]; unless(defined $pkg) { #except - splice(@$catch,$i,2,$catch->[$i+1]->()); + splice(@$catch,$i,2,$catch->[$i+1]->($err)); $i -= 2; next CATCHLOOP; } - elsif(blessed($err) && $err->isa($pkg)) { + elsif(Scalar::Util::blessed($err) && $err->isa($pkg)) { $code = $catch->[$i+1]; while(1) { my $more = 0; - local($Error::THROWN); + local($Error::THROWN, $@); my $ok = eval { + $@ = $err; if($wantarray) { @{$result} = $code->($err,\$more); } @@ -341,10 +351,9 @@ sub run_clauses ($$$\@) { undef $err; } else { - $err = defined($Error::THROWN) - ? $Error::THROWN : $@; - $err = $Error::ObjectifyCallback->({'text' =>$err}) - unless ref($err); + $err = $@ || $Error::THROWN; + $err = $Error::ObjectifyCallback->({'text' =>$err}) + unless ref($err); } last CATCH; }; @@ -357,7 +366,9 @@ sub run_clauses ($$$\@) { if(defined($owise = $clauses->{'otherwise'})) { my $code = $clauses->{'otherwise'}; my $more = 0; + local($Error::THROWN, $@); my $ok = eval { + $@ = $err; if($wantarray) { @{$result} = $code->($err,\$more); } @@ -374,11 +385,10 @@ sub run_clauses ($$$\@) { undef $err; } else { - $err = defined($Error::THROWN) - ? $Error::THROWN : $@; + $err = $@ || $Error::THROWN; - $err = $Error::ObjectifyCallback->({'text' =>$err}) - unless ref($err); + $err = $Error::ObjectifyCallback->({'text' =>$err}) + unless ref($err); } } } @@ -398,7 +408,7 @@ sub try (&;$) { do { local $Error::THROWN = undef; - local $@ = undef; + local $@ = undef; $ok = eval { if($wantarray) { @@ -413,21 +423,21 @@ sub try (&;$) { 1; }; - $err = defined($Error::THROWN) ? 
$Error::THROWN : $@ + $err = $@ || $Error::THROWN unless $ok; }; shift @Error::STACK; $err = run_clauses($clauses,$err,wantarray,@result) - unless($ok); + unless($ok); $clauses->{'finally'}->() if(defined($clauses->{'finally'})); if (defined($err)) { - if (blessed($err) && $err->can('throw')) + if (Scalar::Util::blessed($err) && $err->can('throw')) { throw $err; } @@ -506,12 +516,116 @@ sub otherwise (&;$) { } 1; + +package Error::WarnDie; + +sub gen_callstack($) +{ + my ( $start ) = @_; + + require Carp; + local $Carp::CarpLevel = $start; + my $trace = Carp::longmess(""); + # Remove try calls from the trace + $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog; + $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::run_clauses[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog; + my @callstack = split( m/\n/, $trace ); + return @callstack; +} + +my $old_DIE; +my $old_WARN; + +sub DEATH +{ + my ( $e ) = @_; + + local $SIG{__DIE__} = $old_DIE if( defined $old_DIE ); + + die @_ if $^S; + + my ( $etype, $message, $location, @callstack ); + if ( ref($e) && $e->isa( "Error" ) ) { + $etype = "exception of type " . ref( $e ); + $message = $e->text; + $location = $e->file . ":" . $e->line; + @callstack = split( m/\n/, $e->stacktrace ); + } + else { + # Don't apply subsequent layer of message formatting + die $e if( $e =~ m/^\nUnhandled perl error caught at toplevel:\n\n/ ); + $etype = "perl error"; + my $stackdepth = 0; + while( caller( $stackdepth ) =~ m/^Error(?:$|::)/ ) { + $stackdepth++ + } + + @callstack = gen_callstack( $stackdepth + 1 ); + + $message = "$e"; + chomp $message; + + if ( $message =~ s/ at (.*?) line (\d+)\.$// ) { + $location = $1 . ":" . $2; + } + else { + my @caller = caller( $stackdepth ); + $location = $caller[1] . ":" . $caller[2]; + } + } + + shift @callstack; + # Do it this way in case there are no elements; we don't print a spurious \n + my $callstack = join( "", map { "$_\n"} @callstack ); + + die "\nUnhandled $etype caught at toplevel:\n\n $message\n\nThrown from: $location\n\nFull stack trace:\n\n$callstack\n"; +} + +sub TAXES +{ + my ( $message ) = @_; + + local $SIG{__WARN__} = $old_WARN if( defined $old_WARN ); + + $message =~ s/ at .*? line \d+\.$//; + chomp $message; + + my @callstack = gen_callstack( 1 ); + my $location = shift @callstack; + + # $location already starts in a leading space + $message .= $location; + + # Do it this way in case there are no elements; we don't print a spurious \n + my $callstack = join( "", map { "$_\n"} @callstack ); + + warn "$message:\n$callstack"; +} + +sub import +{ + $old_DIE = $SIG{__DIE__}; + $old_WARN = $SIG{__WARN__}; + + $SIG{__DIE__} = \&DEATH; + $SIG{__WARN__} = \&TAXES; +} + +1; + __END__ =head1 NAME Error - Error/exception handling in an OO-ish way +=head1 WARNING + +Using the "Error" module is B<no longer recommended> due to the black-magical +nature of its syntactic sugar, which often tends to break. Its maintainers +have stopped actively writing code that uses it, and discourage people +from doing so. See the "SEE ALSO" section below for better recommendations. + =head1 SYNOPSIS use Error qw(:try); @@ -529,7 +643,7 @@ Error - Error/exception handling in an OO-ish way try { do_some_stuff(); die "error!" if $condition; - throw Error::Simple -text => "Oops!" if $other_condition; + throw Error::Simple "Oops!" if $other_condition; } catch Error::IO with { my $E = shift; @@ -587,7 +701,7 @@ C<BLOCK> will be passed two arguments. The first will be the error being thrown. 
The second is a reference to a scalar variable. If this variable is set by the catch block then, on return from the catch block, try will continue processing as if the catch block was never -found. +found. The error will also be available in C<$@>. To propagate the error the catch block may call C<$err-E<gt>throw> @@ -608,7 +722,7 @@ type. Catch any error by executing the code in C<BLOCK> When evaluated C<BLOCK> will be passed one argument, which will be the -error being processed. +error being processed. The error will also be available in C<$@>. Only one otherwise block may be specified per try block @@ -625,12 +739,25 @@ Only one finally block may be specified per try block =back +=head1 COMPATIBILITY + +L<Moose> exports a keyword called C<with> which clashes with Error's. This +example returns a prototype mismatch error: + + package MyTest; + + use warnings; + use Moose; + use Error qw(:try); + +(Thanks to C<maik.hentsche@amd.com> for the report.). + =head1 CLASS INTERFACE =head2 CONSTRUCTORS The C<Error> object is implemented as a HASH. This HASH is initialized -with the arguments that are passed to its constructor. The elements +with the arguments that are passed to it's constructor. The elements that are used by, or are retrievable by the C<Error> class are listed below, other classes may add to these. @@ -655,6 +782,10 @@ an object blessed into that package as the C<-object> argument. =over 4 +=item Error->new() + +See the Error::Simple documentation. + =item throw ( [ ARGS ] ) Create a new C<Error> object and throw an error, which will be caught @@ -730,6 +861,13 @@ The line where the constructor of this error was called from The text of the error +=item $err->associate($obj) + +Associates an error with an object to allow error propagation. I.e: + + $ber->encode(...) or + return Error->prior($ber)->associate($ldap); + =back =head2 OVERLOAD METHODS @@ -759,11 +897,9 @@ to the constructor. =head1 PRE-DEFINED ERROR CLASSES -=over 4 - -=item Error::Simple +=head2 Error::Simple -This class can be used to hold simple error strings and values. Its +This class can be used to hold simple error strings and values. It's constructor takes two arguments. The first is a text value, the second is a numeric value. These values are what will be returned by the overload methods. @@ -775,7 +911,6 @@ of the error object. This class is used internally if an eval'd block die's with an error that is a plain string. (Unless C<$Error::ObjectifyCallback> is modified) -=back =head1 $Error::ObjectifyCallback @@ -804,6 +939,76 @@ class MyError::Bar by default: # Error handling here. } +=cut + +=head1 MESSAGE HANDLERS + +C<Error> also provides handlers to extend the output of the C<warn()> perl +function, and to handle the printing of a thrown C<Error> that is not caught +or otherwise handled. These are not installed by default, but are requested +using the C<:warndie> tag in the C<use> line. + + use Error qw( :warndie ); + +These new error handlers are installed in C<$SIG{__WARN__}> and +C<$SIG{__DIE__}>. If these handlers are already defined when the tag is +imported, the old values are stored, and used during the new code. Thus, to +arrange for custom handling of warnings and errors, you will need to perform +something like the following: + + BEGIN { + $SIG{__WARN__} = sub { + print STDERR "My special warning handler: $_[0]" + }; + } + + use Error qw( :warndie ); + +Note that setting C<$SIG{__WARN__}> after the C<:warndie> tag has been +imported will overwrite the handler that C<Error> provides. 
If this cannot be +avoided, then the tag can be explicitly C<import>ed later + + use Error; + + $SIG{__WARN__} = ...; + + import Error qw( :warndie ); + +=head2 EXAMPLE + +The C<__DIE__> handler turns messages such as + + Can't call method "foo" on an undefined value at examples/warndie.pl line 16. + +into + + Unhandled perl error caught at toplevel: + + Can't call method "foo" on an undefined value + + Thrown from: examples/warndie.pl:16 + + Full stack trace: + + main::inner('undef') called at examples/warndie.pl line 20 + main::outer('undef') called at examples/warndie.pl line 23 + +=cut + +=head1 SEE ALSO + +See L<Exception::Class> for a different module providing Object-Oriented +exception handling, along with a convenient syntax for declaring hierarchies +for them. It doesn't provide Error's syntactic sugar of C<try { ... }>, +C<catch { ... }>, etc. which may be a good thing or a bad thing based +on what you want. (Because Error's syntactic sugar tends to break.) + +L<Error::Exception> aims to combine L<Error> and L<Exception::Class> +"with correct stringification". + +L<TryCatch> and L<Try::Tiny> are similar in concept to Error.pm only providing +a syntax that hopefully breaks less. + =head1 KNOWN BUGS None, but that does not mean there are not any. @@ -816,12 +1021,20 @@ The code that inspired me to write this was originally written by Peter Seibel <peter@weblogic.com> and adapted by Jesse Glick <jglick@sig.bsh.com>. +C<:warndie> handlers added by Paul Evans <leonerd@leonerd.org.uk> + =head1 MAINTAINER -Shlomi Fish <shlomif@iglu.org.il> +Shlomi Fish, L<http://www.shlomifish.org/> . =head1 PAST MAINTAINERS Arun Kumar U <u_arunkumar@yahoo.com> +=head1 COPYRIGHT + +Copyright (c) 1997-8 Graham Barr. All rights reserved. +This program is free software; you can redistribute it and/or modify it +under the same terms as Perl itself. + =cut diff --git a/perl/FromCPAN/Mail/Address.pm b/perl/FromCPAN/Mail/Address.pm new file mode 100644 index 0000000000..683d490b2b --- /dev/null +++ b/perl/FromCPAN/Mail/Address.pm @@ -0,0 +1,280 @@ +# Copyrights 1995-2018 by [Mark Overmeer]. +# For other contributors see ChangeLog. +# See the manual pages for details on the licensing terms. +# Pod stripped from pm file by OODoc 2.02. +# This code is part of the bundle MailTools. Meta-POD processed with +# OODoc into POD and HTML manual-pages. See README.md for Copyright. +# Licensed under the same terms as Perl itself. + +package Mail::Address; +use vars '$VERSION'; +$VERSION = '2.20'; + +use strict; + +use Carp; + +# use locale; removed in version 1.78, because it causes taint problems + +sub Version { our $VERSION } + + + +# given a comment, attempt to extract a person's name +sub _extract_name +{ # This function can be called as method as well + my $self = @_ && ref $_[0] ? shift : undef; + + local $_ = shift + or return ''; + + # Using encodings, too hard. See Mail::Message::Field::Full. + return '' if m/\=\?.*?\?\=/; + + # trim whitespace + s/^\s+//; + s/\s+$//; + s/\s+/ /; + + # Disregard numeric names (e.g. 123456.1234@compuserve.com) + return "" if /^[\d ]+$/; + + s/^\((.*)\)$/$1/; # remove outermost parenthesis + s/^"(.*)"$/$1/; # remove outer quotation marks + s/\(.*?\)//g; # remove minimal embedded comments + s/\\//g; # remove all escapes + s/^"(.*)"$/$1/; # remove internal quotation marks + s/^([^\s]+) ?, ?(.*)$/$2 $1/; # reverse "Last, First M." if applicable + s/,.*//; + + # Change casing only when the name contains only upper or only + # lower cased characters. 
+ unless( m/[A-Z]/ && m/[a-z]/ ) + { # Set the case of the name to first char upper rest lower + s/\b(\w+)/\L\u$1/igo; # Upcase first letter on name + s/\bMc(\w)/Mc\u$1/igo; # Scottish names such as 'McLeod' + s/\bo'(\w)/O'\u$1/igo; # Irish names such as 'O'Malley, O'Reilly' + s/\b(x*(ix)?v*(iv)?i*)\b/\U$1/igo; # Roman numerals, eg 'Level III Support' + } + + # some cleanup + s/\[[^\]]*\]//g; + s/(^[\s'"]+|[\s'"]+$)//g; + s/\s{2,}/ /g; + + $_; +} + +sub _tokenise +{ local $_ = join ',', @_; + my (@words,$snippet,$field); + + s/\A\s+//; + s/[\r\n]+/ /g; + + while ($_ ne '') + { $field = ''; + if(s/^\s*\(/(/ ) # (...) + { my $depth = 0; + + PAREN: while(s/^(\(([^\(\)\\]|\\.)*)//) + { $field .= $1; + $depth++; + while(s/^(([^\(\)\\]|\\.)*\)\s*)//) + { $field .= $1; + last PAREN unless --$depth; + $field .= $1 if s/^(([^\(\)\\]|\\.)+)//; + } + } + + carp "Unmatched () '$field' '$_'" + if $depth; + + $field =~ s/\s+\Z//; + push @words, $field; + + next; + } + + if( s/^("(?:[^"\\]+|\\.)*")\s*// # "..." + || s/^(\[(?:[^\]\\]+|\\.)*\])\s*// # [...] + || s/^([^\s()<>\@,;:\\".[\]]+)\s*// + || s/^([()<>\@,;:\\".[\]])\s*// + ) + { push @words, $1; + next; + } + + croak "Unrecognised line: $_"; + } + + push @words, ","; + \@words; +} + +sub _find_next +{ my ($idx, $tokens, $len) = @_; + + while($idx < $len) + { my $c = $tokens->[$idx]; + return $c if $c eq ',' || $c eq ';' || $c eq '<'; + $idx++; + } + + ""; +} + +sub _complete +{ my ($class, $phrase, $address, $comment) = @_; + + @$phrase || @$comment || @$address + or return undef; + + my $o = $class->new(join(" ",@$phrase), join("",@$address), join(" ",@$comment)); + @$phrase = @$address = @$comment = (); + $o; +} + +#------------ + +sub new(@) +{ my $class = shift; + bless [@_], $class; +} + + +sub parse(@) +{ my $class = shift; + my @line = grep {defined} @_; + my $line = join '', @line; + + my (@phrase, @comment, @address, @objs); + my ($depth, $idx) = (0, 0); + + my $tokens = _tokenise @line; + my $len = @$tokens; + my $next = _find_next $idx, $tokens, $len; + + local $_; + for(my $idx = 0; $idx < $len; $idx++) + { $_ = $tokens->[$idx]; + + if(substr($_,0,1) eq '(') { push @comment, $_ } + elsif($_ eq '<') { $depth++ } + elsif($_ eq '>') { $depth-- if $depth } + elsif($_ eq ',' || $_ eq ';') + { warn "Unmatched '<>' in $line" if $depth; + my $o = $class->_complete(\@phrase, \@address, \@comment); + push @objs, $o if defined $o; + $depth = 0; + $next = _find_next $idx+1, $tokens, $len; + } + elsif($depth) { push @address, $_ } + elsif($next eq '<') { push @phrase, $_ } + elsif( /^[.\@:;]$/ || !@address || $address[-1] =~ /^[.\@:;]$/ ) + { push @address, $_ } + else + { warn "Unmatched '<>' in $line" if $depth; + my $o = $class->_complete(\@phrase, \@address, \@comment); + push @objs, $o if defined $o; + $depth = 0; + push @address, $_; + } + } + @objs; +} + +#------------ + +sub phrase { shift->set_or_get(0, @_) } +sub address { shift->set_or_get(1, @_) } +sub comment { shift->set_or_get(2, @_) } + +sub set_or_get($) +{ my ($self, $i) = (shift, shift); + @_ or return $self->[$i]; + + my $val = $self->[$i]; + $self->[$i] = shift if @_; + $val; +} + + +my $atext = '[\-\w !#$%&\'*+/=?^`{|}~]'; +sub format +{ my @addrs; + + foreach (@_) + { my ($phrase, $email, $comment) = @$_; + my @addr; + + if(defined $phrase && length $phrase) + { push @addr + , $phrase =~ /^(?:\s*$atext\s*)+$/o ? $phrase + : $phrase =~ /(?<!\\)"/ ? 
$phrase + : qq("$phrase"); + + push @addr, "<$email>" + if defined $email && length $email; + } + elsif(defined $email && length $email) + { push @addr, $email; + } + + if(defined $comment && $comment =~ /\S/) + { $comment =~ s/^\s*\(?/(/; + $comment =~ s/\)?\s*$/)/; + } + + push @addr, $comment + if defined $comment && length $comment; + + push @addrs, join(" ", @addr) + if @addr; + } + + join ", ", @addrs; +} + +#------------ + +sub name +{ my $self = shift; + my $phrase = $self->phrase; + my $addr = $self->address; + + $phrase = $self->comment + unless defined $phrase && length $phrase; + + my $name = $self->_extract_name($phrase); + + # first.last@domain address + if($name eq '' && $addr =~ /([^\%\.\@_]+([\._][^\%\.\@_]+)+)[\@\%]/) + { ($name = $1) =~ s/[\._]+/ /g; + $name = _extract_name $name; + } + + if($name eq '' && $addr =~ m#/g=#i) # X400 style address + { my ($f) = $addr =~ m#g=([^/]*)#i; + my ($l) = $addr =~ m#s=([^/]*)#i; + $name = _extract_name "$f $l"; + } + + length $name ? $name : undef; +} + + +sub host +{ my $addr = shift->address || ''; + my $i = rindex $addr, '@'; + $i >= 0 ? substr($addr, $i+1) : undef; +} + + +sub user +{ my $addr = shift->address || ''; + my $i = rindex $addr, '@'; + $i >= 0 ? substr($addr,0,$i) : $addr; +} + +1; diff --git a/perl/Git.pm b/perl/Git.pm index ffa09ace92..16ebcc612c 100644 --- a/perl/Git.pm +++ b/perl/Git.pm @@ -9,7 +9,10 @@ package Git; use 5.008; use strict; +use warnings; +use File::Temp (); +use File::Spec (); BEGIN { @@ -101,7 +104,7 @@ increase notwithstanding). use Carp qw(carp croak); # but croak is bad - throw instead -use Error qw(:try); +use Git::LoadCPAN::Error qw(:try); use Cwd qw(abs_path cwd); use IPC::Open2 qw(open2); use Fcntl qw(SEEK_SET SEEK_CUR); @@ -189,7 +192,6 @@ sub repository { }; if ($dir) { - _verify_require(); File::Spec->file_name_is_absolute($dir) or $dir = $opts{Directory} . '/' . $dir; $opts{Repository} = abs_path($dir); @@ -534,7 +536,9 @@ If TIME is not supplied, the current local time is used. sub get_tz_offset { # some systems don't handle or mishandle %z, so be creative. my $t = shift || time; - my $gm = timegm(localtime($t)); + my @t = localtime($t); + $t[5] += 1900; + my $gm = timegm(@t); my $sign = qw( + + - )[ $gm <=> $t ]; return sprintf("%s%02d%02d", $sign, (gmtime(abs($t - $gm)))[2,1]); } @@ -880,77 +884,6 @@ sub ident_person { return "$ident[0] <$ident[1]>"; } -=item parse_mailboxes - -Return an array of mailboxes extracted from a string. - -=cut - -# Very close to Mail::Address's parser, but we still have minor -# differences in some cases (see t9000 for examples). 
-sub parse_mailboxes { - my $re_comment = qr/\((?:[^)]*)\)/; - my $re_quote = qr/"(?:[^\"\\]|\\.)*"/; - my $re_word = qr/(?:[^]["\s()<>:;@\\,.]|\\.)+/; - - # divide the string in tokens of the above form - my $re_token = qr/(?:$re_quote|$re_word|$re_comment|\S)/; - my @tokens = map { $_ =~ /\s*($re_token)\s*/g } @_; - my $end_of_addr_seen = 0; - - # add a delimiter to simplify treatment for the last mailbox - push @tokens, ","; - - my (@addr_list, @phrase, @address, @comment, @buffer) = (); - foreach my $token (@tokens) { - if ($token =~ /^[,;]$/) { - # if buffer still contains undeterminated strings - # append it at the end of @address or @phrase - if ($end_of_addr_seen) { - push @phrase, @buffer; - } else { - push @address, @buffer; - } - - my $str_phrase = join ' ', @phrase; - my $str_address = join '', @address; - my $str_comment = join ' ', @comment; - - # quote are necessary if phrase contains - # special characters - if ($str_phrase =~ /[][()<>:;@\\,.\000-\037\177]/) { - $str_phrase =~ s/(^|[^\\])"/$1/g; - $str_phrase = qq["$str_phrase"]; - } - - # add "<>" around the address if necessary - if ($str_address ne "" && $str_phrase ne "") { - $str_address = qq[<$str_address>]; - } - - my $str_mailbox = "$str_phrase $str_address $str_comment"; - $str_mailbox =~ s/^\s*|\s*$//g; - push @addr_list, $str_mailbox if ($str_mailbox); - - @phrase = @address = @comment = @buffer = (); - $end_of_addr_seen = 0; - } elsif ($token =~ /^\(/) { - push @comment, $token; - } elsif ($token eq "<") { - push @phrase, (splice @address), (splice @buffer); - } elsif ($token eq ">") { - $end_of_addr_seen = 1; - push @address, (splice @buffer); - } elsif ($token eq "@" && !$end_of_addr_seen) { - push @address, (splice @buffer), "@"; - } else { - push @buffer, $token; - } - } - - return @addr_list; -} - =item hash_object ( TYPE, FILENAME ) Compute the SHA1 object id of the given C<FILENAME> considering it is @@ -1359,8 +1292,6 @@ sub temp_release { sub _temp_cache { my ($self, $name) = _maybe_self(@_); - _verify_require(); - my $temp_fd = \$TEMP_FILEMAP{$name}; if (defined $$temp_fd and $$temp_fd->opened) { if ($TEMP_FILES{$$temp_fd}{locked}) { @@ -1394,11 +1325,6 @@ sub _temp_cache { $$temp_fd; } -sub _verify_require { - eval { require File::Temp; require File::Spec; }; - $@ and throw Error::Simple($@); -} - =item temp_reset ( FILEHANDLE ) Truncates and resets the position of the C<FILEHANDLE>. @@ -1763,7 +1689,6 @@ sub DESTROY { # Pipe implementation for ActiveState Perl. package Git::activestate_pipe; -use strict; sub TIEHANDLE { my ($class, @params) = @_; diff --git a/perl/Git/I18N.pm b/perl/Git/I18N.pm index 836a5c2382..dba96fff0a 100644 --- a/perl/Git/I18N.pm +++ b/perl/Git/I18N.pm @@ -18,7 +18,7 @@ our @EXPORT_OK = @EXPORT; sub __bootstrap_locale_messages { our $TEXTDOMAIN = 'git'; - our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '++LOCALEDIR++'; + our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '@@LOCALEDIR@@'; require POSIX; POSIX->import(qw(setlocale)); diff --git a/perl/Git/LoadCPAN.pm b/perl/Git/LoadCPAN.pm new file mode 100644 index 0000000000..e5585e75e8 --- /dev/null +++ b/perl/Git/LoadCPAN.pm @@ -0,0 +1,104 @@ +package Git::LoadCPAN; +use 5.008; +use strict; +use warnings; + +=head1 NAME + +Git::LoadCPAN - Wrapper for loading modules from the CPAN (OS) or Git's own copy + +=head1 DESCRIPTION + +The Perl code in Git depends on some modules from the CPAN, but we +don't want to make those a hard requirement for anyone building from +source. 
+ +Therefore the L<Git::LoadCPAN> namespace shipped with Git contains +wrapper modules like C<Git::LoadCPAN::Module::Name> that will first +attempt to load C<Module::Name> from the OS, and if that doesn't work +will fall back on C<FromCPAN::Module::Name> shipped with Git itself. + +Usually distributors will not ship with Git's Git::FromCPAN tree at +all via the C<NO_PERL_CPAN_FALLBACKS> option, preferring to use their +own packaging of CPAN modules instead. + +This module is only intended to be used for code shipping in the +C<git.git> repository. Use it for anything else at your peril! + +=cut + +# NO_PERL_CPAN_FALLBACKS_STR evades the sed search-replace from the +# Makefile, and allows for detecting whether the module is loaded from +# perl/Git as opposed to perl/build/Git, which is useful for one-off +# testing without having Error.pm et al installed. +use constant NO_PERL_CPAN_FALLBACKS_STR => '@@' . 'NO_PERL_CPAN_FALLBACKS' . '@@'; +use constant NO_PERL_CPAN_FALLBACKS => ( + q[@@NO_PERL_CPAN_FALLBACKS@@] ne '' + and + q[@@NO_PERL_CPAN_FALLBACKS@@] ne NO_PERL_CPAN_FALLBACKS_STR +); + +sub import { + shift; + my $caller = caller; + my %args = @_; + my $module = exists $args{module} ? delete $args{module} : die "BUG: Expected 'module' parameter!"; + my $import = exists $args{import} ? delete $args{import} : die "BUG: Expected 'import' parameter!"; + die "BUG: Too many arguments!" if keys %args; + + # Foo::Bar to Foo/Bar.pm + my $package_pm = $module; + $package_pm =~ s[::][/]g; + $package_pm .= '.pm'; + + eval { + require $package_pm; + 1; + } or do { + my $error = $@ || "Zombie Error"; + + if (NO_PERL_CPAN_FALLBACKS) { + chomp(my $error = sprintf <<'THEY_PROMISED', $module); +BUG: The '%s' module is not here, but NO_PERL_CPAN_FALLBACKS was set! + +Git needs this Perl module from the CPAN, and will by default ship +with a copy of it. This Git was built with NO_PERL_CPAN_FALLBACKS, +meaning that whoever built it promised to provide this module. + +You're seeing this error because they broke that promise, and we can't +load our fallback version, since we were asked not to install it. + +If you're seeing this error and didn't package Git yourself the +package you're using is broken, or your system is broken. This error +won't appear if Git is built without NO_PERL_CPAN_FALLBACKS (instead +we'll use our fallback version of the module). +THEY_PROMISED + die $error; + } + + my $Git_LoadCPAN_pm_path = $INC{"Git/LoadCPAN.pm"} || die "BUG: Should have our own path from %INC!"; + + require File::Basename; + my $Git_LoadCPAN_pm_root = File::Basename::dirname($Git_LoadCPAN_pm_path) || die "BUG: Can't figure out lib/Git dirname from '$Git_LoadCPAN_pm_path'!"; + + require File::Spec; + my $Git_pm_FromCPAN_root = File::Spec->catdir($Git_LoadCPAN_pm_root, '..', 'FromCPAN'); + die "BUG: '$Git_pm_FromCPAN_root' should be a directory!" 
unless -d $Git_pm_FromCPAN_root; + + local @INC = ($Git_pm_FromCPAN_root, @INC); + require $package_pm; + }; + + if ($import) { + no strict 'refs'; + *{"${caller}::import"} = sub { + shift; + use strict 'refs'; + unshift @_, $module; + goto &{"${module}::import"}; + }; + use strict 'refs'; + } +} + +1; diff --git a/perl/Git/LoadCPAN/Error.pm b/perl/Git/LoadCPAN/Error.pm new file mode 100644 index 0000000000..c6d2c45d80 --- /dev/null +++ b/perl/Git/LoadCPAN/Error.pm @@ -0,0 +1,10 @@ +package Git::LoadCPAN::Error; +use 5.008; +use strict; +use warnings; +use Git::LoadCPAN ( + module => 'Error', + import => 1, +); + +1; diff --git a/perl/Git/LoadCPAN/Mail/Address.pm b/perl/Git/LoadCPAN/Mail/Address.pm new file mode 100644 index 0000000000..f70a4f064c --- /dev/null +++ b/perl/Git/LoadCPAN/Mail/Address.pm @@ -0,0 +1,10 @@ +package Git::LoadCPAN::Mail::Address; +use 5.008; +use strict; +use warnings; +use Git::LoadCPAN ( + module => 'Mail::Address', + import => 0, +); + +1; diff --git a/perl/Git/SVN.pm b/perl/Git/SVN.pm index bc4eed3d75..991a5885e9 100644 --- a/perl/Git/SVN.pm +++ b/perl/Git/SVN.pm @@ -1405,7 +1405,7 @@ sub parse_svn_date { $ENV{TZ} = 'UTC'; my $epoch_in_UTC = - Time::Local::timelocal($S, $M, $H, $d, $m - 1, $Y - 1900); + Time::Local::timelocal($S, $M, $H, $d, $m - 1, $Y); # Determine our local timezone (including DST) at the # time of $epoch_in_UTC. $Git::SVN::Log::TZ stored the diff --git a/perl/Makefile b/perl/Makefile deleted file mode 100644 index f657de20e3..0000000000 --- a/perl/Makefile +++ /dev/null @@ -1,90 +0,0 @@ -# -# Makefile for perl support modules and routine -# -makfile:=perl.mak -modules = - -PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH)) -prefix_SQ = $(subst ','\'',$(prefix)) -localedir_SQ = $(subst ','\'',$(localedir)) - -ifndef V - QUIET = @ -endif - -all install instlibdir: $(makfile) - $(QUIET)$(MAKE) -f $(makfile) $@ - -clean: - $(QUIET)test -f $(makfile) && $(MAKE) -f $(makfile) $@ || exit 0 - $(RM) ppport.h - $(RM) $(makfile) - $(RM) $(makfile).old - $(RM) PM.stamp - -$(makfile): PM.stamp - -ifdef NO_PERL_MAKEMAKER -instdir_SQ = $(subst ','\'',$(prefix)/lib) - -modules += Git -modules += Git/I18N -modules += Git/IndexInfo -modules += Git/Packet -modules += Git/SVN -modules += Git/SVN/Memoize/YAML -modules += Git/SVN/Fetcher -modules += Git/SVN/Editor -modules += Git/SVN/GlobSpec -modules += Git/SVN/Log -modules += Git/SVN/Migration -modules += Git/SVN/Prompt -modules += Git/SVN/Ra -modules += Git/SVN/Utils - -$(makfile): ../GIT-CFLAGS Makefile - echo all: private-Error.pm Git.pm Git/I18N.pm > $@ - set -e; \ - for i in $(modules); \ - do \ - if test $$i = $${i%/*}; \ - then \ - subdir=; \ - else \ - subdir=/$${i%/*}; \ - fi; \ - echo ' $(RM) blib/lib/'$$i'.pm' >> $@; \ - echo ' mkdir -p blib/lib'$$subdir >> $@; \ - echo ' cp '$$i'.pm blib/lib/'$$i'.pm' >> $@; \ - done - echo ' $(RM) blib/lib/Error.pm' >> $@ - '$(PERL_PATH_SQ)' -MError -e 'exit($$Error::VERSION < 0.15009)' || \ - echo ' cp private-Error.pm blib/lib/Error.pm' >> $@ - echo install: >> $@ - set -e; \ - for i in $(modules); \ - do \ - if test $$i = $${i%/*}; \ - then \ - subdir=; \ - else \ - subdir=/$${i%/*}; \ - fi; \ - echo ' $(RM) "$$(DESTDIR)$(instdir_SQ)/'$$i'.pm"' >> $@; \ - echo ' mkdir -p "$$(DESTDIR)$(instdir_SQ)'$$subdir'"' >> $@; \ - echo ' cp '$$i'.pm "$$(DESTDIR)$(instdir_SQ)/'$$i'.pm"' >> $@; \ - done - echo ' $(RM) "$$(DESTDIR)$(instdir_SQ)/Error.pm"' >> $@ - '$(PERL_PATH_SQ)' -MError -e 'exit($$Error::VERSION < 0.15009)' || \ - echo ' cp private-Error.pm 
"$$(DESTDIR)$(instdir_SQ)/Error.pm"' >> $@ - echo instlibdir: >> $@ - echo ' echo $(instdir_SQ)' >> $@ -else -$(makfile): Makefile.PL ../GIT-CFLAGS - $(PERL_PATH) $< PREFIX='$(prefix_SQ)' INSTALL_BASE='' --localedir='$(localedir_SQ)' -endif - -# this is just added comfort for calling make directly in perl dir -# (even though GIT-CFLAGS aren't used yet. If ever) -../GIT-CFLAGS: - $(MAKE) -C .. GIT-CFLAGS diff --git a/perl/Makefile.PL b/perl/Makefile.PL deleted file mode 100644 index 3f29ba98a6..0000000000 --- a/perl/Makefile.PL +++ /dev/null @@ -1,62 +0,0 @@ -use strict; -use warnings; -use ExtUtils::MakeMaker; -use Getopt::Long; -use File::Find; - -# Don't forget to update the perl/Makefile, too. -# Don't forget to test with NO_PERL_MAKEMAKER=YesPlease - -# Sanity: die at first unknown option -Getopt::Long::Configure qw/ pass_through /; - -my $localedir = ''; -GetOptions("localedir=s" => \$localedir); - -sub MY::postamble { - return <<'MAKE_FRAG'; -instlibdir: - @echo '$(INSTALLSITELIB)' - -ifneq (,$(DESTDIR)) -ifeq (0,$(shell expr '$(MM_VERSION)' '>' 6.10)) -$(error ExtUtils::MakeMaker version "$(MM_VERSION)" is older than 6.11 and so \ - is likely incompatible with the DESTDIR mechanism. Try setting \ - NO_PERL_MAKEMAKER=1 instead) -endif -endif - -MAKE_FRAG -} - -# Find all the .pm files in "Git/" and Git.pm -my %pm; -find sub { - return unless /\.pm$/; - - # sometimes File::Find prepends a ./ Strip it. - my $pm_path = $File::Find::name; - $pm_path =~ s{^\./}{}; - - $pm{$pm_path} = '$(INST_LIBDIR)/'.$pm_path; -}, "Git", "Git.pm"; - - -# We come with our own bundled Error.pm. It's not in the set of default -# Perl modules so install it if it's not available on the system yet. -if ( !eval { require Error } || $Error::VERSION < 0.15009) { - $pm{'private-Error.pm'} = '$(INST_LIBDIR)/Error.pm'; -} - -# redirect stdout, otherwise the message "Writing perl.mak for Git" -# disrupts the output for the target 'instlibdir' -open STDOUT, ">&STDERR"; - -WriteMakefile( - NAME => 'Git', - VERSION_FROM => 'Git.pm', - PM => \%pm, - PM_FILTER => qq[\$(PERL) -pe "s<\\Q++LOCALEDIR++\\E><$localedir>"], - MAKEFILE => 'perl.mak', - INSTALLSITEMAN3DIR => '$(SITEPREFIX)/share/man/man3' -); diff --git a/preload-index.c b/preload-index.c index 2a83255e4e..4d08d44874 100644 --- a/preload-index.c +++ b/preload-index.c @@ -78,6 +78,7 @@ static void preload_index(struct index_state *index, { int threads, i, work, offset; struct thread_data data[MAX_PARALLEL]; + uint64_t start = getnanotime(); if (!core_preload_index) return; @@ -108,6 +109,7 @@ static void preload_index(struct index_state *index, if (pthread_join(p->pthread, NULL)) die("unable to join threaded lstat"); } + trace_performance_since(start, "preload index"); } #endif @@ -43,6 +43,22 @@ void sq_quote_buf(struct strbuf *dst, const char *src) free(to_free); } +void sq_quote_buf_pretty(struct strbuf *dst, const char *src) +{ + static const char ok_punct[] = "+,-./:=@_^"; + const char *p; + + for (p = src; *p; p++) { + if (!isalpha(*p) && !isdigit(*p) && !strchr(ok_punct, *p)) { + sq_quote_buf(dst, src); + return; + } + } + + /* if we get here, we did not need quoting */ + strbuf_addstr(dst, src); +} + void sq_quotef(struct strbuf *dst, const char *fmt, ...) { struct strbuf src = STRBUF_INIT; @@ -56,7 +72,7 @@ void sq_quotef(struct strbuf *dst, const char *fmt, ...) 
strbuf_release(&src); } -void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen) +void sq_quote_argv(struct strbuf *dst, const char **argv) { int i; @@ -65,8 +81,16 @@ void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen) for (i = 0; argv[i]; ++i) { strbuf_addch(dst, ' '); sq_quote_buf(dst, argv[i]); - if (maxlen && dst->len > maxlen) - die("Too many or long arguments"); + } +} + +void sq_quote_argv_pretty(struct strbuf *dst, const char **argv) +{ + int i; + + for (i = 0; argv[i]; i++) { + strbuf_addch(dst, ' '); + sq_quote_buf_pretty(dst, argv[i]); } } @@ -94,9 +118,15 @@ static char *sq_dequote_step(char *arg, char **next) *next = NULL; return arg; case '\\': - c = *++src; - if (need_bs_quote(c) && *++src == '\'') { - *dst++ = c; + /* + * Allow backslashed characters outside of + * single-quotes only if they need escaping, + * and only if we resume the single-quoted part + * afterward. + */ + if (need_bs_quote(src[1]) && src[2] == '\'') { + *dst++ = src[1]; + src += 2; continue; } /* Fallthrough */ @@ -30,9 +30,17 @@ struct strbuf; */ extern void sq_quote_buf(struct strbuf *, const char *src); -extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen); +extern void sq_quote_argv(struct strbuf *, const char **argv); extern void sq_quotef(struct strbuf *, const char *fmt, ...); +/* + * These match their non-pretty variants, except that they avoid + * quoting when there are no exotic characters. These should only be used for + * human-readable output, as sq_dequote() is not smart enough to dequote it. + */ +void sq_quote_buf_pretty(struct strbuf *, const char *src); +void sq_quote_argv_pretty(struct strbuf *, const char **argv); + /* This unwraps what sq_quote() produces in place, but returns * NULL if the input does not look like what sq_quote would have * produced. 
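
A minimal sketch of how a caller might use the new pretty-quoting helpers for
human-readable output (this example is not part of the patch; the includes and
the argv values here are assumed purely for illustration):

    #include "cache.h"
    #include "quote.h"

    static void show_pretty_quoting(void)
    {
            struct strbuf buf = STRBUF_INIT;
            const char *argv[] = { "git", "commit", "-m", "fix thing", NULL };

            /*
             * Plain words stay unquoted; "fix thing" contains a space,
             * which is outside the ok_punct set above, so only that
             * argument falls back to sq_quote_buf() and is single-quoted.
             */
            sq_quote_argv_pretty(&buf, argv);

            /* buf.buf now holds: " git commit -m 'fix thing'" */
            strbuf_release(&buf);
    }

As the header comment notes, this form is meant only for tracing and other
human-readable output; sq_dequote() cannot reliably recover the original
arguments from it.
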
diff --git a/reachable.c b/reachable.c index 88d7d679da..191ebe3e6a 100644 --- a/reachable.c +++ b/reachable.c @@ -94,7 +94,7 @@ static void add_recent_object(const struct object_id *oid, break; default: die("unknown object type for %s: %s", - oid_to_hex(oid), typename(type)); + oid_to_hex(oid), type_name(type)); } if (!obj) diff --git a/read-cache.c b/read-cache.c index 198e72b685..59a73f4a81 100644 --- a/read-cache.c +++ b/read-cache.c @@ -62,6 +62,7 @@ static void replace_index_entry(struct index_state *istate, int nr, struct cache replace_index_entry_in_base(istate, old, ce); remove_name_hash(istate, old); free(old); + ce->ce_flags &= ~CE_HASHED; set_index_entry(istate, nr, ce); ce->ce_flags |= CE_UPDATE_IN_BASE; mark_fsmonitor_invalid(istate, ce); @@ -70,20 +71,20 @@ static void replace_index_entry(struct index_state *istate, int nr, struct cache void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name) { - struct cache_entry *old = istate->cache[nr], *new; + struct cache_entry *old_entry = istate->cache[nr], *new_entry; int namelen = strlen(new_name); - new = xmalloc(cache_entry_size(namelen)); - copy_cache_entry(new, old); - new->ce_flags &= ~CE_HASHED; - new->ce_namelen = namelen; - new->index = 0; - memcpy(new->name, new_name, namelen + 1); + new_entry = xmalloc(cache_entry_size(namelen)); + copy_cache_entry(new_entry, old_entry); + new_entry->ce_flags &= ~CE_HASHED; + new_entry->ce_namelen = namelen; + new_entry->index = 0; + memcpy(new_entry->name, new_name, namelen + 1); - cache_tree_invalidate_path(istate, old->name); - untracked_cache_remove_from_index(istate, old->name); + cache_tree_invalidate_path(istate, old_entry->name); + untracked_cache_remove_from_index(istate, old_entry->name); remove_index_entry_at(istate, nr); - add_index_entry(istate, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE); + add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE); } void fill_stat_data(struct stat_data *sd, struct stat *st) @@ -615,26 +616,26 @@ static struct cache_entry *create_alias_ce(struct index_state *istate, struct cache_entry *alias) { int len; - struct cache_entry *new; + struct cache_entry *new_entry; if (alias->ce_flags & CE_ADDED) die("Will not add file alias '%s' ('%s' already exists in index)", ce->name, alias->name); /* Ok, create the new entry using the name of the existing alias */ len = ce_namelen(alias); - new = xcalloc(1, cache_entry_size(len)); - memcpy(new->name, alias->name, len); - copy_cache_entry(new, ce); + new_entry = xcalloc(1, cache_entry_size(len)); + memcpy(new_entry->name, alias->name, len); + copy_cache_entry(new_entry, ce); save_or_free_index_entry(istate, ce); - return new; + return new_entry; } void set_object_name_for_intent_to_add_entry(struct cache_entry *ce) { - unsigned char sha1[20]; - if (write_sha1_file("", 0, blob_type, sha1)) + struct object_id oid; + if (write_object_file("", 0, blob_type, &oid)) die("cannot create an empty blob in the object database"); - hashcpy(ce->oid.hash, sha1); + oidcpy(&ce->oid, &oid); } int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags) @@ -1217,9 +1218,8 @@ int add_index_entry(struct index_state *istate, struct cache_entry *ce, int opti /* Add it in.. 
*/ istate->cache_nr++; if (istate->cache_nr > pos + 1) - memmove(istate->cache + pos + 1, - istate->cache + pos, - (istate->cache_nr - pos - 1) * sizeof(ce)); + MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos, + istate->cache_nr - pos - 1); set_index_entry(istate, pos, ce); istate->cache_changed |= CE_ENTRY_ADDED; return 0; @@ -1325,7 +1325,8 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate, size = ce_size(ce); updated = xmalloc(size); - memcpy(updated, ce, size); + copy_cache_entry(updated, ce); + memcpy(updated->name, ce->name, ce->ce_namelen + 1); fill_stat_cache_info(updated, &st); /* * If ignore_valid is not set, we should leave CE_VALID bit @@ -1372,6 +1373,7 @@ int refresh_index(struct index_state *istate, unsigned int flags, const char *typechange_fmt; const char *added_fmt; const char *unmerged_fmt; + uint64_t start = getnanotime(); modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n"); deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n"); @@ -1379,7 +1381,7 @@ int refresh_index(struct index_state *istate, unsigned int flags, added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n"); unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n"); for (i = 0; i < istate->cache_nr; i++) { - struct cache_entry *ce, *new; + struct cache_entry *ce, *new_entry; int cache_errno = 0; int changed = 0; int filtered = 0; @@ -1408,10 +1410,10 @@ int refresh_index(struct index_state *istate, unsigned int flags, if (filtered) continue; - new = refresh_cache_ent(istate, ce, options, &cache_errno, &changed); - if (new == ce) + new_entry = refresh_cache_ent(istate, ce, options, &cache_errno, &changed); + if (new_entry == ce) continue; - if (!new) { + if (!new_entry) { const char *fmt; if (really && cache_errno == EINVAL) { @@ -1440,8 +1442,9 @@ int refresh_index(struct index_state *istate, unsigned int flags, continue; } - replace_index_entry(istate, i, new); + replace_index_entry(istate, i, new_entry); } + trace_performance_since(start, "refresh index"); return has_errors; } @@ -1545,8 +1548,8 @@ int verify_ce_order; static int verify_hdr(struct cache_header *hdr, unsigned long size) { - git_SHA_CTX c; - unsigned char sha1[20]; + git_hash_ctx c; + unsigned char hash[GIT_MAX_RAWSZ]; int hdr_version; if (hdr->hdr_signature != htonl(CACHE_SIGNATURE)) @@ -1558,10 +1561,10 @@ static int verify_hdr(struct cache_header *hdr, unsigned long size) if (!verify_index_checksum) return 0; - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, size - 20); - git_SHA1_Final(sha1, &c); - if (hashcmp(sha1, (unsigned char *)hdr + size - 20)) + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz); + the_hash_algo->final_fn(hash, &c); + if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz)) return error("bad index file sha1 signature"); return 0; } @@ -1791,7 +1794,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) die_errno("cannot stat the open index"); mmap_size = xsize_t(st.st_size); - if (mmap_size < sizeof(struct cache_header) + 20) + if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz) die("index file smaller than expected"); mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0); @@ -1803,7 +1806,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) if (verify_hdr(hdr, mmap_size) < 0) goto unmap; - hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - 20); + hashcpy(istate->sha1, (const unsigned char *)hdr + 
mmap_size - the_hash_algo->rawsz); istate->version = ntohl(hdr->hdr_version); istate->cache_nr = ntohl(hdr->hdr_entries); istate->cache_alloc = alloc_nr(istate->cache_nr); @@ -1831,7 +1834,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) istate->timestamp.sec = st.st_mtime; istate->timestamp.nsec = ST_MTIME_NSEC(st); - while (src_offset <= mmap_size - 20 - 8) { + while (src_offset <= mmap_size - the_hash_algo->rawsz - 8) { /* After an array of active_nr index entries, * there can be arbitrary number of extended * sections, each of which is prefixed with @@ -1872,6 +1875,7 @@ static void freshen_shared_index(const char *shared_index, int warn) int read_index_from(struct index_state *istate, const char *path, const char *gitdir) { + uint64_t start = getnanotime(); struct split_index *split_index; int ret; char *base_sha1_hex; @@ -1882,6 +1886,7 @@ int read_index_from(struct index_state *istate, const char *path, return istate->cache_nr; ret = do_read_index(istate, path, 0); + trace_performance_since(start, "read cache %s", path); split_index = istate->split_index; if (!split_index || is_null_sha1(split_index->base_sha1)) { @@ -1905,6 +1910,7 @@ int read_index_from(struct index_state *istate, const char *path, freshen_shared_index(base_path, 0); merge_base_index(istate); post_read_index_from(istate); + trace_performance_since(start, "read cache %s", base_path); free(base_path); return ret; } @@ -1957,11 +1963,11 @@ int unmerged_index(const struct index_state *istate) static unsigned char write_buffer[WRITE_BUFFER_SIZE]; static unsigned long write_buffer_len; -static int ce_write_flush(git_SHA_CTX *context, int fd) +static int ce_write_flush(git_hash_ctx *context, int fd) { unsigned int buffered = write_buffer_len; if (buffered) { - git_SHA1_Update(context, write_buffer, buffered); + the_hash_algo->update_fn(context, write_buffer, buffered); if (write_in_full(fd, write_buffer, buffered) < 0) return -1; write_buffer_len = 0; @@ -1969,7 +1975,7 @@ static int ce_write_flush(git_SHA_CTX *context, int fd) return 0; } -static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len) +static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len) { while (len) { unsigned int buffered = write_buffer_len; @@ -1991,7 +1997,7 @@ static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len) return 0; } -static int write_index_ext_header(git_SHA_CTX *context, int fd, +static int write_index_ext_header(git_hash_ctx *context, int fd, unsigned int ext, unsigned int sz) { ext = htonl(ext); @@ -2000,26 +2006,26 @@ static int write_index_ext_header(git_SHA_CTX *context, int fd, (ce_write(context, fd, &sz, 4) < 0)) ? 
-1 : 0; } -static int ce_flush(git_SHA_CTX *context, int fd, unsigned char *sha1) +static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash) { unsigned int left = write_buffer_len; if (left) { write_buffer_len = 0; - git_SHA1_Update(context, write_buffer, left); + the_hash_algo->update_fn(context, write_buffer, left); } - /* Flush first if not enough space for SHA1 signature */ - if (left + 20 > WRITE_BUFFER_SIZE) { + /* Flush first if not enough space for hash signature */ + if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) { if (write_in_full(fd, write_buffer, left) < 0) return -1; left = 0; } - /* Append the SHA1 signature at the end */ - git_SHA1_Final(write_buffer + left, context); - hashcpy(sha1, write_buffer + left); - left += 20; + /* Append the hash signature at the end */ + the_hash_algo->final_fn(write_buffer + left, context); + hashcpy(hash, write_buffer + left); + left += the_hash_algo->rawsz; return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0; } @@ -2100,17 +2106,19 @@ static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk, } } -static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce, +static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce, struct strbuf *previous_name, struct ondisk_cache_entry *ondisk) { int size; - int saved_namelen = saved_namelen; /* compiler workaround */ int result; + unsigned int saved_namelen; + int stripped_name = 0; static unsigned char padding[8] = { 0x00 }; if (ce->ce_flags & CE_STRIP_NAME) { saved_namelen = ce_namelen(ce); ce->ce_namelen = 0; + stripped_name = 1; } if (ce->ce_flags & CE_EXTENDED) @@ -2150,7 +2158,7 @@ static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce, strbuf_splice(previous_name, common, to_remove, ce->name + common, ce_namelen(ce) - common); } - if (ce->ce_flags & CE_STRIP_NAME) { + if (stripped_name) { ce->ce_namelen = saved_namelen; ce->ce_flags &= ~CE_STRIP_NAME; } @@ -2167,7 +2175,7 @@ static int verify_index_from(const struct index_state *istate, const char *path) int fd; ssize_t n; struct stat st; - unsigned char sha1[20]; + unsigned char hash[GIT_MAX_RAWSZ]; if (!istate->initialized) return 0; @@ -2179,14 +2187,14 @@ static int verify_index_from(const struct index_state *istate, const char *path) if (fstat(fd, &st)) goto out; - if (st.st_size < sizeof(struct cache_header) + 20) + if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz) goto out; - n = pread_in_full(fd, sha1, 20, st.st_size - 20); - if (n != 20) + n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz); + if (n != the_hash_algo->rawsz) goto out; - if (hashcmp(istate->sha1, sha1)) + if (hashcmp(istate->sha1, hash)) goto out; close(fd); @@ -2234,8 +2242,9 @@ void update_index_if_able(struct index_state *istate, struct lock_file *lockfile static int do_write_index(struct index_state *istate, struct tempfile *tempfile, int strip_extensions) { + uint64_t start = getnanotime(); int newfd = tempfile->fd; - git_SHA_CTX c; + git_hash_ctx c; struct cache_header hdr; int i, err = 0, removed, extended, hdr_version; struct cache_entry **cache = istate->cache; @@ -2273,7 +2282,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, hdr.hdr_version = htonl(hdr_version); hdr.hdr_entries = htonl(entries - removed); - git_SHA1_Init(&c); + the_hash_algo->init_fn(&c); if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0) return -1; @@ -2374,6 +2383,7 @@ static int do_write_index(struct index_state *istate, 
struct tempfile *tempfile, return -1; istate->timestamp.sec = (unsigned int)st.st_mtime; istate->timestamp.nsec = ST_MTIME_NSEC(st); + trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed); return 0; } @@ -2532,6 +2542,12 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock, int new_shared_index, ret; struct split_index *si = istate->split_index; + if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) { + if (flags & COMMIT_LOCK) + rollback_lock_file(lock); + return 0; + } + if (istate->fsmonitor_last_update) fill_fsmonitor_bitmap(istate); diff --git a/ref-filter.c b/ref-filter.c index f9e25aea7a..45fc56216a 100644 --- a/ref-filter.c +++ b/ref-filter.c @@ -529,12 +529,12 @@ static void end_align_handler(struct ref_formatting_stack **stack) static void align_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state) { - struct ref_formatting_stack *new; + struct ref_formatting_stack *new_stack; push_stack_element(&state->stack); - new = state->stack; - new->at_end = end_align_handler; - new->at_end_data = &atomv->atom->u.align; + new_stack = state->stack; + new_stack->at_end = end_align_handler; + new_stack->at_end_data = &atomv->atom->u.align; } static void if_then_else_handler(struct ref_formatting_stack **stack) @@ -574,16 +574,16 @@ static void if_then_else_handler(struct ref_formatting_stack **stack) static void if_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state) { - struct ref_formatting_stack *new; + struct ref_formatting_stack *new_stack; struct if_then_else *if_then_else = xcalloc(sizeof(struct if_then_else), 1); if_then_else->str = atomv->atom->u.if_then_else.str; if_then_else->cmp_status = atomv->atom->u.if_then_else.cmp_status; push_stack_element(&state->stack); - new = state->stack; - new->at_end = if_then_else_handler; - new->at_end_data = if_then_else; + new_stack = state->stack; + new_stack->at_end = if_then_else_handler; + new_stack->at_end_data = if_then_else; } static int is_empty(const char *s) @@ -769,7 +769,7 @@ static void grab_common_values(struct atom_value *val, int deref, struct object if (deref) name++; if (!strcmp(name, "objecttype")) - v->s = typename(obj->type); + v->s = type_name(obj->type); else if (!strcmp(name, "objectsize")) { v->value = sz; v->s = xstrfmt("%lu", sz); @@ -795,7 +795,7 @@ static void grab_tag_values(struct atom_value *val, int deref, struct object *ob if (!strcmp(name, "tag")) v->s = tag->tag; else if (!strcmp(name, "type") && tag->tagged) - v->s = typename(tag->tagged->type); + v->s = type_name(tag->tagged->type); else if (!strcmp(name, "object") && tag->tagged) v->s = xstrdup(oid_to_hex(&tag->tagged->oid)); } @@ -1249,8 +1249,8 @@ static void fill_remote_ref_details(struct used_atom *atom, const char *refname, if (atom->u.remote_ref.option == RR_REF) *s = show_ref(&atom->u.remote_ref.refname, refname); else if (atom->u.remote_ref.option == RR_TRACK) { - if (stat_tracking_info(branch, &num_ours, - &num_theirs, NULL)) { + if (stat_tracking_info(branch, &num_ours, &num_theirs, + NULL, AHEAD_BEHIND_FULL) < 0) { *s = xstrdup(msgs.gone); } else if (!num_ours && !num_theirs) *s = ""; @@ -1267,8 +1267,8 @@ static void fill_remote_ref_details(struct used_atom *atom, const char *refname, free((void *)to_free); } } else if (atom->u.remote_ref.option == RR_TRACKSHORT) { - if (stat_tracking_info(branch, &num_ours, - &num_theirs, NULL)) + if (stat_tracking_info(branch, &num_ours, &num_theirs, + NULL, AHEAD_BEHIND_FULL) < 0) return; if (!num_ours && 
!num_theirs) @@ -1354,15 +1354,31 @@ static const char *get_refname(struct used_atom *atom, struct ref_array_item *re return show_ref(&atom->u.refname, ref->refname); } +static void get_object(struct ref_array_item *ref, const struct object_id *oid, + int deref, struct object **obj) +{ + int eaten; + unsigned long size; + void *buf = get_obj(oid, obj, &size, &eaten); + if (!buf) + die(_("missing object %s for %s"), + oid_to_hex(oid), ref->refname); + if (!*obj) + die(_("parse_object_buffer failed on %s for %s"), + oid_to_hex(oid), ref->refname); + + grab_values(ref->value, deref, *obj, buf, size); + if (!eaten) + free(buf); +} + /* * Parse the object referred by ref, and grab needed value. */ static void populate_value(struct ref_array_item *ref) { - void *buf; struct object *obj; - int eaten, i; - unsigned long size; + int i; const struct object_id *tagged; ref->value = xcalloc(used_atom_cnt, sizeof(struct atom_value)); @@ -1478,22 +1494,12 @@ static void populate_value(struct ref_array_item *ref) for (i = 0; i < used_atom_cnt; i++) { struct atom_value *v = &ref->value[i]; if (v->s == NULL) - goto need_obj; + break; } - return; - - need_obj: - buf = get_obj(&ref->objectname, &obj, &size, &eaten); - if (!buf) - die(_("missing object %s for %s"), - oid_to_hex(&ref->objectname), ref->refname); - if (!obj) - die(_("parse_object_buffer failed on %s for %s"), - oid_to_hex(&ref->objectname), ref->refname); + if (used_atom_cnt <= i) + return; - grab_values(ref->value, 0, obj, buf, size); - if (!eaten) - free(buf); + get_object(ref, &ref->objectname, 0, &obj); /* * If there is no atom that wants to know about tagged @@ -1514,16 +1520,7 @@ static void populate_value(struct ref_array_item *ref) * is not consistent with what deref_tag() does * which peels the onion to the core. */ - buf = get_obj(tagged, &obj, &size, &eaten); - if (!buf) - die(_("missing object %s for %s"), - oid_to_hex(tagged), ref->refname); - if (!obj) - die(_("parse_object_buffer failed on %s for %s"), - oid_to_hex(tagged), ref->refname); - grab_values(ref->value, 1, obj, buf, size); - if (!eaten) - free(buf); + get_object(ref, tagged, 1, &obj); } /* diff --git a/refs/packed-backend.c b/refs/packed-backend.c index 023243fd5f..65288c6472 100644 --- a/refs/packed-backend.c +++ b/refs/packed-backend.c @@ -68,17 +68,21 @@ struct snapshot { int mmapped; /* - * The contents of the `packed-refs` file. If the file was - * already sorted, this points at the mmapped contents of the - * file. If not, this points at heap-allocated memory - * containing the contents, sorted. If there were no contents - * (e.g., because the file didn't exist), `buf` and `eof` are - * both NULL. + * The contents of the `packed-refs` file: + * + * - buf -- a pointer to the start of the memory + * - start -- a pointer to the first byte of actual references + * (i.e., after the header line, if one is present) + * - eof -- a pointer just past the end of the reference + * contents + * + * If the `packed-refs` file was already sorted, `buf` points + * at the mmapped contents of the file. If not, it points at + * heap-allocated memory containing the contents, sorted. If + * there were no contents (e.g., because the file didn't + * exist), `buf`, `start`, and `eof` are all NULL. 
*/ - char *buf, *eof; - - /* The size of the header line, if any; otherwise, 0: */ - size_t header_len; + char *buf, *start, *eof; /* * What is the peeled state of the `packed-refs` file that @@ -169,8 +173,7 @@ static void clear_snapshot_buffer(struct snapshot *snapshot) } else { free(snapshot->buf); } - snapshot->buf = snapshot->eof = NULL; - snapshot->header_len = 0; + snapshot->buf = snapshot->start = snapshot->eof = NULL; } /* @@ -319,13 +322,14 @@ static void sort_snapshot(struct snapshot *snapshot) size_t len, i; char *new_buffer, *dst; - pos = snapshot->buf + snapshot->header_len; + pos = snapshot->start; eof = snapshot->eof; - len = eof - pos; - if (!len) + if (pos == eof) return; + len = eof - pos; + /* * Initialize records based on a crude estimate of the number * of references in the file (we'll grow it below if needed): @@ -391,9 +395,8 @@ static void sort_snapshot(struct snapshot *snapshot) * place: */ clear_snapshot_buffer(snapshot); - snapshot->buf = new_buffer; + snapshot->buf = snapshot->start = new_buffer; snapshot->eof = new_buffer + len; - snapshot->header_len = 0; cleanup: free(records); @@ -442,23 +445,26 @@ static const char *find_end_of_record(const char *p, const char *end) */ static void verify_buffer_safe(struct snapshot *snapshot) { - const char *buf = snapshot->buf + snapshot->header_len; + const char *start = snapshot->start; const char *eof = snapshot->eof; const char *last_line; - if (buf == eof) + if (start == eof) return; - last_line = find_start_of_record(buf, eof - 1); + last_line = find_start_of_record(start, eof - 1); if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2) die_invalid_line(snapshot->refs->path, last_line, eof - last_line); } +#define SMALL_FILE_SIZE (32*1024) + /* * Depending on `mmap_strategy`, either mmap or read the contents of * the `packed-refs` file into the snapshot. Return 1 if the file - * existed and was read, or 0 if the file was absent. Die on errors. + * existed and was read, or 0 if the file was absent or empty. Die on + * errors. */ static int load_contents(struct snapshot *snapshot) { @@ -489,24 +495,23 @@ static int load_contents(struct snapshot *snapshot) die_errno("couldn't stat %s", snapshot->refs->path); size = xsize_t(st.st_size); - switch (mmap_strategy) { - case MMAP_NONE: + if (!size) { + return 0; + } else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) { snapshot->buf = xmalloc(size); bytes_read = read_in_full(fd, snapshot->buf, size); if (bytes_read < 0 || bytes_read != size) die_errno("couldn't read %s", snapshot->refs->path); - snapshot->eof = snapshot->buf + size; snapshot->mmapped = 0; - break; - case MMAP_TEMPORARY: - case MMAP_OK: + } else { snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0); - snapshot->eof = snapshot->buf + size; snapshot->mmapped = 1; - break; } close(fd); + snapshot->start = snapshot->buf; + snapshot->eof = snapshot->buf + size; + return 1; } @@ -515,9 +520,11 @@ static int load_contents(struct snapshot *snapshot) * `refname` starts. If `mustexist` is true and the reference doesn't * exist, then return NULL. If `mustexist` is false and the reference * doesn't exist, then return the point where that reference would be - * inserted. In the latter mode, `refname` doesn't have to be a proper - * reference name; for example, one could search for "refs/replace/" - * to find the start of any replace references. + * inserted, or `snapshot->eof` (which might be NULL) if it would be + * inserted at the end of the file. 
In the latter mode, `refname` + * doesn't have to be a proper reference name; for example, one could + * search for "refs/replace/" to find the start of any replace + * references. * * The record is sought using a binary search, so `snapshot->buf` must * be sorted. @@ -539,7 +546,7 @@ static const char *find_reference_location(struct snapshot *snapshot, * preceding records all have reference names that come * *before* `refname`. */ - const char *lo = snapshot->buf + snapshot->header_len; + const char *lo = snapshot->start; /* * A pointer to a the first character of a record whose @@ -547,7 +554,7 @@ static const char *find_reference_location(struct snapshot *snapshot, */ const char *hi = snapshot->eof; - while (lo < hi) { + while (lo != hi) { const char *mid, *rec; int cmp; @@ -616,9 +623,7 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs) /* If the file has a header line, process it: */ if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') { - struct strbuf tmp = STRBUF_INIT; - char *p; - const char *eol; + char *tmp, *p, *eol; struct string_list traits = STRING_LIST_INIT_NODUP; eol = memchr(snapshot->buf, '\n', @@ -628,9 +633,9 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs) snapshot->buf, snapshot->eof - snapshot->buf); - strbuf_add(&tmp, snapshot->buf, eol - snapshot->buf); + tmp = xmemdupz(snapshot->buf, eol - snapshot->buf); - if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p)) + if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p)) die_invalid_line(refs->path, snapshot->buf, snapshot->eof - snapshot->buf); @@ -647,10 +652,10 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs) /* perhaps other traits later as well */ /* The "+ 1" is for the LF character. 
*/ - snapshot->header_len = eol + 1 - snapshot->buf; + snapshot->start = eol + 1; string_list_clear(&traits, 0); - strbuf_release(&tmp); + free(tmp); } verify_buffer_safe(snapshot); @@ -671,13 +676,12 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs) * We don't want to leave the file mmapped, so we are * forced to make a copy now: */ - size_t size = snapshot->eof - - (snapshot->buf + snapshot->header_len); + size_t size = snapshot->eof - snapshot->start; char *buf_copy = xmalloc(size); - memcpy(buf_copy, snapshot->buf + snapshot->header_len, size); + memcpy(buf_copy, snapshot->start, size); clear_snapshot_buffer(snapshot); - snapshot->buf = buf_copy; + snapshot->buf = snapshot->start = buf_copy; snapshot->eof = buf_copy + size; } @@ -924,7 +928,12 @@ static struct ref_iterator *packed_ref_iterator_begin( */ snapshot = get_snapshot(refs); - if (!snapshot->buf) + if (prefix && *prefix) + start = find_reference_location(snapshot, prefix, 0); + else + start = snapshot->start; + + if (start == snapshot->eof) return empty_ref_iterator_begin(); iter = xcalloc(1, sizeof(*iter)); @@ -934,11 +943,6 @@ static struct ref_iterator *packed_ref_iterator_begin( iter->snapshot = snapshot; acquire_snapshot(snapshot); - if (prefix && *prefix) - start = find_reference_location(snapshot, prefix, 0); - else - start = snapshot->buf + snapshot->header_len; - iter->pos = start; iter->eof = snapshot->eof; strbuf_init(&iter->refname_buf, 0); diff --git a/refs/ref-cache.c b/refs/ref-cache.c index 82c1cf90a7..e90bd3e727 100644 --- a/refs/ref-cache.c +++ b/refs/ref-cache.c @@ -238,10 +238,8 @@ int remove_entry_from_dir(struct ref_dir *dir, const char *refname) return -1; entry = dir->entries[entry_index]; - memmove(&dir->entries[entry_index], - &dir->entries[entry_index + 1], - (dir->nr - entry_index - 1) * sizeof(*dir->entries) - ); + MOVE_ARRAY(&dir->entries[entry_index], + &dir->entries[entry_index + 1], dir->nr - entry_index - 1); dir->nr--; if (dir->sorted > entry_index) dir->sorted--; diff --git a/remote-curl.c b/remote-curl.c index 0053b09549..a7c4c9b5ff 100644 --- a/remote-curl.c +++ b/remote-curl.c @@ -13,6 +13,7 @@ #include "credential.h" #include "sha1-array.h" #include "send-pack.h" +#include "quote.h" static struct remote *remote; /* always ends with a trailing slash */ @@ -24,6 +25,7 @@ struct options { char *deepen_since; struct string_list deepen_not; struct string_list push_options; + char *filter; unsigned progress : 1, check_self_contained_and_connected : 1, cloning : 1, @@ -33,7 +35,9 @@ struct options { thin : 1, /* One of the SEND_PACK_PUSH_CERT_* constants. 
*/ push_cert : 2, - deepen_relative : 1; + deepen_relative : 1, + from_promisor : 1, + no_dependents : 1; }; static struct options options; static struct string_list cas_options = STRING_LIST_INIT_DUP; @@ -142,7 +146,15 @@ static int set_option(const char *name, const char *value) return -1; return 0; } else if (!strcmp(name, "push-option")) { - string_list_append(&options.push_options, value); + if (*value != '"') + string_list_append(&options.push_options, value); + else { + struct strbuf unquoted = STRBUF_INIT; + if (unquote_c_style(&unquoted, value, NULL) < 0) + die("invalid quoting in push-option value"); + string_list_append_nodup(&options.push_options, + strbuf_detach(&unquoted, NULL)); + } return 0; #if LIBCURL_VERSION_NUM >= 0x070a08 @@ -157,6 +169,15 @@ static int set_option(const char *name, const char *value) return -1; return 0; #endif /* LIBCURL_VERSION_NUM >= 0x070a08 */ + } else if (!strcmp(name, "from-promisor")) { + options.from_promisor = 1; + return 0; + } else if (!strcmp(name, "no-dependents")) { + options.no_dependents = 1; + return 0; + } else if (!strcmp(name, "filter")) { + options.filter = xstrdup(value);; + return 0; } else { return 1 /* unsupported */; } @@ -339,6 +360,8 @@ static struct discovery *discover_refs(const char *service, int for_push) * pkt-line matches our request. */ line = packet_read_line_buf(&last->buf, &last->len, NULL); + if (!line) + die("invalid server response; expected service, got flush packet"); strbuf_reset(&exp); strbuf_addf(&exp, "# service=%s", service); @@ -822,6 +845,12 @@ static int fetch_git(struct discovery *heads, options.deepen_not.items[i].string); if (options.deepen_relative && options.depth) argv_array_push(&args, "--deepen-relative"); + if (options.from_promisor) + argv_array_push(&args, "--from-promisor"); + if (options.no_dependents) + argv_array_push(&args, "--no-dependents"); + if (options.filter) + argv_array_pushf(&args, "--filter=%s", options.filter); argv_array_push(&args, url.buf); for (i = 0; i < nr_heads; i++) { @@ -22,6 +22,7 @@ static struct refspec s_tag_refspec = { "refs/tags/*" }; +/* See TAG_REFSPEC for the string version */ const struct refspec *tag_refspec = &s_tag_refspec; struct counted_string { @@ -103,6 +104,17 @@ static void add_fetch_refspec(struct remote *remote, const char *ref) remote->fetch_refspec[remote->fetch_refspec_nr++] = ref; } +void add_prune_tags_to_fetch_refspec(struct remote *remote) +{ + int nr = remote->fetch_refspec_nr; + int bufsize = nr + 1; + int size = sizeof(struct refspec); + + remote->fetch = xrealloc(remote->fetch, size * bufsize); + memcpy(&remote->fetch[nr], tag_refspec, size); + add_fetch_refspec(remote, xstrdup(TAG_REFSPEC)); +} + static void add_url(struct remote *remote, const char *url) { ALLOC_GROW(remote->url, remote->url_nr + 1, remote->url_alloc); @@ -173,6 +185,7 @@ static struct remote *make_remote(const char *name, int len) ret = xcalloc(1, sizeof(struct remote)); ret->prune = -1; /* unspecified */ + ret->prune_tags = -1; /* unspecified */ ALLOC_GROW(remotes, remotes_nr + 1, remotes_alloc); remotes[remotes_nr++] = ret; ret->name = xstrndup(name, len); @@ -391,6 +404,8 @@ static int handle_config(const char *key, const char *value, void *cb) remote->skip_default_update = git_config_bool(key, value); else if (!strcmp(subkey, "prune")) remote->prune = git_config_bool(key, value); + else if (!strcmp(subkey, "prunetags")) + remote->prune_tags = git_config_bool(key, value); else if (!strcmp(subkey, "url")) { const char *v; if (git_config_string(&v, key, value)) @@ 
-1970,33 +1985,33 @@ static void unmark_and_free(struct commit_list *list, unsigned int mark) int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid) { struct object *o; - struct commit *old, *new; + struct commit *old_commit, *new_commit; struct commit_list *list, *used; int found = 0; /* - * Both new and old must be commit-ish and new is descendant of - * old. Otherwise we require --force. + * Both new_commit and old_commit must be commit-ish and new_commit is descendant of + * old_commit. Otherwise we require --force. */ o = deref_tag(parse_object(old_oid), NULL, 0); if (!o || o->type != OBJ_COMMIT) return 0; - old = (struct commit *) o; + old_commit = (struct commit *) o; o = deref_tag(parse_object(new_oid), NULL, 0); if (!o || o->type != OBJ_COMMIT) return 0; - new = (struct commit *) o; + new_commit = (struct commit *) o; - if (parse_commit(new) < 0) + if (parse_commit(new_commit) < 0) return 0; used = list = NULL; - commit_list_insert(new, &list); + commit_list_insert(new_commit, &list); while (list) { - new = pop_most_recent_commit(&list, TMP_MARK); - commit_list_insert(new, &used); - if (new == old) { + new_commit = pop_most_recent_commit(&list, TMP_MARK); + commit_list_insert(new_commit, &used); + if (new_commit == old_commit) { found = 1; break; } @@ -2007,16 +2022,23 @@ int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid) } /* - * Compare a branch with its upstream, and save their differences (number - * of commits) in *num_ours and *num_theirs. The name of the upstream branch - * (or NULL if no upstream is defined) is returned via *upstream_name, if it - * is not itself NULL. + * Lookup the upstream branch for the given branch and if present, optionally + * compute the commit ahead/behind values for the pair. + * + * If abf is AHEAD_BEHIND_FULL, compute the full ahead/behind and return the + * counts in *num_ours and *num_theirs. If abf is AHEAD_BEHIND_QUICK, skip + * the (potentially expensive) a/b computation (*num_ours and *num_theirs are + * set to zero). + * + * The name of the upstream branch (or NULL if no upstream is defined) is + * returned via *upstream_name, if it is not itself NULL. * * Returns -1 if num_ours and num_theirs could not be filled in (e.g., no - * upstream defined, or ref does not exist), 0 otherwise. + * upstream defined, or ref does not exist). Returns 0 if the commits are + * identical. Returns 1 if commits are different. */ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs, - const char **upstream_name) + const char **upstream_name, enum ahead_behind_flags abf) { struct object_id oid; struct commit *ours, *theirs; @@ -2044,11 +2066,15 @@ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs, if (!ours) return -1; + *num_theirs = *num_ours = 0; + /* are we the same? */ - if (theirs == ours) { - *num_theirs = *num_ours = 0; + if (theirs == ours) return 0; - } + if (abf == AHEAD_BEHIND_QUICK) + return 1; + if (abf != AHEAD_BEHIND_FULL) + BUG("stat_tracking_info: invalid abf '%d'", abf); /* Run "rev-list --left-right ours...theirs" internally... */ argv_array_push(&argv, ""); /* ignored */ @@ -2064,8 +2090,6 @@ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs, die("revision walk setup failed"); /* ... and count the commits on each side. 
*/ - *num_ours = 0; - *num_theirs = 0; while (1) { struct commit *c = get_revision(&revs); if (!c) @@ -2081,20 +2105,22 @@ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs, clear_commit_marks(theirs, ALL_REV_FLAGS); argv_array_clear(&argv); - return 0; + return 1; } /* * Return true when there is anything to report, otherwise false. */ -int format_tracking_info(struct branch *branch, struct strbuf *sb) +int format_tracking_info(struct branch *branch, struct strbuf *sb, + enum ahead_behind_flags abf) { - int ours, theirs; + int ours, theirs, sti; const char *full_base; char *base; int upstream_is_gone = 0; - if (stat_tracking_info(branch, &ours, &theirs, &full_base) < 0) { + sti = stat_tracking_info(branch, &ours, &theirs, &full_base, abf); + if (sti < 0) { if (!full_base) return 0; upstream_is_gone = 1; @@ -2108,10 +2134,17 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb) if (advice_status_hints) strbuf_addstr(sb, _(" (use \"git branch --unset-upstream\" to fixup)\n")); - } else if (!ours && !theirs) { + } else if (!sti) { strbuf_addf(sb, _("Your branch is up to date with '%s'.\n"), base); + } else if (abf == AHEAD_BEHIND_QUICK) { + strbuf_addf(sb, + _("Your branch and '%s' refer to different commits.\n"), + base); + if (advice_status_hints) + strbuf_addf(sb, _(" (use \"%s\" for details)\n"), + "git status --ahead-behind"); } else if (!theirs) { strbuf_addf(sb, Q_("Your branch is ahead of '%s' by %d commit.\n", @@ -47,6 +47,7 @@ struct remote { int skip_default_update; int mirror; int prune; + int prune_tags; const char *receivepack; const char *uploadpack; @@ -257,10 +258,18 @@ enum match_refs_flags { MATCH_REFS_FOLLOW_TAGS = (1 << 3) }; +/* Flags for --ahead-behind option. */ +enum ahead_behind_flags { + AHEAD_BEHIND_UNSPECIFIED = -1, + AHEAD_BEHIND_QUICK = 0, /* just eq/neq reporting */ + AHEAD_BEHIND_FULL = 1, /* traditional a/b reporting */ +}; + /* Reporting of tracking info */ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs, - const char **upstream_name); -int format_tracking_info(struct branch *branch, struct strbuf *sb); + const char **upstream_name, enum ahead_behind_flags abf); +int format_tracking_info(struct branch *branch, struct strbuf *sb, + enum ahead_behind_flags abf); struct ref *get_local_heads(void); /* @@ -297,4 +306,8 @@ extern int parseopt_push_cas_option(const struct option *, const char *arg, int extern int is_empty_cas(const struct push_cas_option *); void apply_push_cas(struct push_cas_option *, struct remote *, struct ref *); +#define TAG_REFSPEC "refs/tags/*:refs/tags/*" + +void add_prune_tags_to_fetch_refspec(struct remote *remote); + #endif diff --git a/replace_object.c b/replace_object.c index f0b39f06d5..3e49965d05 100644 --- a/replace_object.c +++ b/replace_object.c @@ -44,10 +44,8 @@ static int register_replace_object(struct replace_object *replace, ALLOC_GROW(replace_object, replace_object_nr + 1, replace_object_alloc); replace_object_nr++; if (pos < replace_object_nr) - memmove(replace_object + pos + 1, - replace_object + pos, - (replace_object_nr - pos - 1) * - sizeof(*replace_object)); + MOVE_ARRAY(replace_object + pos + 1, replace_object + pos, + replace_object_nr - pos - 1); replace_object[pos] = replace; return 0; } @@ -159,8 +159,8 @@ static struct rerere_dir *find_rerere_dir(const char *hex) ALLOC_GROW(rerere_dir, rerere_dir_nr + 1, rerere_dir_alloc); /* ... and add it in. 
*/ rerere_dir_nr++; - memmove(rerere_dir + pos + 1, rerere_dir + pos, - (rerere_dir_nr - pos - 1) * sizeof(*rerere_dir)); + MOVE_ARRAY(rerere_dir + pos + 1, rerere_dir + pos, + rerere_dir_nr - pos - 1); rerere_dir[pos] = rr_dir; scan_rerere_dir(rr_dir); } @@ -719,11 +719,9 @@ static void update_paths(struct string_list *update) item->string); } - if (active_cache_changed) { - if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK)) - die("Unable to write new index file"); - } else - rollback_lock_file(&index_lock); + if (write_locked_index(&the_index, &index_lock, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) + die("Unable to write new index file"); } static void remove_variant(struct rerere_id *id) diff --git a/revision.c b/revision.c index e5e527bcf2..b42c836d7a 100644 --- a/revision.c +++ b/revision.c @@ -113,7 +113,8 @@ void mark_parents_uninteresting(struct commit *commit) * it is popped next time around, we won't be trying * to parse it and get an error. */ - if (!has_object_file(&commit->object.oid)) + if (!commit->object.parsed && + !has_object_file(&commit->object.oid)) commit->object.parsed = 1; if (commit->object.flags & UNINTERESTING) @@ -198,6 +199,8 @@ static struct object *get_reference(struct rev_info *revs, const char *name, if (!object) { if (revs->ignore_missing) return object; + if (revs->exclude_promisor_objects && is_promisor_object(oid)) + return NULL; die("bad object %s", name); } object->flags |= flags; @@ -799,9 +802,17 @@ static int add_parents_to_list(struct rev_info *revs, struct commit *commit, for (parent = commit->parents; parent; parent = parent->next) { struct commit *p = parent->item; - - if (parse_commit_gently(p, revs->ignore_missing_links) < 0) + int gently = revs->ignore_missing_links || + revs->exclude_promisor_objects; + if (parse_commit_gently(p, gently) < 0) { + if (revs->exclude_promisor_objects && + is_promisor_object(&p->object.oid)) { + if (revs->first_parent_only) + break; + continue; + } return -1; + } if (revs->show_source && !p->util) p->util = commit->util; p->object.flags |= left_flag; @@ -2072,7 +2083,7 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg revs->grep_filter.pattern_type_option = GREP_PATTERN_TYPE_ERE; } else if (!strcmp(arg, "--regexp-ignore-case") || !strcmp(arg, "-i")) { revs->grep_filter.ignore_case = 1; - revs->diffopt.flags.pickaxe_ignore_case = 1; + revs->diffopt.pickaxe_opts |= DIFF_PICKAXE_IGNORE_CASE; } else if (!strcmp(arg, "--fixed-strings") || !strcmp(arg, "-F")) { revs->grep_filter.pattern_type_option = GREP_PATTERN_TYPE_FIXED; } else if (!strcmp(arg, "--perl-regexp") || !strcmp(arg, "-P")) { @@ -2094,6 +2105,10 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg revs->limited = 1; } else if (!strcmp(arg, "--ignore-missing")) { revs->ignore_missing = 1; + } else if (!strcmp(arg, "--exclude-promisor-objects")) { + if (fetch_if_missing) + die("BUG: exclude_promisor_objects can only be used when fetch_if_missing is 0"); + revs->exclude_promisor_objects = 1; } else { int opts = diff_opt_parse(&revs->diffopt, argv, argc, revs->prefix); if (!opts) @@ -2403,11 +2418,14 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s revs->diff = 1; /* Pickaxe, diff-filter and rename following need diffs */ - if (revs->diffopt.pickaxe || + if ((revs->diffopt.pickaxe_opts & DIFF_PICKAXE_KINDS_MASK) || revs->diffopt.filter || revs->diffopt.flags.follow_renames) revs->diff = 1; + if (revs->diffopt.objfind) + revs->simplify_history = 0; + if 
(revs->topo_order) revs->limited = 1; @@ -2836,6 +2854,16 @@ void reset_revision_walk(void) clear_object_flags(SEEN | ADDED | SHOWN); } +static int mark_uninteresting(const struct object_id *oid, + struct packed_git *pack, + uint32_t pos, + void *unused) +{ + struct object *o = parse_object(oid); + o->flags |= UNINTERESTING | SEEN; + return 0; +} + int prepare_revision_walk(struct rev_info *revs) { int i; @@ -2863,6 +2891,11 @@ int prepare_revision_walk(struct rev_info *revs) (revs->limited && limiting_can_increase_treesame(revs))) revs->treesame.name = "treesame"; + if (revs->exclude_promisor_objects) { + for_each_packed_object(mark_uninteresting, NULL, + FOR_EACH_OBJECT_PROMISOR_ONLY); + } + if (revs->no_walk != REVISION_WALK_NO_WALK_UNSORTED) commit_list_sort_by_date(&revs->commits); if (revs->no_walk) diff --git a/revision.h b/revision.h index 187406b6eb..b8c47b98e2 100644 --- a/revision.h +++ b/revision.h @@ -121,7 +121,10 @@ struct rev_info { ancestry_path:1, first_parent_only:1, line_level_traverse:1, - tree_blobs_in_commit_order:1; + tree_blobs_in_commit_order:1, + + /* for internal use only */ + exclude_promisor_objects:1; /* Diff flags */ unsigned int diff:1, diff --git a/run-command.c b/run-command.c index 31fc5ea86e..a483d5904a 100644 --- a/run-command.c +++ b/run-command.c @@ -6,6 +6,7 @@ #include "thread-utils.h" #include "strbuf.h" #include "string-list.h" +#include "quote.h" void child_process_init(struct child_process *child) { @@ -556,6 +557,90 @@ static int wait_or_whine(pid_t pid, const char *argv0, int in_signal) return code; } +static void trace_add_env(struct strbuf *dst, const char *const *deltaenv) +{ + struct string_list envs = STRING_LIST_INIT_DUP; + const char *const *e; + int i; + int printed_unset = 0; + + /* Last one wins, see run-command.c:prep_childenv() for context */ + for (e = deltaenv; e && *e; e++) { + struct strbuf key = STRBUF_INIT; + char *equals = strchr(*e, '='); + + if (equals) { + strbuf_add(&key, *e, equals - *e); + string_list_insert(&envs, key.buf)->util = equals + 1; + } else { + string_list_insert(&envs, *e)->util = NULL; + } + strbuf_release(&key); + } + + /* "unset X Y...;" */ + for (i = 0; i < envs.nr; i++) { + const char *var = envs.items[i].string; + const char *val = envs.items[i].util; + + if (val || !getenv(var)) + continue; + + if (!printed_unset) { + strbuf_addstr(dst, " unset"); + printed_unset = 1; + } + strbuf_addf(dst, " %s", var); + } + if (printed_unset) + strbuf_addch(dst, ';'); + + /* ... followed by "A=B C=D ..." */ + for (i = 0; i < envs.nr; i++) { + const char *var = envs.items[i].string; + const char *val = envs.items[i].util; + const char *oldval; + + if (!val) + continue; + + oldval = getenv(var); + if (oldval && !strcmp(val, oldval)) + continue; + + strbuf_addf(dst, " %s=", var); + sq_quote_buf_pretty(dst, val); + } + string_list_clear(&envs, 0); +} + +static void trace_run_command(const struct child_process *cp) +{ + struct strbuf buf = STRBUF_INIT; + + if (!trace_want(&trace_default_key)) + return; + + strbuf_addf(&buf, "trace: run_command:"); + if (cp->dir) { + strbuf_addstr(&buf, " cd "); + sq_quote_buf_pretty(&buf, cp->dir); + strbuf_addch(&buf, ';'); + } + /* + * The caller is responsible for initializing cp->env from + * cp->env_array if needed. We only check one place. 
+ */ + if (cp->env) + trace_add_env(&buf, cp->env); + if (cp->git_cmd) + strbuf_addstr(&buf, " git"); + sq_quote_argv_pretty(&buf, cp->argv); + + trace_printf("%s", buf.buf); + strbuf_release(&buf); +} + int start_command(struct child_process *cmd) { int need_in, need_out, need_err; @@ -624,7 +709,8 @@ fail_pipe: cmd->err = fderr[0]; } - trace_argv_printf(cmd->argv, "trace: run_command:"); + trace_run_command(cmd); + fflush(NULL); #ifndef GIT_WINDOWS_NATIVE diff --git a/send-pack.c b/send-pack.c index 2112d3b27a..8d9190f5e7 100644 --- a/send-pack.c +++ b/send-pack.c @@ -137,6 +137,8 @@ static int pack_objects(int fd, struct ref *refs, struct oid_array *extra, struc static int receive_unpack_status(int in) { const char *line = packet_read_line(in, NULL); + if (!line) + return error(_("unexpected flush packet while reading remote unpack status")); if (!skip_prefix(line, "unpack ", &line)) return error(_("unable to parse remote unpack status: %s"), line); if (strcmp(line, "ok")) diff --git a/sequencer.c b/sequencer.c index 4d3f60594c..f9d1001dee 100644 --- a/sequencer.c +++ b/sequencer.c @@ -1,10 +1,10 @@ #include "cache.h" #include "config.h" #include "lockfile.h" -#include "sequencer.h" #include "dir.h" #include "object.h" #include "commit.h" +#include "sequencer.h" #include "tag.h" #include "run-command.h" #include "exec_cmd.h" @@ -21,12 +21,16 @@ #include "log-tree.h" #include "wt-status.h" #include "hashmap.h" +#include "notes-utils.h" +#include "sigchain.h" #define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION" const char sign_off_header[] = "Signed-off-by: "; static const char cherry_picked_prefix[] = "(cherry picked from commit "; +GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG") + GIT_PATH_FUNC(git_path_seq_dir, "sequencer") static GIT_PATH_FUNC(git_path_todo_file, "sequencer/todo") @@ -130,6 +134,51 @@ static GIT_PATH_FUNC(rebase_path_strategy, "rebase-merge/strategy") static GIT_PATH_FUNC(rebase_path_strategy_opts, "rebase-merge/strategy_opts") static GIT_PATH_FUNC(rebase_path_allow_rerere_autoupdate, "rebase-merge/allow_rerere_autoupdate") +static int git_sequencer_config(const char *k, const char *v, void *cb) +{ + struct replay_opts *opts = cb; + int status; + + if (!strcmp(k, "commit.cleanup")) { + const char *s; + + status = git_config_string(&s, k, v); + if (status) + return status; + + if (!strcmp(s, "verbatim")) + opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE; + else if (!strcmp(s, "whitespace")) + opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE; + else if (!strcmp(s, "strip")) + opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_ALL; + else if (!strcmp(s, "scissors")) + opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE; + else + warning(_("invalid commit message cleanup mode '%s'"), + s); + + return status; + } + + if (!strcmp(k, "commit.gpgsign")) { + opts->gpg_sign = git_config_bool(k, v) ? 
xstrdup("") : NULL; + return 0; + } + + status = git_gpg_config(k, v, NULL); + if (status) + return status; + + return git_diff_basic_config(k, v, NULL); +} + +void sequencer_init_config(struct replay_opts *opts) +{ + opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE; + git_config(git_sequencer_config, opts); +} + static inline int is_rebase_i(const struct replay_opts *opts) { return opts->action == REPLAY_INTERACTIVE_REBASE; @@ -290,7 +339,7 @@ static void print_advice(int show_hint, struct replay_opts *opts) static int write_message(const void *buf, size_t len, const char *filename, int append_eol) { - static struct lock_file msg_file; + struct lock_file msg_file = LOCK_INIT; int msg_fd = hold_lock_file_for_update(&msg_file, filename, 0); if (msg_fd < 0) @@ -303,10 +352,8 @@ static int write_message(const void *buf, size_t len, const char *filename, rollback_lock_file(&msg_file); return error_errno(_("could not write eol to '%s'"), filename); } - if (commit_lock_file(&msg_file) < 0) { - rollback_lock_file(&msg_file); - return error(_("failed to finalize '%s'."), filename); - } + if (commit_lock_file(&msg_file) < 0) + return error(_("failed to finalize '%s'"), filename); return 0; } @@ -436,7 +483,7 @@ static int do_recursive_merge(struct commit *base, struct commit *next, struct tree *result, *next_tree, *base_tree, *head_tree; int clean; char **xopt; - static struct lock_file index_lock; + struct lock_file index_lock = LOCK_INIT; if (hold_locked_index(&index_lock, LOCK_REPORT_ON_ERROR) < 0) return -1; @@ -465,21 +512,19 @@ static int do_recursive_merge(struct commit *base, struct commit *next, fputs(o.obuf.buf, stdout); strbuf_release(&o.obuf); diff_warn_rename_limit("merge.renamelimit", o.needed_rename_limit, 0); - if (clean < 0) + if (clean < 0) { + rollback_lock_file(&index_lock); return clean; + } - if (active_cache_changed && - write_locked_index(&the_index, &index_lock, COMMIT_LOCK)) + if (write_locked_index(&the_index, &index_lock, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) /* * TRANSLATORS: %s will be "revert", "cherry-pick" or * "rebase -i". */ return error(_("%s: Unable to write new index file"), _(action_name(opts))); - rollback_lock_file(&index_lock); - - if (opts->signoff) - append_signoff(msgbuf, 0, 0); if (!clean) append_conflicts_hint(msgbuf); @@ -596,6 +641,18 @@ static int read_env_script(struct argv_array *env) return 0; } +static char *get_author(const char *message) +{ + size_t len; + const char *a; + + a = find_commit_header(message, "author", &len); + if (a) + return xmemdupz(a, len); + + return NULL; +} + static const char staged_changes_advice[] = N_("you have staged changes in your working tree\n" "If these changes are meant to be squashed into the previous commit, run:\n" @@ -658,8 +715,6 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts, argv_array_push(&cmd.args, "--amend"); if (opts->gpg_sign) argv_array_pushf(&cmd.args, "-S%s", opts->gpg_sign); - if (opts->signoff) - argv_array_push(&cmd.args, "-s"); if (defmsg) argv_array_pushl(&cmd.args, "-F", defmsg, NULL); if ((flags & CLEANUP_MSG)) @@ -694,6 +749,461 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts, return run_command(&cmd); } +static int rest_is_empty(const struct strbuf *sb, int start) +{ + int i, eol; + const char *nl; + + /* Check if the rest is just whitespace and Signed-off-by's. 
*/ + for (i = start; i < sb->len; i++) { + nl = memchr(sb->buf + i, '\n', sb->len - i); + if (nl) + eol = nl - sb->buf; + else + eol = sb->len; + + if (strlen(sign_off_header) <= eol - i && + starts_with(sb->buf + i, sign_off_header)) { + i = eol; + continue; + } + while (i < eol) + if (!isspace(sb->buf[i++])) + return 0; + } + + return 1; +} + +/* + * Find out if the message in the strbuf contains only whitespace and + * Signed-off-by lines. + */ +int message_is_empty(const struct strbuf *sb, + enum commit_msg_cleanup_mode cleanup_mode) +{ + if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len) + return 0; + return rest_is_empty(sb, 0); +} + +/* + * See if the user edited the message in the editor or left what + * was in the template intact + */ +int template_untouched(const struct strbuf *sb, const char *template_file, + enum commit_msg_cleanup_mode cleanup_mode) +{ + struct strbuf tmpl = STRBUF_INIT; + const char *start; + + if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len) + return 0; + + if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0) + return 0; + + strbuf_stripspace(&tmpl, cleanup_mode == COMMIT_MSG_CLEANUP_ALL); + if (!skip_prefix(sb->buf, tmpl.buf, &start)) + start = sb->buf; + strbuf_release(&tmpl); + return rest_is_empty(sb, start - sb->buf); +} + +int update_head_with_reflog(const struct commit *old_head, + const struct object_id *new_head, + const char *action, const struct strbuf *msg, + struct strbuf *err) +{ + struct ref_transaction *transaction; + struct strbuf sb = STRBUF_INIT; + const char *nl; + int ret = 0; + + if (action) { + strbuf_addstr(&sb, action); + strbuf_addstr(&sb, ": "); + } + + nl = strchr(msg->buf, '\n'); + if (nl) { + strbuf_add(&sb, msg->buf, nl + 1 - msg->buf); + } else { + strbuf_addbuf(&sb, msg); + strbuf_addch(&sb, '\n'); + } + + transaction = ref_transaction_begin(err); + if (!transaction || + ref_transaction_update(transaction, "HEAD", new_head, + old_head ? 
&old_head->object.oid : &null_oid, + 0, sb.buf, err) || + ref_transaction_commit(transaction, err)) { + ret = -1; + } + ref_transaction_free(transaction); + strbuf_release(&sb); + + return ret; +} + +static int run_rewrite_hook(const struct object_id *oldoid, + const struct object_id *newoid) +{ + struct child_process proc = CHILD_PROCESS_INIT; + const char *argv[3]; + int code; + struct strbuf sb = STRBUF_INIT; + + argv[0] = find_hook("post-rewrite"); + if (!argv[0]) + return 0; + + argv[1] = "amend"; + argv[2] = NULL; + + proc.argv = argv; + proc.in = -1; + proc.stdout_to_stderr = 1; + + code = start_command(&proc); + if (code) + return code; + strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid)); + sigchain_push(SIGPIPE, SIG_IGN); + write_in_full(proc.in, sb.buf, sb.len); + close(proc.in); + strbuf_release(&sb); + sigchain_pop(SIGPIPE); + return finish_command(&proc); +} + +void commit_post_rewrite(const struct commit *old_head, + const struct object_id *new_head) +{ + struct notes_rewrite_cfg *cfg; + + cfg = init_copy_notes_for_rewrite("amend"); + if (cfg) { + /* we are amending, so old_head is not NULL */ + copy_note_for_rewrite(cfg, &old_head->object.oid, new_head); + finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'"); + } + run_rewrite_hook(&old_head->object.oid, new_head); +} + +static int run_prepare_commit_msg_hook(struct strbuf *msg, const char *commit) +{ + struct argv_array hook_env = ARGV_ARRAY_INIT; + int ret; + const char *name; + + name = git_path_commit_editmsg(); + if (write_message(msg->buf, msg->len, name, 0)) + return -1; + + argv_array_pushf(&hook_env, "GIT_INDEX_FILE=%s", get_index_file()); + argv_array_push(&hook_env, "GIT_EDITOR=:"); + if (commit) + ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name, + "commit", commit, NULL); + else + ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name, + "message", NULL); + if (ret) + ret = error(_("'prepare-commit-msg' hook failed")); + argv_array_clear(&hook_env); + + return ret; +} + +static const char implicit_ident_advice_noconfig[] = +N_("Your name and email address were configured automatically based\n" +"on your username and hostname. Please check that they are accurate.\n" +"You can suppress this message by setting them explicitly. Run the\n" +"following command and follow the instructions in your editor to edit\n" +"your configuration file:\n" +"\n" +" git config --global --edit\n" +"\n" +"After doing this, you may fix the identity used for this commit with:\n" +"\n" +" git commit --amend --reset-author\n"); + +static const char implicit_ident_advice_config[] = +N_("Your name and email address were configured automatically based\n" +"on your username and hostname. 
Please check that they are accurate.\n" +"You can suppress this message by setting them explicitly:\n" +"\n" +" git config --global user.name \"Your Name\"\n" +" git config --global user.email you@example.com\n" +"\n" +"After doing this, you may fix the identity used for this commit with:\n" +"\n" +" git commit --amend --reset-author\n"); + +static const char *implicit_ident_advice(void) +{ + char *user_config = expand_user_path("~/.gitconfig", 0); + char *xdg_config = xdg_config_home("config"); + int config_exists = file_exists(user_config) || file_exists(xdg_config); + + free(user_config); + free(xdg_config); + + if (config_exists) + return _(implicit_ident_advice_config); + else + return _(implicit_ident_advice_noconfig); + +} + +void print_commit_summary(const char *prefix, const struct object_id *oid, + unsigned int flags) +{ + struct rev_info rev; + struct commit *commit; + struct strbuf format = STRBUF_INIT; + const char *head; + struct pretty_print_context pctx = {0}; + struct strbuf author_ident = STRBUF_INIT; + struct strbuf committer_ident = STRBUF_INIT; + + commit = lookup_commit(oid); + if (!commit) + die(_("couldn't look up newly created commit")); + if (parse_commit(commit)) + die(_("could not parse newly created commit")); + + strbuf_addstr(&format, "format:%h] %s"); + + format_commit_message(commit, "%an <%ae>", &author_ident, &pctx); + format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx); + if (strbuf_cmp(&author_ident, &committer_ident)) { + strbuf_addstr(&format, "\n Author: "); + strbuf_addbuf_percentquote(&format, &author_ident); + } + if (flags & SUMMARY_SHOW_AUTHOR_DATE) { + struct strbuf date = STRBUF_INIT; + + format_commit_message(commit, "%ad", &date, &pctx); + strbuf_addstr(&format, "\n Date: "); + strbuf_addbuf_percentquote(&format, &date); + strbuf_release(&date); + } + if (!committer_ident_sufficiently_given()) { + strbuf_addstr(&format, "\n Committer: "); + strbuf_addbuf_percentquote(&format, &committer_ident); + if (advice_implicit_identity) { + strbuf_addch(&format, '\n'); + strbuf_addstr(&format, implicit_ident_advice()); + } + } + strbuf_release(&author_ident); + strbuf_release(&committer_ident); + + init_revisions(&rev, prefix); + setup_revisions(0, NULL, &rev, NULL); + + rev.diff = 1; + rev.diffopt.output_format = + DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY; + + rev.verbose_header = 1; + rev.show_root_diff = 1; + get_commit_format(format.buf, &rev); + rev.always_show_header = 0; + rev.diffopt.detect_rename = DIFF_DETECT_RENAME; + rev.diffopt.break_opt = 0; + diff_setup_done(&rev.diffopt); + + head = resolve_ref_unsafe("HEAD", 0, NULL, NULL); + if (!head) + die_errno(_("unable to resolve HEAD after creating commit")); + if (!strcmp(head, "HEAD")) + head = _("detached HEAD"); + else + skip_prefix(head, "refs/heads/", &head); + printf("[%s%s ", head, (flags & SUMMARY_INITIAL_COMMIT) ? 
+ _(" (root-commit)") : ""); + + if (!log_tree_commit(&rev, commit)) { + rev.always_show_header = 1; + rev.use_terminator = 1; + log_tree_commit(&rev, commit); + } + + strbuf_release(&format); +} + +static int parse_head(struct commit **head) +{ + struct commit *current_head; + struct object_id oid; + + if (get_oid("HEAD", &oid)) { + current_head = NULL; + } else { + current_head = lookup_commit_reference(&oid); + if (!current_head) + return error(_("could not parse HEAD")); + if (oidcmp(&oid, ¤t_head->object.oid)) { + warning(_("HEAD %s is not a commit!"), + oid_to_hex(&oid)); + } + if (parse_commit(current_head)) + return error(_("could not parse HEAD commit")); + } + *head = current_head; + + return 0; +} + +/* + * Try to commit without forking 'git commit'. In some cases we need + * to run 'git commit' to display an error message + * + * Returns: + * -1 - error unable to commit + * 0 - success + * 1 - run 'git commit' + */ +static int try_to_commit(struct strbuf *msg, const char *author, + struct replay_opts *opts, unsigned int flags, + struct object_id *oid) +{ + struct object_id tree; + struct commit *current_head; + struct commit_list *parents = NULL; + struct commit_extra_header *extra = NULL; + struct strbuf err = STRBUF_INIT; + struct strbuf commit_msg = STRBUF_INIT; + char *amend_author = NULL; + const char *hook_commit = NULL; + enum commit_msg_cleanup_mode cleanup; + int res = 0; + + if (parse_head(¤t_head)) + return -1; + + if (flags & AMEND_MSG) { + const char *exclude_gpgsig[] = { "gpgsig", NULL }; + const char *out_enc = get_commit_output_encoding(); + const char *message = logmsg_reencode(current_head, NULL, + out_enc); + + if (!msg) { + const char *orig_message = NULL; + + find_commit_subject(message, &orig_message); + msg = &commit_msg; + strbuf_addstr(msg, orig_message); + hook_commit = "HEAD"; + } + author = amend_author = get_author(message); + unuse_commit_buffer(current_head, message); + if (!author) { + res = error(_("unable to parse commit author")); + goto out; + } + parents = copy_commit_list(current_head->parents); + extra = read_commit_extra_headers(current_head, exclude_gpgsig); + } else if (current_head) { + commit_list_insert(current_head, &parents); + } + + if (write_cache_as_tree(tree.hash, 0, NULL)) { + res = error(_("git write-tree failed to write a tree")); + goto out; + } + + if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ? + ¤t_head->tree->object.oid : + &empty_tree_oid, &tree)) { + res = 1; /* run 'git commit' to display error message */ + goto out; + } + + if (find_hook("prepare-commit-msg")) { + res = run_prepare_commit_msg_hook(msg, hook_commit); + if (res) + goto out; + if (strbuf_read_file(&commit_msg, git_path_commit_editmsg(), + 2048) < 0) { + res = error_errno(_("unable to read commit message " + "from '%s'"), + git_path_commit_editmsg()); + goto out; + } + msg = &commit_msg; + } + + cleanup = (flags & CLEANUP_MSG) ? 
COMMIT_MSG_CLEANUP_ALL : + opts->default_msg_cleanup; + + if (cleanup != COMMIT_MSG_CLEANUP_NONE) + strbuf_stripspace(msg, cleanup == COMMIT_MSG_CLEANUP_ALL); + if (!opts->allow_empty_message && message_is_empty(msg, cleanup)) { + res = 1; /* run 'git commit' to display error message */ + goto out; + } + + if (commit_tree_extended(msg->buf, msg->len, &tree, parents, + oid, author, opts->gpg_sign, extra)) { + res = error(_("failed to write commit object")); + goto out; + } + + if (update_head_with_reflog(current_head, oid, + getenv("GIT_REFLOG_ACTION"), msg, &err)) { + res = error("%s", err.buf); + goto out; + } + + if (flags & AMEND_MSG) + commit_post_rewrite(current_head, oid); + +out: + free_commit_extra_headers(extra); + strbuf_release(&err); + strbuf_release(&commit_msg); + free(amend_author); + + return res; +} + +static int do_commit(const char *msg_file, const char *author, + struct replay_opts *opts, unsigned int flags) +{ + int res = 1; + + if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG)) { + struct object_id oid; + struct strbuf sb = STRBUF_INIT; + + if (msg_file && strbuf_read_file(&sb, msg_file, 2048) < 0) + return error_errno(_("unable to read commit message " + "from '%s'"), + msg_file); + + res = try_to_commit(msg_file ? &sb : NULL, author, opts, flags, + &oid); + strbuf_release(&sb); + if (!res) { + unlink(git_path_cherry_pick_head()); + unlink(git_path_merge_msg()); + if (!is_rebase_i(opts)) + print_commit_summary(NULL, &oid, + SUMMARY_SHOW_AUTHOR_DATE); + return res; + } + } + if (res == 1) + return run_git_commit(msg_file, opts, flags); + + return res; +} + static int is_original_commit_empty(struct commit *commit) { const struct object_id *ptree_oid; @@ -952,6 +1462,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit, struct object_id head; struct commit *base, *next, *parent; const char *base_label, *next_label; + char *author = NULL; struct commit_message msg = { NULL, NULL, NULL, NULL }; struct strbuf msgbuf = STRBUF_INIT; int res, unborn = 0, allow; @@ -1066,6 +1577,8 @@ static int do_pick_commit(enum todo_command command, struct commit *commit, strbuf_addstr(&msgbuf, oid_to_hex(&commit->object.oid)); strbuf_addstr(&msgbuf, ")\n"); } + if (!is_fixup(command)) + author = get_author(msg.message); } if (command == TODO_REWORD) @@ -1091,6 +1604,9 @@ static int do_pick_commit(enum todo_command command, struct commit *commit, } } + if (opts->signoff) + append_signoff(&msgbuf, 0, 0); + if (is_rebase_i(opts) && write_author_script(msg.message) < 0) res = -1; else if (!opts->strategy || !strcmp(opts->strategy, "recursive") || command == TODO_REVERT) { @@ -1148,9 +1664,13 @@ static int do_pick_commit(enum todo_command command, struct commit *commit, goto leave; } else if (allow) flags |= ALLOW_EMPTY; - if (!opts->no_commit) + if (!opts->no_commit) { fast_forward_edit: - res = run_git_commit(msg_file, opts, flags); + if (author || command == TODO_REVERT || (flags & AMEND_MSG)) + res = do_commit(msg_file, author, opts, flags); + else + res = error(_("unable to parse commit author")); + } if (!res && final_fixup) { unlink(rebase_path_fixup_msg()); @@ -1159,6 +1679,7 @@ fast_forward_edit: leave: free_message(commit, &msg); + free(author); update_abort_safety_file(); return res; @@ -1183,7 +1704,7 @@ static int prepare_revs(struct replay_opts *opts) static int read_and_refresh_cache(struct replay_opts *opts) { - static struct lock_file index_lock; + struct lock_file index_lock = LOCK_INIT; int index_fd = hold_locked_index(&index_lock, 0); if 
(read_index_preload(&the_index, NULL) < 0) { rollback_lock_file(&index_lock); @@ -1191,13 +1712,13 @@ static int read_and_refresh_cache(struct replay_opts *opts) _(action_name(opts))); } refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL); - if (the_index.cache_changed && index_fd >= 0) { - if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK)) { + if (index_fd >= 0) { + if (write_locked_index(&the_index, &index_lock, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) { return error(_("git %s: failed to refresh the index"), _(action_name(opts))); } } - rollback_lock_file(&index_lock); return 0; } @@ -1347,22 +1868,31 @@ static int count_commands(struct todo_list *todo_list) return count; } +static ssize_t strbuf_read_file_or_whine(struct strbuf *sb, const char *path) +{ + int fd; + ssize_t len; + + fd = open(path, O_RDONLY); + if (fd < 0) + return error_errno(_("could not open '%s'"), path); + len = strbuf_read(sb, fd, 0); + close(fd); + if (len < 0) + return error(_("could not read '%s'."), path); + return len; +} + static int read_populate_todo(struct todo_list *todo_list, struct replay_opts *opts) { struct stat st; const char *todo_file = get_todo_path(opts); - int fd, res; + int res; strbuf_reset(&todo_list->buf); - fd = open(todo_file, O_RDONLY); - if (fd < 0) - return error_errno(_("could not open '%s'"), todo_file); - if (strbuf_read(&todo_list->buf, fd, 0) < 0) { - close(fd); - return error(_("could not read '%s'."), todo_file); - } - close(fd); + if (strbuf_read_file_or_whine(&todo_list->buf, todo_file) < 0) + return -1; res = stat(todo_file, &st); if (res) @@ -1577,16 +2107,14 @@ static int create_seq_dir(void) static int save_head(const char *head) { - static struct lock_file head_lock; + struct lock_file head_lock = LOCK_INIT; struct strbuf buf = STRBUF_INIT; int fd; ssize_t written; fd = hold_lock_file_for_update(&head_lock, git_path_head_file(), 0); - if (fd < 0) { - rollback_lock_file(&head_lock); + if (fd < 0) return error_errno(_("could not lock HEAD")); - } strbuf_addf(&buf, "%s\n", head); written = write_in_full(fd, buf.buf, buf.len); strbuf_release(&buf); @@ -1595,10 +2123,8 @@ static int save_head(const char *head) return error_errno(_("could not write to '%s'"), git_path_head_file()); } - if (commit_lock_file(&head_lock) < 0) { - rollback_lock_file(&head_lock); - return error(_("failed to finalize '%s'."), git_path_head_file()); - } + if (commit_lock_file(&head_lock) < 0) + return error(_("failed to finalize '%s'"), git_path_head_file()); return 0; } @@ -1702,7 +2228,7 @@ fail: static int save_todo(struct todo_list *todo_list, struct replay_opts *opts) { - static struct lock_file todo_lock; + struct lock_file todo_lock = LOCK_INIT; const char *todo_path = get_todo_path(opts); int next = todo_list->current, offset, fd; @@ -1722,7 +2248,7 @@ static int save_todo(struct todo_list *todo_list, struct replay_opts *opts) todo_list->buf.len - offset) < 0) return error_errno(_("could not write to '%s'"), todo_path); if (commit_lock_file(&todo_lock) < 0) - return error(_("failed to finalize '%s'."), todo_path); + return error(_("failed to finalize '%s'"), todo_path); if (is_rebase_i(opts)) { const char *done_path = rebase_path_done(); @@ -1792,6 +2318,9 @@ static int make_patch(struct commit *commit, struct replay_opts *opts) p = short_commit_name(commit); if (write_message(p, strlen(p), rebase_path_stopped_sha(), 1) < 0) return -1; + if (update_ref("rebase", "REBASE_HEAD", &commit->object.oid, + NULL, REF_NO_DEREF, UPDATE_REFS_MSG_ON_ERR)) + res |= 
error(_("could not update %s"), "REBASE_HEAD"); strbuf_addf(&buf, "%s/patch", get_dir(opts)); memset(&log_tree_opt, 0, sizeof(log_tree_opt)); @@ -2043,6 +2572,7 @@ static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts) unlink(rebase_path_author_script()); unlink(rebase_path_stopped_sha()); unlink(rebase_path_amend()); + delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF); } if (item->command <= TODO_SQUASH) { if (is_rebase_i(opts)) @@ -2348,7 +2878,7 @@ int sequencer_pick_revisions(struct replay_opts *opts) if (!lookup_commit_reference_gently(&oid, 1)) { enum object_type type = sha1_object_info(oid.hash, NULL); return error(_("%s: can't cherry-pick a %s"), - name, typename(type)); + name, type_name(type)); } } else return error(_("%s: bad revision"), name); @@ -2629,20 +3159,13 @@ int check_todo_list(void) struct strbuf todo_file = STRBUF_INIT; struct todo_list todo_list = TODO_LIST_INIT; struct strbuf missing = STRBUF_INIT; - int advise_to_edit_todo = 0, res = 0, fd, i; + int advise_to_edit_todo = 0, res = 0, i; strbuf_addstr(&todo_file, rebase_path_todo()); - fd = open(todo_file.buf, O_RDONLY); - if (fd < 0) { - res = error_errno(_("could not open '%s'"), todo_file.buf); - goto leave_check; - } - if (strbuf_read(&todo_list.buf, fd, 0) < 0) { - close(fd); - res = error(_("could not read '%s'."), todo_file.buf); + if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) { + res = -1; goto leave_check; } - close(fd); advise_to_edit_todo = res = parse_insn_buffer(todo_list.buf.buf, &todo_list); @@ -2658,17 +3181,10 @@ int check_todo_list(void) todo_list_release(&todo_list); strbuf_addstr(&todo_file, ".backup"); - fd = open(todo_file.buf, O_RDONLY); - if (fd < 0) { - res = error_errno(_("could not open '%s'"), todo_file.buf); - goto leave_check; - } - if (strbuf_read(&todo_list.buf, fd, 0) < 0) { - close(fd); - res = error(_("could not read '%s'."), todo_file.buf); + if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) { + res = -1; goto leave_check; } - close(fd); strbuf_release(&todo_file); res = !!parse_insn_buffer(todo_list.buf.buf, &todo_list); @@ -2749,15 +3265,8 @@ int skip_unnecessary_picks(void) } strbuf_release(&buf); - fd = open(todo_file, O_RDONLY); - if (fd < 0) { - return error_errno(_("could not open '%s'"), todo_file); - } - if (strbuf_read(&todo_list.buf, fd, 0) < 0) { - close(fd); - return error(_("could not read '%s'."), todo_file); - } - close(fd); + if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0) + return -1; if (parse_insn_buffer(todo_list.buf.buf, &todo_list) < 0) { todo_list_release(&todo_list); return -1; @@ -2848,17 +3357,11 @@ int rearrange_squash(void) const char *todo_file = rebase_path_todo(); struct todo_list todo_list = TODO_LIST_INIT; struct hashmap subject2item; - int res = 0, rearranged = 0, *next, *tail, fd, i; + int res = 0, rearranged = 0, *next, *tail, i; char **subjects; - fd = open(todo_file, O_RDONLY); - if (fd < 0) - return error_errno(_("could not open '%s'"), todo_file); - if (strbuf_read(&todo_list.buf, fd, 0) < 0) { - close(fd); - return error(_("could not read '%s'."), todo_file); - } - close(fd); + if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0) + return -1; if (parse_insn_buffer(todo_list.buf.buf, &todo_list) < 0) { todo_list_release(&todo_list); return -1; diff --git a/sequencer.h b/sequencer.h index 81f6d7d393..e45b178dfc 100644 --- a/sequencer.h +++ b/sequencer.h @@ -1,6 +1,7 @@ #ifndef SEQUENCER_H #define SEQUENCER_H +const char *git_path_commit_editmsg(void); const 
char *git_path_seq_dir(void); #define APPEND_SIGNOFF_DEDUP (1u << 0) @@ -11,6 +12,13 @@ enum replay_action { REPLAY_INTERACTIVE_REBASE }; +enum commit_msg_cleanup_mode { + COMMIT_MSG_CLEANUP_SPACE, + COMMIT_MSG_CLEANUP_NONE, + COMMIT_MSG_CLEANUP_SCISSORS, + COMMIT_MSG_CLEANUP_ALL +}; + struct replay_opts { enum replay_action action; @@ -29,6 +37,7 @@ struct replay_opts { int mainline; char *gpg_sign; + enum commit_msg_cleanup_mode default_msg_cleanup; /* Merge strategy */ char *strategy; @@ -40,6 +49,8 @@ struct replay_opts { }; #define REPLAY_OPTS_INIT { -1 } +/* Call this to setup defaults before parsing command line options */ +void sequencer_init_config(struct replay_opts *opts); int sequencer_pick_revisions(struct replay_opts *opts); int sequencer_continue(struct replay_opts *opts); int sequencer_rollback(struct replay_opts *opts); @@ -61,5 +72,19 @@ extern const char sign_off_header[]; void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag); void append_conflicts_hint(struct strbuf *msgbuf); +int message_is_empty(const struct strbuf *sb, + enum commit_msg_cleanup_mode cleanup_mode); +int template_untouched(const struct strbuf *sb, const char *template_file, + enum commit_msg_cleanup_mode cleanup_mode); +int update_head_with_reflog(const struct commit *old_head, + const struct object_id *new_head, + const char *action, const struct strbuf *msg, + struct strbuf *err); +void commit_post_rewrite(const struct commit *current_head, + const struct object_id *new_head); +#define SUMMARY_INITIAL_COMMIT (1 << 0) +#define SUMMARY_SHOW_AUTHOR_DATE (1 << 1) +void print_commit_summary(const char *prefix, const struct object_id *oid, + unsigned int flags); #endif @@ -119,7 +119,7 @@ char *prefix_path(const char *prefix, int len, const char *path) { char *r = prefix_path_gently(prefix, len, NULL, path); if (!r) - die("'%s' is outside repository", path); + die(_("'%s' is outside repository"), path); return r; } @@ -160,7 +160,7 @@ int check_filename(const char *prefix, const char *arg) free(to_free); return 0; /* file does not exist */ } - die_errno("failed to stat '%s'", arg); + die_errno(_("failed to stat '%s'"), arg); } static void NORETURN die_verify_filename(const char *prefix, @@ -230,7 +230,7 @@ void verify_filename(const char *prefix, int diagnose_misspelt_rev) { if (*arg == '-') - die("option '%s' must come before non-option arguments", arg); + die(_("option '%s' must come before non-option arguments"), arg); if (looks_like_pathspec(arg) || check_filename(prefix, arg)) return; die_verify_filename(prefix, arg, diagnose_misspelt_rev); @@ -385,14 +385,14 @@ void setup_work_tree(void) return; if (work_tree_config_is_bogus) - die("unable to set up work tree using invalid config"); + die(_("unable to set up work tree using invalid config")); work_tree = get_git_work_tree(); git_dir = get_git_dir(); if (!is_absolute_path(git_dir)) git_dir = real_path(get_git_dir()); if (!work_tree || chdir(work_tree)) - die("This operation must be run in a work tree"); + die(_("this operation must be run in a work tree")); /* * Make sure subsequent git processes find correct worktree @@ -422,7 +422,11 @@ static int check_repo_format(const char *var, const char *value, void *vdata) ; else if (!strcmp(ext, "preciousobjects")) data->precious_objects = git_config_bool(var, value); - else + else if (!strcmp(ext, "partialclone")) { + if (!value) + return config_error_nonbool(var); + data->partial_clone = xstrdup(value); + } else string_list_append(&data->unknown_extensions, ext); } else if 
(strcmp(var, "core.bare") == 0) { data->is_bare = git_config_bool(var, value); @@ -464,6 +468,7 @@ static int check_repository_format_gently(const char *gitdir, struct repository_ } repository_format_precious_objects = candidate->precious_objects; + repository_format_partial_clone = candidate->partial_clone; string_list_clear(&candidate->unknown_extensions, 0); if (!has_common) { if (candidate->is_bare != -1) { @@ -525,17 +530,17 @@ void read_gitfile_error_die(int error_code, const char *path, const char *dir) /* non-fatal; follow return path */ break; case READ_GITFILE_ERR_OPEN_FAILED: - die_errno("Error opening '%s'", path); + die_errno(_("error opening '%s'"), path); case READ_GITFILE_ERR_TOO_LARGE: - die("Too large to be a .git file: '%s'", path); + die(_("too large to be a .git file: '%s'"), path); case READ_GITFILE_ERR_READ_FAILED: - die("Error reading %s", path); + die(_("error reading %s"), path); case READ_GITFILE_ERR_INVALID_FORMAT: - die("Invalid gitfile format: %s", path); + die(_("invalid gitfile format: %s"), path); case READ_GITFILE_ERR_NO_PATH: - die("No path in gitfile: %s", path); + die(_("no path in gitfile: %s"), path); case READ_GITFILE_ERR_NOT_A_REPO: - die("Not a git repository: %s", dir); + die(_("not a git repository: %s"), dir); default: die("BUG: unknown error code"); } @@ -634,7 +639,7 @@ static const char *setup_explicit_git_dir(const char *gitdirenv, int offset; if (PATH_MAX - 40 < strlen(gitdirenv)) - die("'$%s' too big", GIT_DIR_ENVIRONMENT); + die(_("'$%s' too big"), GIT_DIR_ENVIRONMENT); gitfile = (char*)read_gitfile(gitdirenv); if (gitfile) { @@ -648,7 +653,7 @@ static const char *setup_explicit_git_dir(const char *gitdirenv, free(gitfile); return NULL; } - die("Not a git repository: '%s'", gitdirenv); + die(_("not a git repository: '%s'"), gitdirenv); } if (check_repository_format_gently(gitdirenv, repo_fmt, nongit_ok)) { @@ -677,12 +682,12 @@ static const char *setup_explicit_git_dir(const char *gitdirenv, else { char *core_worktree; if (chdir(gitdirenv)) - die_errno("Could not chdir to '%s'", gitdirenv); + die_errno(_("cannot chdir to '%s'"), gitdirenv); if (chdir(git_work_tree_cfg)) - die_errno("Could not chdir to '%s'", git_work_tree_cfg); + die_errno(_("cannot chdir to '%s'"), git_work_tree_cfg); core_worktree = xgetcwd(); if (chdir(cwd->buf)) - die_errno("Could not come back to cwd"); + die_errno(_("cannot come back to cwd")); set_git_work_tree(core_worktree); free(core_worktree); } @@ -710,7 +715,7 @@ static const char *setup_explicit_git_dir(const char *gitdirenv, if (offset >= 0) { /* cwd inside worktree? */ set_git_dir(real_path(gitdirenv)); if (chdir(worktree)) - die_errno("Could not chdir to '%s'", worktree); + die_errno(_("cannot chdir to '%s'"), worktree); strbuf_addch(cwd, '/'); free(gitfile); return cwd->buf + offset; @@ -738,7 +743,7 @@ static const char *setup_discovered_git_dir(const char *gitdir, if (offset != cwd->len && !is_absolute_path(gitdir)) gitdir = to_free = real_pathdup(gitdir, 1); if (chdir(cwd->buf)) - die_errno("Could not come back to cwd"); + die_errno(_("cannot come back to cwd")); ret = setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok); free(to_free); return ret; @@ -748,7 +753,7 @@ static const char *setup_discovered_git_dir(const char *gitdir, if (is_bare_repository_cfg > 0) { set_git_dir(offset == cwd->len ? 
gitdir : real_path(gitdir)); if (chdir(cwd->buf)) - die_errno("Could not come back to cwd"); + die_errno(_("cannot come back to cwd")); return NULL; } @@ -787,7 +792,7 @@ static const char *setup_bare_git_dir(struct strbuf *cwd, int offset, gitdir = offset == cwd->len ? "." : xmemdupz(cwd->buf, offset); if (chdir(cwd->buf)) - die_errno("Could not come back to cwd"); + die_errno(_("cannot come back to cwd")); return setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok); } @@ -795,7 +800,7 @@ static const char *setup_bare_git_dir(struct strbuf *cwd, int offset, inside_work_tree = 0; if (offset != cwd->len) { if (chdir(cwd->buf)) - die_errno("Cannot come back to cwd"); + die_errno(_("cannot come back to cwd")); root_len = offset_1st_component(cwd->buf); strbuf_setlen(cwd, offset > root_len ? offset : root_len); set_git_dir(cwd->buf); @@ -808,9 +813,9 @@ static const char *setup_bare_git_dir(struct strbuf *cwd, int offset, static const char *setup_nongit(const char *cwd, int *nongit_ok) { if (!nongit_ok) - die(_("Not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT); + die(_("not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT); if (chdir(cwd)) - die_errno(_("Cannot come back to cwd")); + die_errno(_("cannot come back to cwd")); *nongit_ok = 1; return NULL; } @@ -819,7 +824,7 @@ static dev_t get_device_or_die(const char *path, const char *prefix, int prefix_ { struct stat buf; if (stat(path, &buf)) { - die_errno("failed to stat '%*s%s%s'", + die_errno(_("failed to stat '%*s%s%s'"), prefix_len, prefix ? prefix : "", prefix ? "/" : "", path); @@ -1061,13 +1066,13 @@ const char *setup_git_directory_gently(int *nongit_ok) break; case GIT_DIR_DISCOVERED: if (dir.len < cwd.len && chdir(dir.buf)) - die(_("Cannot change to '%s'"), dir.buf); + die(_("cannot change to '%s'"), dir.buf); prefix = setup_discovered_git_dir(gitdir.buf, &cwd, dir.len, &repo_fmt, nongit_ok); break; case GIT_DIR_BARE: if (dir.len < cwd.len && chdir(dir.buf)) - die(_("Cannot change to '%s'"), dir.buf); + die(_("cannot change to '%s'"), dir.buf); prefix = setup_bare_git_dir(&cwd, dir.len, &repo_fmt, nongit_ok); break; case GIT_DIR_HIT_CEILING: @@ -1080,7 +1085,7 @@ const char *setup_git_directory_gently(int *nongit_ok) strbuf_release(&dir); return NULL; } - die(_("Not a git repository (or any parent up to mount point %s)\n" + die(_("not a git repository (or any parent up to mount point %s)\n" "Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set)."), dir.buf); default: @@ -1164,7 +1169,7 @@ int git_config_perm(const char *var, const char *value) /* A filemode value was given: 0xxx */ if ((i & 0600) != 0600) - die(_("Problem with core.sharedRepository filemode value " + die(_("problem with core.sharedRepository filemode value " "(0%.3o).\nThe owner of files must always have " "read and write permissions."), i); @@ -1207,7 +1212,7 @@ void sanitize_stdfds(void) while (fd != -1 && fd < 2) fd = dup(fd); if (fd == -1) - die_errno("open /dev/null or dup failed"); + die_errno(_("open /dev/null or dup failed")); if (fd > 2) close(fd); } @@ -1222,12 +1227,12 @@ int daemonize(void) case 0: break; case -1: - die_errno("fork failed"); + die_errno(_("fork failed")); default: exit(0); } if (setsid() == -1) - die_errno("setsid failed"); + die_errno(_("setsid failed")); close(0); close(1); close(2); diff --git a/sha1-lookup.c b/sha1-lookup.c index 4cf3ebd921..8d0b1db3e2 100644 --- a/sha1-lookup.c +++ b/sha1-lookup.c @@ -99,3 +99,31 @@ int 
sha1_pos(const unsigned char *sha1, void *table, size_t nr, } while (lo < hi); return -lo-1; } + +int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo, + const unsigned char *table, size_t stride, uint32_t *result) +{ + uint32_t hi, lo; + + hi = ntohl(fanout_nbo[*sha1]); + lo = ((*sha1 == 0x0) ? 0 : ntohl(fanout_nbo[*sha1 - 1])); + + while (lo < hi) { + unsigned mi = lo + (hi - lo) / 2; + int cmp = hashcmp(table + mi * stride, sha1); + + if (!cmp) { + if (result) + *result = mi; + return 1; + } + if (cmp > 0) + hi = mi; + else + lo = mi + 1; + } + + if (result) + *result = lo; + return 0; +} diff --git a/sha1-lookup.h b/sha1-lookup.h index cf5314f402..7678b23b36 100644 --- a/sha1-lookup.h +++ b/sha1-lookup.h @@ -7,4 +7,26 @@ extern int sha1_pos(const unsigned char *sha1, void *table, size_t nr, sha1_access_fn fn); + +/* + * Searches for sha1 in table, using the given fanout table to determine the + * interval to search, then using binary search. Returns 1 if found, 0 if not. + * + * Takes the following parameters: + * + * - sha1: the hash to search for + * - fanout_nbo: a 256-element array of NETWORK-order 32-bit integers; the + * integer at position i represents the number of elements in table whose + * first byte is less than or equal to i + * - table: a sorted list of hashes with optional extra information in between + * - stride: distance between two consecutive elements in table (should be + * GIT_MAX_RAWSZ or greater) + * - result: if not NULL, this function stores the element index of the + * position found (if the search is successful) or the index of the least + * element that is greater than sha1 (if the search is not successful) + * + * This function does not verify the validity of the fanout table. + */ +int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo, + const unsigned char *table, size_t stride, uint32_t *result); #endif diff --git a/sha1_file.c b/sha1_file.c index 3da70ac650..cc0f43ea84 100644 --- a/sha1_file.c +++ b/sha1_file.c @@ -24,11 +24,11 @@ #include "bulk-checkin.h" #include "streaming.h" #include "dir.h" -#include "mru.h" #include "list.h" #include "mergesort.h" #include "quote.h" #include "packfile.h" +#include "fetch-object.h" const unsigned char null_sha1[GIT_MAX_RAWSZ]; const struct object_id null_oid; @@ -39,32 +39,32 @@ const struct object_id empty_blob_oid = { EMPTY_BLOB_SHA1_BIN_LITERAL }; -static void git_hash_sha1_init(void *ctx) +static void git_hash_sha1_init(git_hash_ctx *ctx) { - git_SHA1_Init((git_SHA_CTX *)ctx); + git_SHA1_Init(&ctx->sha1); } -static void git_hash_sha1_update(void *ctx, const void *data, size_t len) +static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len) { - git_SHA1_Update((git_SHA_CTX *)ctx, data, len); + git_SHA1_Update(&ctx->sha1, data, len); } -static void git_hash_sha1_final(unsigned char *hash, void *ctx) +static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx) { - git_SHA1_Final(hash, (git_SHA_CTX *)ctx); + git_SHA1_Final(hash, &ctx->sha1); } -static void git_hash_unknown_init(void *ctx) +static void git_hash_unknown_init(git_hash_ctx *ctx) { die("trying to init unknown hash"); } -static void git_hash_unknown_update(void *ctx, const void *data, size_t len) +static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len) { die("trying to update unknown hash"); } -static void git_hash_unknown_final(unsigned char *hash, void *ctx) +static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx) { die("trying to 
finalize unknown hash"); } @@ -75,7 +75,6 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = { 0x00000000, 0, 0, - 0, git_hash_unknown_init, git_hash_unknown_update, git_hash_unknown_final, @@ -86,7 +85,6 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = { "sha-1", /* "sha1", big-endian */ 0x73686131, - sizeof(git_SHA_CTX), GIT_SHA1_RAWSZ, GIT_SHA1_HEXSZ, git_hash_sha1_init, @@ -133,14 +131,14 @@ static struct cached_object *find_cached_object(const unsigned char *sha1) } -static enum safe_crlf get_safe_crlf(unsigned flags) +static int get_conv_flags(unsigned flags) { if (flags & HASH_RENORMALIZE) - return SAFE_CRLF_RENORMALIZE; + return CONV_EOL_RENORMALIZE; else if (flags & HASH_WRITE_OBJECT) - return safe_crlf; + return global_conv_flags_eol; else - return SAFE_CRLF_FALSE; + return 0; } @@ -321,15 +319,11 @@ static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1) } } -const char *sha1_file_name(const unsigned char *sha1) +void sha1_file_name(struct strbuf *buf, const unsigned char *sha1) { - static struct strbuf buf = STRBUF_INIT; - - strbuf_reset(&buf); - strbuf_addf(&buf, "%s/", get_object_directory()); - - fill_sha1_path(&buf, sha1); - return buf.buf; + strbuf_addstr(buf, get_object_directory()); + strbuf_addch(buf, '/'); + fill_sha1_path(buf, sha1); } struct strbuf *alt_scratch_buf(struct alternate_object_database *alt) @@ -710,7 +704,12 @@ int check_and_freshen_file(const char *fn, int freshen) static int check_and_freshen_local(const unsigned char *sha1, int freshen) { - return check_and_freshen_file(sha1_file_name(sha1), freshen); + static struct strbuf buf = STRBUF_INIT; + + strbuf_reset(&buf); + sha1_file_name(&buf, sha1); + + return check_and_freshen_file(buf.buf, freshen); } static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen) @@ -788,16 +787,16 @@ void *xmmap(void *start, size_t length, int check_sha1_signature(const unsigned char *sha1, void *map, unsigned long size, const char *type) { - unsigned char real_sha1[20]; + struct object_id real_oid; enum object_type obj_type; struct git_istream *st; - git_SHA_CTX c; + git_hash_ctx c; char hdr[32]; int hdrlen; if (map) { - hash_sha1_file(map, size, type, real_sha1); - return hashcmp(sha1, real_sha1) ? -1 : 0; + hash_object_file(map, size, type, &real_oid); + return hashcmp(sha1, real_oid.hash) ? -1 : 0; } st = open_istream(sha1, &obj_type, &size, NULL); @@ -805,11 +804,11 @@ int check_sha1_signature(const unsigned char *sha1, void *map, return -1; /* Generate the header */ - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(obj_type), size) + 1; + hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1; /* Sha1.. */ - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, hdrlen); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, hdrlen); for (;;) { char buf[1024 * 16]; ssize_t readlen = read_istream(st, buf, sizeof(buf)); @@ -820,11 +819,11 @@ int check_sha1_signature(const unsigned char *sha1, void *map, } if (!readlen) break; - git_SHA1_Update(&c, buf, readlen); + the_hash_algo->update_fn(&c, buf, readlen); } - git_SHA1_Final(real_sha1, &c); + the_hash_algo->final_fn(real_oid.hash, &c); close_istream(st); - return hashcmp(sha1, real_sha1) ? -1 : 0; + return hashcmp(sha1, real_oid.hash) ? 
-1 : 0; } int git_open_cloexec(const char *name, int flags) @@ -866,8 +865,12 @@ static int stat_sha1_file(const unsigned char *sha1, struct stat *st, const char **path) { struct alternate_object_database *alt; + static struct strbuf buf = STRBUF_INIT; + + strbuf_reset(&buf); + sha1_file_name(&buf, sha1); + *path = buf.buf; - *path = sha1_file_name(sha1); if (!lstat(*path, st)) return 0; @@ -891,8 +894,12 @@ static int open_sha1_file(const unsigned char *sha1, const char **path) int fd; struct alternate_object_database *alt; int most_interesting_errno; + static struct strbuf buf = STRBUF_INIT; + + strbuf_reset(&buf); + sha1_file_name(&buf, sha1); + *path = buf.buf; - *path = sha1_file_name(sha1); fd = git_open(*path); if (fd >= 0) return fd; @@ -1087,8 +1094,8 @@ static int parse_sha1_header_extended(const char *hdr, struct object_info *oi, } type = type_from_string_gently(type_buf, type_len, 1); - if (oi->typename) - strbuf_add(oi->typename, type_buf, type_len); + if (oi->type_name) + strbuf_add(oi->type_name, type_buf, type_len); /* * Set type to 0 if its an unknown object and * we're obtaining the type using '--allow-unknown-type' @@ -1158,7 +1165,7 @@ static int sha1_loose_object_info(const unsigned char *sha1, * return value implicitly indicates whether the * object even exists. */ - if (!oi->typep && !oi->typename && !oi->sizep && !oi->contentp) { + if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) { const char *path; struct stat st; if (stat_sha1_file(sha1, &st, &path) < 0) @@ -1213,6 +1220,8 @@ static int sha1_loose_object_info(const unsigned char *sha1, return (status < 0) ? status : 0; } +int fetch_if_missing = 1; + int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags) { static struct object_info blank_oi = OBJECT_INFO_INIT; @@ -1221,6 +1230,7 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, const unsigned char *real = (flags & OBJECT_INFO_LOOKUP_REPLACE) ? lookup_replace_object(sha1) : sha1; + int already_retried = 0; if (is_null_sha1(real)) return -1; @@ -1239,8 +1249,8 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, *(oi->disk_sizep) = 0; if (oi->delta_base_sha1) hashclr(oi->delta_base_sha1); - if (oi->typename) - strbuf_addstr(oi->typename, typename(co->type)); + if (oi->type_name) + strbuf_addstr(oi->type_name, type_name(co->type)); if (oi->contentp) *oi->contentp = xmemdupz(co->buf, co->size); oi->whence = OI_CACHED; @@ -1248,19 +1258,34 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, } } - if (!find_pack_entry(real, &e)) { + while (1) { + if (find_pack_entry(real, &e)) + break; + /* Most likely it's a loose object. */ if (!sha1_loose_object_info(real, oi, flags)) return 0; /* Not a loose object; someone else may have just packed it. */ - if (flags & OBJECT_INFO_QUICK) { - return -1; - } else { + if (!(flags & OBJECT_INFO_QUICK)) { reprepare_packed_git(); - if (!find_pack_entry(real, &e)) - return -1; + if (find_pack_entry(real, &e)) + break; } + + /* Check if it is a missing object */ + if (fetch_if_missing && repository_format_partial_clone && + !already_retried) { + /* + * TODO Investigate haveing fetch_object() return + * TODO error/success and stopping the music here. 
+ */ + fetch_object(repository_format_partial_clone, real); + already_retried = 1; + continue; + } + + return -1; } if (oi == &blank_oi) @@ -1269,7 +1294,6 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, * information below, so return early. */ return 0; - rtype = packed_object_info(e.p, e.offset, oi); if (rtype < 0) { mark_bad_packed_object(e.p, real); @@ -1312,13 +1336,13 @@ static void *read_object(const unsigned char *sha1, enum object_type *type, return content; } -int pretend_sha1_file(void *buf, unsigned long len, enum object_type type, - unsigned char *sha1) +int pretend_object_file(void *buf, unsigned long len, enum object_type type, + struct object_id *oid) { struct cached_object *co; - hash_sha1_file(buf, len, typename(type), sha1); - if (has_sha1_file(sha1) || find_cached_object(sha1)) + hash_object_file(buf, len, type_name(type), oid); + if (has_sha1_file(oid->hash) || find_cached_object(oid->hash)) return 0; ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc); co = &cached_objects[cached_object_nr++]; @@ -1326,7 +1350,7 @@ int pretend_sha1_file(void *buf, unsigned long len, enum object_type type, co->type = type; co->buf = xmalloc(len); memcpy(co->buf, buf, len); - hashcpy(co->sha1, sha1); + hashcpy(co->sha1, oid->hash); return 0; } @@ -1419,20 +1443,20 @@ void *read_object_with_reference(const unsigned char *sha1, } } -static void write_sha1_file_prepare(const void *buf, unsigned long len, - const char *type, unsigned char *sha1, - char *hdr, int *hdrlen) +static void write_object_file_prepare(const void *buf, unsigned long len, + const char *type, struct object_id *oid, + char *hdr, int *hdrlen) { - git_SHA_CTX c; + git_hash_ctx c; /* Generate the header */ *hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1; /* Sha1.. 
*/ - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, *hdrlen); - git_SHA1_Update(&c, buf, len); - git_SHA1_Final(sha1, &c); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, *hdrlen); + the_hash_algo->update_fn(&c, buf, len); + the_hash_algo->final_fn(oid->hash, &c); } /* @@ -1485,12 +1509,12 @@ static int write_buffer(int fd, const void *buf, size_t len) return 0; } -int hash_sha1_file(const void *buf, unsigned long len, const char *type, - unsigned char *sha1) +int hash_object_file(const void *buf, unsigned long len, const char *type, + struct object_id *oid) { char hdr[32]; int hdrlen = sizeof(hdr); - write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen); + write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen); return 0; } @@ -1548,18 +1572,22 @@ static int create_tmpfile(struct strbuf *tmp, const char *filename) return fd; } -static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen, - const void *buf, unsigned long len, time_t mtime) +static int write_loose_object(const struct object_id *oid, char *hdr, + int hdrlen, const void *buf, unsigned long len, + time_t mtime) { int fd, ret; unsigned char compressed[4096]; git_zstream stream; - git_SHA_CTX c; - unsigned char parano_sha1[20]; + git_hash_ctx c; + struct object_id parano_oid; static struct strbuf tmp_file = STRBUF_INIT; - const char *filename = sha1_file_name(sha1); + static struct strbuf filename = STRBUF_INIT; + + strbuf_reset(&filename); + sha1_file_name(&filename, oid->hash); - fd = create_tmpfile(&tmp_file, filename); + fd = create_tmpfile(&tmp_file, filename.buf); if (fd < 0) { if (errno == EACCES) return error("insufficient permission for adding an object to repository database %s", get_object_directory()); @@ -1571,14 +1599,14 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen, git_deflate_init(&stream, zlib_compression_level); stream.next_out = compressed; stream.avail_out = sizeof(compressed); - git_SHA1_Init(&c); + the_hash_algo->init_fn(&c); /* First header.. */ stream.next_in = (unsigned char *)hdr; stream.avail_in = hdrlen; while (git_deflate(&stream, 0) == Z_OK) ; /* nothing */ - git_SHA1_Update(&c, hdr, hdrlen); + the_hash_algo->update_fn(&c, hdr, hdrlen); /* Then the data itself.. 
*/ stream.next_in = (void *)buf; @@ -1586,7 +1614,7 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen, do { unsigned char *in0 = stream.next_in; ret = git_deflate(&stream, Z_FINISH); - git_SHA1_Update(&c, in0, stream.next_in - in0); + the_hash_algo->update_fn(&c, in0, stream.next_in - in0); if (write_buffer(fd, compressed, stream.next_out - compressed) < 0) die("unable to write sha1 file"); stream.next_out = compressed; @@ -1594,13 +1622,16 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen, } while (ret == Z_OK); if (ret != Z_STREAM_END) - die("unable to deflate new object %s (%d)", sha1_to_hex(sha1), ret); + die("unable to deflate new object %s (%d)", oid_to_hex(oid), + ret); ret = git_deflate_end_gently(&stream); if (ret != Z_OK) - die("deflateEnd on object %s failed (%d)", sha1_to_hex(sha1), ret); - git_SHA1_Final(parano_sha1, &c); - if (hashcmp(sha1, parano_sha1) != 0) - die("confused by unstable object source data for %s", sha1_to_hex(sha1)); + die("deflateEnd on object %s failed (%d)", oid_to_hex(oid), + ret); + the_hash_algo->final_fn(parano_oid.hash, &c); + if (oidcmp(oid, ¶no_oid) != 0) + die("confused by unstable object source data for %s", + oid_to_hex(oid)); close_sha1_file(fd); @@ -1612,7 +1643,7 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen, warning_errno("failed utime() on %s", tmp_file.buf); } - return finalize_object_file(tmp_file.buf, filename); + return finalize_object_file(tmp_file.buf, filename.buf); } static int freshen_loose_object(const unsigned char *sha1) @@ -1633,7 +1664,8 @@ static int freshen_packed_object(const unsigned char *sha1) return 1; } -int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1) +int write_object_file(const void *buf, unsigned long len, const char *type, + struct object_id *oid) { char hdr[32]; int hdrlen = sizeof(hdr); @@ -1641,14 +1673,15 @@ int write_sha1_file(const void *buf, unsigned long len, const char *type, unsign /* Normally if we have it in the pack then we do not bother writing * it out into .git/objects/??/?{38} file. 
*/ - write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen); - if (freshen_packed_object(sha1) || freshen_loose_object(sha1)) + write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen); + if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash)) return 0; - return write_loose_object(sha1, hdr, hdrlen, buf, len, 0); + return write_loose_object(oid, hdr, hdrlen, buf, len, 0); } -int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, - struct object_id *oid, unsigned flags) +int hash_object_file_literally(const void *buf, unsigned long len, + const char *type, struct object_id *oid, + unsigned flags) { char *header; int hdrlen, status = 0; @@ -1656,20 +1689,20 @@ int hash_sha1_file_literally(const void *buf, unsigned long len, const char *typ /* type string, SP, %lu of the length plus NUL must fit this */ hdrlen = strlen(type) + 32; header = xmalloc(hdrlen); - write_sha1_file_prepare(buf, len, type, oid->hash, header, &hdrlen); + write_object_file_prepare(buf, len, type, oid, header, &hdrlen); if (!(flags & HASH_WRITE_OBJECT)) goto cleanup; if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash)) goto cleanup; - status = write_loose_object(oid->hash, header, hdrlen, buf, len, 0); + status = write_loose_object(oid, header, hdrlen, buf, len, 0); cleanup: free(header); return status; } -int force_object_loose(const unsigned char *sha1, time_t mtime) +int force_object_loose(const struct object_id *oid, time_t mtime) { void *buf; unsigned long len; @@ -1678,13 +1711,13 @@ int force_object_loose(const unsigned char *sha1, time_t mtime) int hdrlen; int ret; - if (has_loose_object(sha1)) + if (has_loose_object(oid->hash)) return 0; - buf = read_object(sha1, &type, &len); + buf = read_object(oid->hash, &type, &len); if (!buf) - return error("cannot read sha1_file for %s", sha1_to_hex(sha1)); - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1; - ret = write_loose_object(sha1, hdr, hdrlen, buf, len, mtime); + return error("cannot read sha1_file for %s", oid_to_hex(oid)); + hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1; + ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime); free(buf); return ret; @@ -1752,7 +1785,7 @@ static int index_mem(struct object_id *oid, void *buf, size_t size, if ((type == OBJ_BLOB) && path) { struct strbuf nbuf = STRBUF_INIT; if (convert_to_git(&the_index, path, buf, size, &nbuf, - get_safe_crlf(flags))) { + get_conv_flags(flags))) { buf = strbuf_detach(&nbuf, &size); re_allocated = 1; } @@ -1767,9 +1800,9 @@ static int index_mem(struct object_id *oid, void *buf, size_t size, } if (write_object) - ret = write_sha1_file(buf, size, typename(type), oid->hash); + ret = write_object_file(buf, size, type_name(type), oid); else - ret = hash_sha1_file(buf, size, typename(type), oid->hash); + ret = hash_object_file(buf, size, type_name(type), oid); if (re_allocated) free(buf); return ret; @@ -1786,14 +1819,14 @@ static int index_stream_convert_blob(struct object_id *oid, int fd, assert(would_convert_to_git_filter_fd(path)); convert_to_git_filter_fd(&the_index, path, fd, &sbuf, - get_safe_crlf(flags)); + get_conv_flags(flags)); if (write_object) - ret = write_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB), - oid->hash); + ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB), + oid); else - ret = hash_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB), - oid->hash); + ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB), + oid); 
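/*
 * Several hunks above swap bare "unsigned char sha1[20]" buffers and the
 * hashcmp()/sha1_to_hex() helpers for "struct object_id" with oidcmp()
 * and oid_to_hex().  A stripped-down sketch of that pairing; the buffer
 * size and the fixed 20-byte comparison length used here are simplifying
 * assumptions, not Git's actual definitions.
 */
#include <stdio.h>
#include <string.h>

#define RAWSZ 20

struct object_id {
	unsigned char hash[32];	/* sized for the largest hash we expect */
};

static int oidcmp(const struct object_id *a, const struct object_id *b)
{
	return memcmp(a->hash, b->hash, RAWSZ);
}

static const char *oid_to_hex(const struct object_id *oid)
{
	static char hex[2 * RAWSZ + 1];
	int i;

	for (i = 0; i < RAWSZ; i++)
		snprintf(hex + 2 * i, 3, "%02x", oid->hash[i]);
	return hex;
}

int main(void)
{
	struct object_id a = { { 0 } }, b = { { 0 } };

	b.hash[0] = 0xab;
	printf("%s vs %s -> different: %d\n",
	       oid_to_hex(&a), oid_to_hex(&b), oidcmp(&a, &b) != 0);
	return 0;
}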
strbuf_release(&sbuf); return ret; } @@ -1907,8 +1940,8 @@ int index_path(struct object_id *oid, const char *path, struct stat *st, unsigne if (strbuf_readlink(&sb, path, st->st_size)) return error_errno("readlink(\"%s\")", path); if (!(flags & HASH_WRITE_OBJECT)) - hash_sha1_file(sb.buf, sb.len, blob_type, oid->hash); - else if (write_sha1_file(sb.buf, sb.len, blob_type, oid->hash)) + hash_object_file(sb.buf, sb.len, blob_type, oid); + else if (write_object_file(sb.buf, sb.len, blob_type, oid)) rc = error("%s: failed to insert into database", path); strbuf_release(&sb); break; @@ -1942,7 +1975,7 @@ void assert_sha1_type(const unsigned char *sha1, enum object_type expect) die("%s is not a valid object", sha1_to_hex(sha1)); if (type != expect) die("%s is not a valid '%s' object", sha1_to_hex(sha1), - typename(expect)); + type_name(expect)); } int for_each_file_in_obj_subdir(unsigned int subdir_nr, @@ -2093,14 +2126,14 @@ static int check_stream_sha1(git_zstream *stream, const char *path, const unsigned char *expected_sha1) { - git_SHA_CTX c; + git_hash_ctx c; unsigned char real_sha1[GIT_MAX_RAWSZ]; unsigned char buf[4096]; unsigned long total_read; int status = Z_OK; - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, stream->total_out); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, stream->total_out); /* * We already read some bytes into hdr, but the ones up to the NUL @@ -2119,7 +2152,7 @@ static int check_stream_sha1(git_zstream *stream, if (size - total_read < stream->avail_out) stream->avail_out = size - total_read; status = git_inflate(stream, Z_FINISH); - git_SHA1_Update(&c, buf, stream->next_out - buf); + the_hash_algo->update_fn(&c, buf, stream->next_out - buf); total_read += stream->next_out - buf; } git_inflate_end(stream); @@ -2134,7 +2167,7 @@ static int check_stream_sha1(git_zstream *stream, return -1; } - git_SHA1_Final(real_sha1, &c); + the_hash_algo->final_fn(real_sha1, &c); if (hashcmp(expected_sha1, real_sha1)) { error("sha1 mismatch for %s (expected %s)", path, sha1_to_hex(expected_sha1)); @@ -2187,7 +2220,7 @@ int read_loose_object(const char *path, goto out; } if (check_sha1_signature(expected_sha1, *contents, - *size, typename(*type))) { + *size, type_name(*type))) { error("sha1 mismatch for %s (expected %s)", path, sha1_to_hex(expected_sha1)); free(*contents); diff --git a/sha1_name.c b/sha1_name.c index 611c7d24dd..735c1c0b8e 100644 --- a/sha1_name.c +++ b/sha1_name.c @@ -381,7 +381,7 @@ static int show_ambiguous_object(const struct object_id *oid, void *data) advise(" %s %s%s", find_unique_abbrev(oid->hash, DEFAULT_ABBREV), - typename(type) ? typename(type) : "unknown type", + type_name(type) ? type_name(type) : "unknown type", desc.buf); strbuf_release(&desc); @@ -542,20 +542,20 @@ static void find_abbrev_len_for_pack(struct packed_git *p, /* * first is now the position in the packfile where we would insert * mad->hash if it does not exist (or the position of mad->hash if - * it does exist). Hence, we consider a maximum of three objects + * it does exist). Hence, we consider a maximum of two objects * nearby for the abbreviation length. 
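/*
 * check_stream_sha1() above verifies a loose object without holding it all
 * in memory: it inflates fixed-size chunks and feeds each chunk to the
 * hash, then compares the final digest with the expected object ID.  The
 * sketch below shows the same "hash while streaming" shape on an ordinary
 * file, using a toy FNV-1a hash in place of zlib plus SHA-1 (both are
 * simplifying assumptions).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t fnv1a_update(uint32_t h, const unsigned char *buf, size_t len)
{
	while (len--) {
		h ^= *buf++;
		h *= 16777619u;
	}
	return h;
}

static int stream_matches(FILE *fp, uint32_t expected)
{
	unsigned char buf[4096];
	uint32_t h = 2166136261u;
	size_t n;

	while ((n = fread(buf, 1, sizeof(buf), fp)) > 0)
		h = fnv1a_update(h, buf, n);	/* hash each chunk as it arrives */
	return h == expected;
}

int main(int argc, char **argv)
{
	FILE *fp;

	if (argc < 2 || !(fp = fopen(argv[1], "rb")))
		return 1;
	printf("match: %d\n", stream_matches(fp, 0));	/* 0 is a placeholder expectation */
	fclose(fp);
	return 0;
}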
*/ mad->init_len = 0; if (!match) { - nth_packed_object_oid(&oid, p, first); - extend_abbrev_len(&oid, mad); + if (nth_packed_object_oid(&oid, p, first)) + extend_abbrev_len(&oid, mad); } else if (first < num - 1) { - nth_packed_object_oid(&oid, p, first + 1); - extend_abbrev_len(&oid, mad); + if (nth_packed_object_oid(&oid, p, first + 1)) + extend_abbrev_len(&oid, mad); } if (first > 0) { - nth_packed_object_oid(&oid, p, first - 1); - extend_abbrev_len(&oid, mad); + if (nth_packed_object_oid(&oid, p, first - 1)) + extend_abbrev_len(&oid, mad); } mad->init_len = mad->cur_len; } @@ -901,8 +901,8 @@ struct object *peel_to_type(const char *name, int namelen, if (name) error("%.*s: expected %s type, but the object " "dereferences to %s type", - namelen, name, typename(expected_type), - typename(o->type)); + namelen, name, type_name(expected_type), + type_name(o->type)); return NULL; } } diff --git a/sha1dc_git.h b/sha1dc_git.h index a8c2729278..41e1c3fd3f 100644 --- a/sha1dc_git.h +++ b/sha1dc_git.h @@ -1,9 +1,9 @@ /* Plumbing with collition-detecting SHA1 code */ -#ifdef DC_SHA1_SUBMODULE -#include "sha1collisiondetection/lib/sha1.h" -#elif defined(DC_SHA1_EXTERNAL) +#ifdef DC_SHA1_EXTERNAL #include <sha1dc/sha1.h> +#elif defined(DC_SHA1_SUBMODULE) +#include "sha1collisiondetection/lib/sha1.h" #else #include "sha1dc/sha1.h" #endif diff --git a/split-index.c b/split-index.c index 284d04d67f..3eb8ff1b43 100644 --- a/split-index.c +++ b/split-index.c @@ -305,17 +305,17 @@ void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce } void replace_index_entry_in_base(struct index_state *istate, - struct cache_entry *old, - struct cache_entry *new) + struct cache_entry *old_entry, + struct cache_entry *new_entry) { - if (old->index && + if (old_entry->index && istate->split_index && istate->split_index->base && - old->index <= istate->split_index->base->cache_nr) { - new->index = old->index; - if (old != istate->split_index->base->cache[new->index - 1]) - free(istate->split_index->base->cache[new->index - 1]); - istate->split_index->base->cache[new->index - 1] = new; + old_entry->index <= istate->split_index->base->cache_nr) { + new_entry->index = old_entry->index; + if (old_entry != istate->split_index->base->cache[new_entry->index - 1]) + free(istate->split_index->base->cache[new_entry->index - 1]); + istate->split_index->base->cache[new_entry->index - 1] = new_entry; } } diff --git a/split-index.h b/split-index.h index df91c1bda8..43d66826eb 100644 --- a/split-index.h +++ b/split-index.h @@ -21,7 +21,7 @@ struct split_index *init_split_index(struct index_state *istate); void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce); void replace_index_entry_in_base(struct index_state *istate, struct cache_entry *old, - struct cache_entry *new); + struct cache_entry *new_entry); int read_link_extension(struct index_state *istate, const void *data, unsigned long sz); int write_link_extension(struct strbuf *sb, @@ -95,6 +95,7 @@ void strbuf_trim(struct strbuf *sb) strbuf_rtrim(sb); strbuf_ltrim(sb); } + void strbuf_rtrim(struct strbuf *sb) { while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1])) @@ -102,6 +103,13 @@ void strbuf_rtrim(struct strbuf *sb) sb->buf[sb->len] = '\0'; } +void strbuf_trim_trailing_dir_sep(struct strbuf *sb) +{ + while (sb->len > 0 && is_dir_sep((unsigned char)sb->buf[sb->len - 1])) + sb->len--; + sb->buf[sb->len] = '\0'; +} + void strbuf_ltrim(struct strbuf *sb) { char *b = sb->buf; @@ -612,14 +620,18 @@ ssize_t 
strbuf_read_file(struct strbuf *sb, const char *path, size_t hint) { int fd; ssize_t len; + int saved_errno; fd = open(path, O_RDONLY); if (fd < 0) return -1; len = strbuf_read(sb, fd, hint); + saved_errno = errno; close(fd); - if (len < 0) + if (len < 0) { + errno = saved_errno; return -1; + } return len; } @@ -179,6 +179,9 @@ extern void strbuf_trim(struct strbuf *); extern void strbuf_rtrim(struct strbuf *); extern void strbuf_ltrim(struct strbuf *); +/* Strip trailing directory separators */ +extern void strbuf_trim_trailing_dir_sep(struct strbuf *); + /** * Replace the contents of the strbuf with a reencoded form. Returns -1 * on error, 0 on success. diff --git a/sub-process.h b/sub-process.h index 49701998c9..71b18ad5af 100644 --- a/sub-process.h +++ b/sub-process.h @@ -73,8 +73,8 @@ static inline struct child_process *subprocess_get_child_process( } /* - * Perform the version and capability negotiation as described in the "Long - * Running Filter Process" section of the gitattributes documentation using the + * Perform the version and capability negotiation as described in the + * "Handshake" section of long-running-process-protocol.txt using the * given requested versions and capabilities. The "versions" and "capabilities" * parameters are arrays terminated by a 0 or blank struct. * diff --git a/submodule.c b/submodule.c index 47ddc9b273..12a2503fda 100644 --- a/submodule.c +++ b/submodule.c @@ -590,7 +590,7 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path, struct object_id *one, struct object_id *two, unsigned dirty_submodule) { - const struct object_id *old = the_hash_algo->empty_tree, *new = the_hash_algo->empty_tree; + const struct object_id *old_oid = the_hash_algo->empty_tree, *new_oid = the_hash_algo->empty_tree; struct commit *left = NULL, *right = NULL; struct commit_list *merge_bases = NULL; struct child_process cp = CHILD_PROCESS_INIT; @@ -605,9 +605,9 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path, goto done; if (left) - old = one; + old_oid = one; if (right) - new = two; + new_oid = two; cp.git_cmd = 1; cp.dir = path; @@ -630,7 +630,7 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path, argv_array_pushf(&cp.args, "--dst-prefix=%s%s/", o->b_prefix, path); } - argv_array_push(&cp.args, oid_to_hex(old)); + argv_array_push(&cp.args, oid_to_hex(old_oid)); /* * If the submodule has modified content, we will diff against the * work tree, under the assumption that the user has asked for the @@ -638,7 +638,7 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path, * haven't yet been committed to the submodule yet. */ if (!(dirty_submodule & DIRTY_SUBMODULE_MODIFIED)) - argv_array_push(&cp.args, oid_to_hex(new)); + argv_array_push(&cp.args, oid_to_hex(new_oid)); prepare_submodule_repo_env(&cp.env_array); if (start_command(&cp)) @@ -831,7 +831,7 @@ static int check_has_commit(const struct object_id *oid, void *data) return 0; default: die(_("submodule entry '%s' (%s) is a %s, not a commit"), - cb->path, oid_to_hex(oid), typename(type)); + cb->path, oid_to_hex(oid), type_name(type)); } } @@ -1578,8 +1578,8 @@ static void submodule_reset_index(const char *path) * pass NULL for old or new respectively. 
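/*
 * The strbuf_read_file() change above is about errno hygiene: close() (or
 * any other cleanup call) may overwrite the errno that described the real
 * failure, so the code saves errno first and restores it on the error
 * path.  A small sketch of the same pattern; slurp() and read_all() are
 * illustrative helpers, not functions from this codebase.
 */
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>

static ssize_t read_all(int fd, void *buf, size_t len)
{
	char *p = buf;
	size_t done = 0;

	while (done < len) {
		ssize_t n = read(fd, p + done, len - done);
		if (n < 0)
			return -1;
		if (!n)
			break;
		done += n;
	}
	return (ssize_t)done;
}

static ssize_t slurp(const char *path, void *buf, size_t len)
{
	int fd = open(path, O_RDONLY);
	ssize_t got;
	int saved_errno;

	if (fd < 0)
		return -1;
	got = read_all(fd, buf, len);
	saved_errno = errno;		/* remember why read_all() failed, if it did */
	close(fd);			/* may clobber errno on its own */
	if (got < 0) {
		errno = saved_errno;	/* report the read error, not the close */
		return -1;
	}
	return got;
}

int main(int argc, char **argv)
{
	char buf[4096];

	if (argc < 2)
		return 1;
	return slurp(argv[1], buf, sizeof(buf)) < 0;
}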
*/ int submodule_move_head(const char *path, - const char *old, - const char *new, + const char *old_head, + const char *new_head, unsigned flags) { int ret = 0; @@ -1600,7 +1600,7 @@ int submodule_move_head(const char *path, else error_code_ptr = NULL; - if (old && !is_submodule_populated_gently(path, error_code_ptr)) + if (old_head && !is_submodule_populated_gently(path, error_code_ptr)) return 0; sub = submodule_from_path(&null_oid, path); @@ -1608,14 +1608,14 @@ int submodule_move_head(const char *path, if (!sub) die("BUG: could not get submodule information for '%s'", path); - if (old && !(flags & SUBMODULE_MOVE_HEAD_FORCE)) { + if (old_head && !(flags & SUBMODULE_MOVE_HEAD_FORCE)) { /* Check if the submodule has a dirty index. */ if (submodule_has_dirty_index(sub)) return error(_("submodule '%s' has dirty index"), path); } if (!(flags & SUBMODULE_MOVE_HEAD_DRY_RUN)) { - if (old) { + if (old_head) { if (!submodule_uses_gitfile(path)) absorb_git_dir_into_superproject("", path, ABSORB_GITDIR_RECURSE_SUBMODULES); @@ -1629,7 +1629,7 @@ int submodule_move_head(const char *path, submodule_reset_index(path); } - if (old && (flags & SUBMODULE_MOVE_HEAD_FORCE)) { + if (old_head && (flags & SUBMODULE_MOVE_HEAD_FORCE)) { char *gitdir = xstrfmt("%s/modules/%s", get_git_common_dir(), sub->name); connect_work_tree_and_git_dir(path, gitdir); @@ -1658,9 +1658,9 @@ int submodule_move_head(const char *path, argv_array_push(&cp.args, "-m"); if (!(flags & SUBMODULE_MOVE_HEAD_FORCE)) - argv_array_push(&cp.args, old ? old : EMPTY_TREE_SHA1_HEX); + argv_array_push(&cp.args, old_head ? old_head : EMPTY_TREE_SHA1_HEX); - argv_array_push(&cp.args, new ? new : EMPTY_TREE_SHA1_HEX); + argv_array_push(&cp.args, new_head ? new_head : EMPTY_TREE_SHA1_HEX); if (run_command(&cp)) { ret = -1; @@ -1668,7 +1668,7 @@ int submodule_move_head(const char *path, } if (!(flags & SUBMODULE_MOVE_HEAD_DRY_RUN)) { - if (new) { + if (new_head) { child_process_init(&cp); /* also set the HEAD accordingly */ cp.git_cmd = 1; @@ -1677,7 +1677,7 @@ int submodule_move_head(const char *path, prepare_submodule_repo_env(&cp.env_array); argv_array_pushl(&cp.args, "update-ref", "HEAD", - "--no-deref", new, NULL); + "--no-deref", new_head, NULL); if (run_command(&cp)) { ret = -1; diff --git a/submodule.h b/submodule.h index b9b7ef0030..9589f13127 100644 --- a/submodule.h +++ b/submodule.h @@ -117,7 +117,7 @@ int submodule_to_gitdir(struct strbuf *buf, const char *submodule); #define SUBMODULE_MOVE_HEAD_FORCE (1<<1) extern int submodule_move_head(const char *path, const char *old, - const char *new, + const char *new_head, unsigned flags); /* @@ -84,9 +84,10 @@ appropriately before running "make". -x:: Turn on shell tracing (i.e., `set -x`) during the tests - themselves. Implies `--verbose`. Note that in non-bash shells, - this can cause failures in some tests which redirect and test - the output of shell functions. Use with caution. + themselves. Implies `--verbose`. + Ignored in test scripts that set the variable 'test_untraceable' + to a non-empty value, unless it's run with a Bash version + supporting BASH_XTRACEFD, i.e. v4.1 or later. -d:: --debug:: @@ -452,6 +453,22 @@ Don't: causing the next test to start in an unexpected directory. Do so inside a subshell if necessary. + - save and verify the standard error of compound commands, i.e. 
group + commands, subshells, and shell functions (except test helper + functions like 'test_must_fail') like this: + + ( cd dir && git cmd ) 2>error && + test_cmp expect error + + When running the test with '-x' tracing, then the trace of commands + executed in the compound command will be included in standard error + as well, quite possibly throwing off the subsequent checks examining + the output. Instead, save only the relevant git command's standard + error: + + ( cd dir && git cmd 2>../error ) && + test_cmp expect error + - Break the TAP output The raw output from your test may be interpreted by a TAP harness. TAP diff --git a/t/helper/test-dump-untracked-cache.c b/t/helper/test-dump-untracked-cache.c index f752532ffb..d7c55c2355 100644 --- a/t/helper/test-dump-untracked-cache.c +++ b/t/helper/test-dump-untracked-cache.c @@ -54,8 +54,8 @@ int cmd_main(int ac, const char **av) printf("no untracked cache\n"); return 0; } - printf("info/exclude %s\n", sha1_to_hex(uc->ss_info_exclude.sha1)); - printf("core.excludesfile %s\n", sha1_to_hex(uc->ss_excludes_file.sha1)); + printf("info/exclude %s\n", oid_to_hex(&uc->ss_info_exclude.oid)); + printf("core.excludesfile %s\n", oid_to_hex(&uc->ss_excludes_file.oid)); printf("exclude_per_dir %s\n", uc->exclude_per_dir); printf("flags %08x\n", uc->dir_flags); if (uc->root) diff --git a/t/helper/test-hashmap.c b/t/helper/test-hashmap.c index 1145d51671..9ae9281c07 100644 --- a/t/helper/test-hashmap.c +++ b/t/helper/test-hashmap.c @@ -1,5 +1,6 @@ #include "git-compat-util.h" #include "hashmap.h" +#include "strbuf.h" struct test_entry { @@ -29,11 +30,12 @@ static int test_entry_cmp(const void *cmp_data, return strcmp(e1->key, key ? key : e2->key); } -static struct test_entry *alloc_test_entry(int hash, char *key, int klen, - char *value, int vlen) +static struct test_entry *alloc_test_entry(unsigned int hash, + char *key, char *value) { - struct test_entry *entry = malloc(sizeof(struct test_entry) + klen - + vlen + 2); + size_t klen = strlen(key); + size_t vlen = strlen(value); + struct test_entry *entry = xmalloc(st_add4(sizeof(*entry), klen, vlen, 2)); hashmap_entry_init(entry, hash); memcpy(entry->key, key, klen + 1); memcpy(entry->key + klen + 1, value, vlen + 1); @@ -85,11 +87,11 @@ static void perf_hashmap(unsigned int method, unsigned int rounds) unsigned int *hashes; unsigned int i, j; - entries = malloc(TEST_SIZE * sizeof(struct test_entry *)); - hashes = malloc(TEST_SIZE * sizeof(int)); + ALLOC_ARRAY(entries, TEST_SIZE); + ALLOC_ARRAY(hashes, TEST_SIZE); for (i = 0; i < TEST_SIZE; i++) { - snprintf(buf, sizeof(buf), "%i", i); - entries[i] = alloc_test_entry(0, buf, strlen(buf), "", 0); + xsnprintf(buf, sizeof(buf), "%i", i); + entries[i] = alloc_test_entry(0, buf, ""); hashes[i] = hash(method, i, entries[i]->key); } @@ -144,7 +146,7 @@ static void perf_hashmap(unsigned int method, unsigned int rounds) */ int cmd_main(int argc, const char **argv) { - char line[1024]; + struct strbuf line = STRBUF_INIT; struct hashmap map; int icase; @@ -153,44 +155,42 @@ int cmd_main(int argc, const char **argv) hashmap_init(&map, test_entry_cmp, &icase, 0); /* process commands from stdin */ - while (fgets(line, sizeof(line), stdin)) { + while (strbuf_getline(&line, stdin) != EOF) { char *cmd, *p1 = NULL, *p2 = NULL; - int l1 = 0, l2 = 0, hash = 0; + unsigned int hash = 0; struct test_entry *entry; /* break line into command and up to two parameters */ - cmd = strtok(line, DELIM); + cmd = strtok(line.buf, DELIM); /* ignore empty lines */ if (!cmd || *cmd == 
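/*
 * alloc_test_entry() above switches from a bare malloc(sizeof(...) + klen
 * + vlen + 2) to xmalloc(st_add4(...)): the additions are checked so that
 * an enormous key/value pair cannot wrap size_t around and produce a
 * too-small allocation.  A sketch of such a checked addition; checked_add()
 * is a local stand-in for Git's st_add family, not its actual definition.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static size_t checked_add(size_t a, size_t b)
{
	if (SIZE_MAX - a < b) {
		fprintf(stderr, "size_t overflow: %zu + %zu\n", a, b);
		exit(1);
	}
	return a + b;
}

int main(void)
{
	size_t klen = 3, vlen = 5;	/* strlen(key), strlen(value) */
	size_t total = checked_add(checked_add(checked_add(16, klen), vlen), 2);

	printf("allocating %zu bytes\n", total);	/* dies instead of wrapping */
	return 0;
}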
'#') continue; p1 = strtok(NULL, DELIM); if (p1) { - l1 = strlen(p1); hash = icase ? strihash(p1) : strhash(p1); p2 = strtok(NULL, DELIM); - if (p2) - l2 = strlen(p2); } - if (!strcmp("hash", cmd) && l1) { + if (!strcmp("hash", cmd) && p1) { /* print results of different hash functions */ - printf("%u %u %u %u\n", strhash(p1), memhash(p1, l1), - strihash(p1), memihash(p1, l1)); + printf("%u %u %u %u\n", + strhash(p1), memhash(p1, strlen(p1)), + strihash(p1), memihash(p1, strlen(p1))); - } else if (!strcmp("add", cmd) && l1 && l2) { + } else if (!strcmp("add", cmd) && p1 && p2) { /* create entry with key = p1, value = p2 */ - entry = alloc_test_entry(hash, p1, l1, p2, l2); + entry = alloc_test_entry(hash, p1, p2); /* add to hashmap */ hashmap_add(&map, entry); - } else if (!strcmp("put", cmd) && l1 && l2) { + } else if (!strcmp("put", cmd) && p1 && p2) { /* create entry with key = p1, value = p2 */ - entry = alloc_test_entry(hash, p1, l1, p2, l2); + entry = alloc_test_entry(hash, p1, p2); /* add / replace entry */ entry = hashmap_put(&map, entry); @@ -199,7 +199,7 @@ int cmd_main(int argc, const char **argv) puts(entry ? get_value(entry) : "NULL"); free(entry); - } else if (!strcmp("get", cmd) && l1) { + } else if (!strcmp("get", cmd) && p1) { /* lookup entry in hashmap */ entry = hashmap_get_from_hash(&map, hash, p1); @@ -212,7 +212,7 @@ int cmd_main(int argc, const char **argv) entry = hashmap_get_next(&map, entry); } - } else if (!strcmp("remove", cmd) && l1) { + } else if (!strcmp("remove", cmd) && p1) { /* setup static key */ struct hashmap_entry key; @@ -238,7 +238,7 @@ int cmd_main(int argc, const char **argv) printf("%u %u\n", map.tablesize, hashmap_get_size(&map)); - } else if (!strcmp("intern", cmd) && l1) { + } else if (!strcmp("intern", cmd) && p1) { /* test that strintern works */ const char *i1 = strintern(p1); @@ -252,7 +252,7 @@ int cmd_main(int argc, const char **argv) else printf("%s\n", i1); - } else if (!strcmp("perfhashmap", cmd) && l1 && l2) { + } else if (!strcmp("perfhashmap", cmd) && p1 && p2) { perf_hashmap(atoi(p1), atoi(p2)); @@ -263,6 +263,7 @@ int cmd_main(int argc, const char **argv) } } + strbuf_release(&line); hashmap_free(&map, 1); return 0; } diff --git a/t/helper/test-run-command.c b/t/helper/test-run-command.c index d24d157379..153342e44d 100644 --- a/t/helper/test-run-command.c +++ b/t/helper/test-run-command.c @@ -56,6 +56,15 @@ int cmd_main(int argc, const char **argv) if (argc < 3) return 1; + while (!strcmp(argv[1], "env")) { + if (!argv[2]) + die("env specifier without a value"); + argv_array_push(&proc.env_array, argv[2]); + argv += 2; + argc -= 2; + } + if (argc < 3) + return 1; proc.argv = (const char **)argv + 2; if (!strcmp(argv[1], "start-command-ENOENT")) { diff --git a/t/helper/test-wildmatch.c b/t/helper/test-wildmatch.c index 921d7b3e7e..66d33dfcfd 100644 --- a/t/helper/test-wildmatch.c +++ b/t/helper/test-wildmatch.c @@ -16,6 +16,8 @@ int cmd_main(int argc, const char **argv) return !!wildmatch(argv[3], argv[2], WM_PATHNAME | WM_CASEFOLD); else if (!strcmp(argv[1], "pathmatch")) return !!wildmatch(argv[3], argv[2], 0); + else if (!strcmp(argv[1], "ipathmatch")) + return !!wildmatch(argv[3], argv[2], WM_CASEFOLD); else return 1; } diff --git a/t/lib-terminal.sh b/t/lib-terminal.sh index cd220e378e..e3809dcead 100644 --- a/t/lib-terminal.sh +++ b/t/lib-terminal.sh @@ -9,8 +9,8 @@ test_terminal () { echo >&4 "test_terminal: need to declare TTY prerequisite" return 127 fi - perl "$TEST_DIRECTORY"/test-terminal.perl "$@" -} + perl 
"$TEST_DIRECTORY"/test-terminal.perl "$@" 2>&7 +} 7>&2 2>&4 test_lazy_prereq TTY ' test_have_prereq PERL && diff --git a/t/perf/aggregate.perl b/t/perf/aggregate.perl index e401208488..821cf1498b 100755 --- a/t/perf/aggregate.perl +++ b/t/perf/aggregate.perl @@ -1,8 +1,9 @@ #!/usr/bin/perl -use lib '../../perl/blib/lib'; +use lib '../../perl/build/lib'; use strict; use warnings; +use JSON; use Git; sub get_times { @@ -35,10 +36,34 @@ sub format_times { return $out; } -my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests); +my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests, + $codespeed, $subsection, $reponame); while (scalar @ARGV) { my $arg = $ARGV[0]; my $dir; + if ($arg eq "--codespeed") { + $codespeed = 1; + shift @ARGV; + next; + } + if ($arg eq "--subsection") { + shift @ARGV; + $subsection = $ARGV[0]; + shift @ARGV; + if (! $subsection) { + die "empty subsection"; + } + next; + } + if ($arg eq "--reponame") { + shift @ARGV; + $reponame = $ARGV[0]; + shift @ARGV; + if (! $reponame) { + die "empty reponame"; + } + next; + } last if -f $arg or $arg eq "--"; if (! -d $arg) { my $rev = Git::command_oneline(qw(rev-parse --verify), $arg); @@ -70,8 +95,15 @@ if (not @tests) { } my $resultsdir = "test-results"; -if ($ENV{GIT_PERF_SUBSECTION} ne "") { - $resultsdir .= "/" . $ENV{GIT_PERF_SUBSECTION}; + +if (! $subsection and + exists $ENV{GIT_PERF_SUBSECTION} and + $ENV{GIT_PERF_SUBSECTION} ne "") { + $subsection = $ENV{GIT_PERF_SUBSECTION}; +} + +if ($subsection) { + $resultsdir .= "/" . $subsection; } my @subtests; @@ -100,13 +132,6 @@ sub read_descr { return $line; } -my %descrs; -my $descrlen = 4; # "Test" -for my $t (@subtests) { - $descrs{$t} = $shorttests{$t}.": ".read_descr("$resultsdir/$t.descr"); - $descrlen = length $descrs{$t} if length $descrs{$t}>$descrlen; -} - sub have_duplicate { my %seen; for (@_) { @@ -122,54 +147,119 @@ sub have_slash { return 0; } -my %newdirabbrevs = %dirabbrevs; -while (!have_duplicate(values %newdirabbrevs)) { - %dirabbrevs = %newdirabbrevs; - last if !have_slash(values %dirabbrevs); - %newdirabbrevs = %dirabbrevs; - for (values %newdirabbrevs) { - s{^[^/]*/}{}; +sub print_default_results { + my %descrs; + my $descrlen = 4; # "Test" + for my $t (@subtests) { + $descrs{$t} = $shorttests{$t}.": ".read_descr("$resultsdir/$t.descr"); + $descrlen = length $descrs{$t} if length $descrs{$t}>$descrlen; } -} -my %times; -my @colwidth = ((0)x@dirs); -for my $i (0..$#dirs) { - my $d = $dirs[$i]; - my $w = length (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d}); - $colwidth[$i] = $w if $w > $colwidth[$i]; -} -for my $t (@subtests) { - my $firstr; + my %newdirabbrevs = %dirabbrevs; + while (!have_duplicate(values %newdirabbrevs)) { + %dirabbrevs = %newdirabbrevs; + last if !have_slash(values %dirabbrevs); + %newdirabbrevs = %dirabbrevs; + for (values %newdirabbrevs) { + s{^[^/]*/}{}; + } + } + + my %times; + my @colwidth = ((0)x@dirs); for my $i (0..$#dirs) { my $d = $dirs[$i]; - $times{$prefixes{$d}.$t} = [get_times("$resultsdir/$prefixes{$d}$t.times")]; - my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}}; - my $w = length format_times($r,$u,$s,$firstr); + my $w = length (exists $dirabbrevs{$d} ? 
$dirabbrevs{$d} : $dirnames{$d}); $colwidth[$i] = $w if $w > $colwidth[$i]; - $firstr = $r unless defined $firstr; } -} -my $totalwidth = 3*@dirs+$descrlen; -$totalwidth += $_ for (@colwidth); - -binmode STDOUT, ":utf8" or die "PANIC on binmode: $!"; + for my $t (@subtests) { + my $firstr; + for my $i (0..$#dirs) { + my $d = $dirs[$i]; + $times{$prefixes{$d}.$t} = [get_times("$resultsdir/$prefixes{$d}$t.times")]; + my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}}; + my $w = length format_times($r,$u,$s,$firstr); + $colwidth[$i] = $w if $w > $colwidth[$i]; + $firstr = $r unless defined $firstr; + } + } + my $totalwidth = 3*@dirs+$descrlen; + $totalwidth += $_ for (@colwidth); -printf "%-${descrlen}s", "Test"; -for my $i (0..$#dirs) { - my $d = $dirs[$i]; - printf " %-$colwidth[$i]s", (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d}); -} -print "\n"; -print "-"x$totalwidth, "\n"; -for my $t (@subtests) { - printf "%-${descrlen}s", $descrs{$t}; - my $firstr; + printf "%-${descrlen}s", "Test"; for my $i (0..$#dirs) { my $d = $dirs[$i]; - my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}}; - printf " %-$colwidth[$i]s", format_times($r,$u,$s,$firstr); - $firstr = $r unless defined $firstr; + printf " %-$colwidth[$i]s", (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d}); } print "\n"; + print "-"x$totalwidth, "\n"; + for my $t (@subtests) { + printf "%-${descrlen}s", $descrs{$t}; + my $firstr; + for my $i (0..$#dirs) { + my $d = $dirs[$i]; + my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}}; + printf " %-$colwidth[$i]s", format_times($r,$u,$s,$firstr); + $firstr = $r unless defined $firstr; + } + print "\n"; + } +} + +sub print_codespeed_results { + my ($subsection) = @_; + + my $project = "Git"; + + my $executable = `uname -s -m`; + chomp $executable; + + if ($subsection) { + $executable .= ", " . $subsection; + } + + my $environment; + if ($reponame) { + $environment = $reponame; + } elsif (exists $ENV{GIT_PERF_REPO_NAME} and $ENV{GIT_PERF_REPO_NAME} ne "") { + $environment = $ENV{GIT_PERF_REPO_NAME}; + } elsif (exists $ENV{GIT_TEST_INSTALLED} and $ENV{GIT_TEST_INSTALLED} ne "") { + $environment = $ENV{GIT_TEST_INSTALLED}; + $environment =~ s|/bin-wrappers$||; + } else { + $environment = `uname -r`; + chomp $environment; + } + + my @data; + + for my $t (@subtests) { + for my $d (@dirs) { + my $commitid = $prefixes{$d}; + $commitid =~ s/^build_//; + $commitid =~ s/\.$//; + my ($result_value, $u, $s) = get_times("$resultsdir/$prefixes{$d}$t.times"); + + my %vals = ( + "commitid" => $commitid, + "project" => $project, + "branch" => $dirnames{$d}, + "executable" => $executable, + "benchmark" => $shorttests{$t} . " " . 
read_descr("$resultsdir/$t.descr"), + "environment" => $environment, + "result_value" => $result_value, + ); + push @data, \%vals; + } + } + + print to_json(\@data, {utf8 => 1, pretty => 1, canonical => 1}), "\n"; +} + +binmode STDOUT, ":utf8" or die "PANIC on binmode: $!"; + +if ($codespeed) { + print_codespeed_results($subsection); +} else { + print_default_results(); } diff --git a/t/perf/run b/t/perf/run index 43e4de49ef..213da5d6b9 100755 --- a/t/perf/run +++ b/t/perf/run @@ -105,7 +105,7 @@ get_var_from_env_or_config () { env_var="$1" conf_sec="$2" conf_var="$3" - # $4 can be set to a default value + conf_opts="$4" # optional # Do nothing if the env variable is already set eval "test -z \"\${$env_var+x}\"" || return @@ -116,18 +116,17 @@ get_var_from_env_or_config () { if test -n "$GIT_PERF_SUBSECTION" then var="$conf_sec.$GIT_PERF_SUBSECTION.$conf_var" - conf_value=$(git config -f "$GIT_PERF_CONFIG_FILE" "$var") && + conf_value=$(git config $conf_opts -f "$GIT_PERF_CONFIG_FILE" "$var") && eval "$env_var=\"$conf_value\"" && return fi var="$conf_sec.$conf_var" - conf_value=$(git config -f "$GIT_PERF_CONFIG_FILE" "$var") && - eval "$env_var=\"$conf_value\"" && return - - test -n "${4+x}" && eval "$env_var=\"$4\"" + conf_value=$(git config $conf_opts -f "$GIT_PERF_CONFIG_FILE" "$var") && + eval "$env_var=\"$conf_value\"" } run_subsection () { - get_var_from_env_or_config "GIT_PERF_REPEAT_COUNT" "perf" "repeatCount" 3 + get_var_from_env_or_config "GIT_PERF_REPEAT_COUNT" "perf" "repeatCount" "--int" + : ${GIT_PERF_REPEAT_COUNT:=3} export GIT_PERF_REPEAT_COUNT get_var_from_env_or_config "GIT_PERF_DIRS_OR_REVS" "perf" "dirsOrRevs" @@ -136,6 +135,9 @@ run_subsection () { get_var_from_env_or_config "GIT_PERF_MAKE_COMMAND" "perf" "makeCommand" get_var_from_env_or_config "GIT_PERF_MAKE_OPTS" "perf" "makeOpts" + get_var_from_env_or_config "GIT_PERF_REPO_NAME" "perf" "repoName" + export GIT_PERF_REPO_NAME + GIT_PERF_AGGREGATING_LATER=t export GIT_PERF_AGGREGATING_LATER @@ -143,10 +145,25 @@ run_subsection () { set -- . "$@" fi + codespeed_opt= + test "$GIT_PERF_CODESPEED_OUTPUT" = "true" && codespeed_opt="--codespeed" + run_dirs "$@" - ./aggregate.perl "$@" + + if test -z "$GIT_PERF_SEND_TO_CODESPEED" + then + ./aggregate.perl $codespeed_opt "$@" + else + json_res_file="test-results/$GIT_PERF_SUBSECTION/aggregate.json" + ./aggregate.perl --codespeed "$@" | tee "$json_res_file" + send_data_url="$GIT_PERF_SEND_TO_CODESPEED/result/add/json/" + curl -v --request POST --data-urlencode "json=$(cat "$json_res_file")" "$send_data_url" + fi } +get_var_from_env_or_config "GIT_PERF_CODESPEED_OUTPUT" "perf" "codespeedOutput" "--bool" +get_var_from_env_or_config "GIT_PERF_SEND_TO_CODESPEED" "perf" "sendToCodespeed" + cd "$(dirname $0)" . ../../GIT-BUILD-OPTIONS diff --git a/t/t0002-gitfile.sh b/t/t0002-gitfile.sh index 9670e8cbe6..3691023d51 100755 --- a/t/t0002-gitfile.sh +++ b/t/t0002-gitfile.sh @@ -10,15 +10,6 @@ objpath() { echo "$1" | sed -e 's|\(..\)|\1/|' } -objck() { - p=$(objpath "$1") - if test ! -f "$REAL/objects/$p" - then - echo "Object not found: $REAL/objects/$p" - false - fi -} - test_expect_success 'initial setup' ' REAL="$(pwd)/.real" && mv .git "$REAL" @@ -26,30 +17,14 @@ test_expect_success 'initial setup' ' test_expect_success 'bad setup: invalid .git file format' ' echo "gitdir $REAL" >.git && - if git rev-parse 2>.err - then - echo "git rev-parse accepted an invalid .git file" - false - fi && - if ! 
grep "Invalid gitfile format" .err - then - echo "git rev-parse returned wrong error" - false - fi + test_must_fail git rev-parse 2>.err && + test_i18ngrep "invalid gitfile format" .err ' test_expect_success 'bad setup: invalid .git file path' ' echo "gitdir: $REAL.not" >.git && - if git rev-parse 2>.err - then - echo "git rev-parse accepted an invalid .git file path" - false - fi && - if ! grep "Not a git repository" .err - then - echo "git rev-parse returned wrong error" - false - fi + test_must_fail git rev-parse 2>.err && + test_i18ngrep "not a git repository" .err ' test_expect_success 'final setup + check rev-parse --git-dir' ' @@ -60,7 +35,7 @@ test_expect_success 'final setup + check rev-parse --git-dir' ' test_expect_success 'check hash-object' ' echo "foo" >bar && SHA=$(cat bar | git hash-object -w --stdin) && - objck $SHA + test_path_is_file "$REAL/objects/$(objpath $SHA)" ' test_expect_success 'check cat-file' ' @@ -69,29 +44,21 @@ test_expect_success 'check cat-file' ' ' test_expect_success 'check update-index' ' - if test -f "$REAL/index" - then - echo "Hmm, $REAL/index exists?" - false - fi && + test_path_is_missing "$REAL/index" && rm -f "$REAL/objects/$(objpath $SHA)" && git update-index --add bar && - if ! test -f "$REAL/index" - then - echo "$REAL/index not found" - false - fi && - objck $SHA + test_path_is_file "$REAL/index" && + test_path_is_file "$REAL/objects/$(objpath $SHA)" ' test_expect_success 'check write-tree' ' SHA=$(git write-tree) && - objck $SHA + test_path_is_file "$REAL/objects/$(objpath $SHA)" ' test_expect_success 'check commit-tree' ' SHA=$(echo "commit bar" | git commit-tree $SHA) && - objck $SHA + test_path_is_file "$REAL/objects/$(objpath $SHA)" ' test_expect_success 'check rev-list' ' diff --git a/t/t0008-ignores.sh b/t/t0008-ignores.sh index d27f438bf4..c03f155a35 100755 --- a/t/t0008-ignores.sh +++ b/t/t0008-ignores.sh @@ -307,7 +307,7 @@ test_expect_success_multi 'needs work tree' '' ' cd .git && test_check_ignore "foo" 128 ) && - stderr_contains "fatal: This operation must be run in a work tree" + stderr_contains "fatal: this operation must be run in a work tree" ' ############################################################################ @@ -775,6 +775,26 @@ test_expect_success PIPE 'streaming support for --stdin' ' echo "$response" | grep "^:: two" ' +test_expect_success 'existing file and directory' ' + test_when_finished "rm one" && + test_when_finished "rmdir top-level-dir" && + >one && + mkdir top-level-dir && + git check-ignore one top-level-dir >actual && + grep one actual && + grep top-level-dir actual +' + +test_expect_success 'existing directory and file' ' + test_when_finished "rm one" && + test_when_finished "rmdir top-level-dir" && + >one && + mkdir top-level-dir && + git check-ignore top-level-dir one >actual && + grep one actual && + grep top-level-dir actual +' + ############################################################################ # # test whitespace handling diff --git a/t/t0050-filesystem.sh b/t/t0050-filesystem.sh index b29d749bb7..192c94eccd 100755 --- a/t/t0050-filesystem.sh +++ b/t/t0050-filesystem.sh @@ -80,7 +80,21 @@ test_expect_success 'merge (case change)' ' git merge topic ' - +test_expect_success CASE_INSENSITIVE_FS 'add directory (with different case)' ' + git reset --hard initial && + mkdir -p dir1/dir2 && + echo >dir1/dir2/a && + echo >dir1/dir2/b && + git add dir1/dir2/a && + git add dir1/DIR2/b && + git ls-files >actual && + cat >expected <<-\EOF && + camelcase + dir1/dir2/a + dir1/dir2/b + EOF + 
test_cmp expected actual +' test_expect_failure CASE_INSENSITIVE_FS 'add (with different case)' ' git reset --hard initial && diff --git a/t/t0061-run-command.sh b/t/t0061-run-command.sh index e4739170aa..24c92b6cd7 100755 --- a/t/t0061-run-command.sh +++ b/t/t0061-run-command.sh @@ -141,4 +141,41 @@ test_expect_success 'run_command outputs ' ' test_cmp expect actual ' +test_trace () { + expect="$1" + shift + GIT_TRACE=1 test-run-command "$@" run-command true 2>&1 >/dev/null | \ + sed 's/.* run_command: //' >actual && + echo "$expect true" >expect && + test_cmp expect actual +} + +test_expect_success 'GIT_TRACE with environment variables' ' + test_trace "abc=1 def=2" env abc=1 env def=2 && + test_trace "abc=2" env abc env abc=1 env abc=2 && + test_trace "abc=2" env abc env abc=2 && + ( + abc=1 && export abc && + test_trace "def=1" env abc=1 env def=1 + ) && + ( + abc=1 && export abc && + test_trace "def=1" env abc env abc=1 env def=1 + ) && + test_trace "def=1" env non-exist env def=1 && + test_trace "abc=2" env abc=1 env abc env abc=2 && + ( + abc=1 def=2 && export abc def && + test_trace "unset abc def;" env abc env def + ) && + ( + abc=1 def=2 && export abc def && + test_trace "unset def; abc=3" env abc env def env abc=3 + ) && + ( + abc=1 && export abc && + test_trace "unset abc;" env abc=2 env abc + ) +' + test_done diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh new file mode 100755 index 0000000000..cc18b75c03 --- /dev/null +++ b/t/t0410-partial-clone.sh @@ -0,0 +1,343 @@ +#!/bin/sh + +test_description='partial clone' + +. ./test-lib.sh + +delete_object () { + rm $1/.git/objects/$(echo $2 | sed -e 's|^..|&/|') +} + +pack_as_from_promisor () { + HASH=$(git -C repo pack-objects .git/objects/pack/pack) && + >repo/.git/objects/pack/pack-$HASH.promisor && + echo $HASH +} + +promise_and_delete () { + HASH=$(git -C repo rev-parse "$1") && + git -C repo tag -a -m message my_annotated_tag "$HASH" && + git -C repo rev-parse my_annotated_tag | pack_as_from_promisor && + # tag -d prints a message to stdout, so redirect it + git -C repo tag -d my_annotated_tag >/dev/null && + delete_object repo "$HASH" +} + +test_expect_success 'missing reflog object, but promised by a commit, passes fsck' ' + test_create_repo repo && + test_commit -C repo my_commit && + + A=$(git -C repo commit-tree -m a HEAD^{tree}) && + C=$(git -C repo commit-tree -m c -p $A HEAD^{tree}) && + + # Reference $A only from reflog, and delete it + git -C repo branch my_branch "$A" && + git -C repo branch -f my_branch my_commit && + delete_object repo "$A" && + + # State that we got $C, which refers to $A, from promisor + printf "$C\n" | pack_as_from_promisor && + + # Normally, it fails + test_must_fail git -C repo fsck && + + # But with the extension, it succeeds + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo fsck +' + +test_expect_success 'missing reflog object, but promised by a tag, passes fsck' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo my_commit && + + A=$(git -C repo commit-tree -m a HEAD^{tree}) && + git -C repo tag -a -m d my_tag_name $A && + T=$(git -C repo rev-parse my_tag_name) && + git -C repo tag -d my_tag_name && + + # Reference $A only from reflog, and delete it + git -C repo branch my_branch "$A" && + git -C repo branch -f my_branch my_commit && + delete_object repo "$A" && + + # State that we got $T, which refers to $A, from promisor + printf "$T\n" | pack_as_from_promisor && + + git 
-C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo fsck +' + +test_expect_success 'missing reflog object alone fails fsck, even with extension set' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo my_commit && + + A=$(git -C repo commit-tree -m a HEAD^{tree}) && + B=$(git -C repo commit-tree -m b HEAD^{tree}) && + + # Reference $A only from reflog, and delete it + git -C repo branch my_branch "$A" && + git -C repo branch -f my_branch my_commit && + delete_object repo "$A" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + test_must_fail git -C repo fsck +' + +test_expect_success 'missing ref object, but promised, passes fsck' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo my_commit && + + A=$(git -C repo commit-tree -m a HEAD^{tree}) && + + # Reference $A only from ref + git -C repo branch my_branch "$A" && + promise_and_delete "$A" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo fsck +' + +test_expect_success 'missing object, but promised, passes fsck' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo 1 && + test_commit -C repo 2 && + test_commit -C repo 3 && + git -C repo tag -a annotated_tag -m "annotated tag" && + + C=$(git -C repo rev-parse 1) && + T=$(git -C repo rev-parse 2^{tree}) && + B=$(git hash-object repo/3.t) && + AT=$(git -C repo rev-parse annotated_tag) && + + promise_and_delete "$C" && + promise_and_delete "$T" && + promise_and_delete "$B" && + promise_and_delete "$AT" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo fsck +' + +test_expect_success 'missing CLI object, but promised, passes fsck' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo my_commit && + + A=$(git -C repo commit-tree -m a HEAD^{tree}) && + promise_and_delete "$A" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo fsck "$A" +' + +test_expect_success 'fetching of missing objects' ' + rm -rf repo && + test_create_repo server && + test_commit -C server foo && + git -C server repack -a -d --write-bitmap-index && + + git clone "file://$(pwd)/server" repo && + HASH=$(git -C repo rev-parse foo) && + rm -rf repo/.git/objects/* && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "origin" && + git -C repo cat-file -p "$HASH" && + + # Ensure that the .promisor file is written, and check that its + # associated packfile contains the object + ls repo/.git/objects/pack/pack-*.promisor >promisorlist && + test_line_count = 1 promisorlist && + IDX=$(cat promisorlist | sed "s/promisor$/idx/") && + git verify-pack --verbose "$IDX" | grep "$HASH" +' + +test_expect_success 'rev-list stops traversal at missing and promised commit' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo foo && + test_commit -C repo bar && + + FOO=$(git -C repo rev-parse foo) && + promise_and_delete "$FOO" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo rev-list --exclude-promisor-objects --objects bar >out && + grep $(git -C repo rev-parse bar) out && + ! 
grep $FOO out +' + +test_expect_success 'rev-list stops traversal at missing and promised tree' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo foo && + mkdir repo/a_dir && + echo something >repo/a_dir/something && + git -C repo add a_dir/something && + git -C repo commit -m bar && + + # foo^{tree} (tree referenced from commit) + TREE=$(git -C repo rev-parse foo^{tree}) && + + # a tree referenced by HEAD^{tree} (tree referenced from tree) + TREE2=$(git -C repo ls-tree HEAD^{tree} | grep " tree " | head -1 | cut -b13-52) && + + promise_and_delete "$TREE" && + promise_and_delete "$TREE2" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo rev-list --exclude-promisor-objects --objects HEAD >out && + grep $(git -C repo rev-parse foo) out && + ! grep $TREE out && + grep $(git -C repo rev-parse HEAD) out && + ! grep $TREE2 out +' + +test_expect_success 'rev-list stops traversal at missing and promised blob' ' + rm -rf repo && + test_create_repo repo && + echo something >repo/something && + git -C repo add something && + git -C repo commit -m foo && + + BLOB=$(git -C repo hash-object -w something) && + promise_and_delete "$BLOB" && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo rev-list --exclude-promisor-objects --objects HEAD >out && + grep $(git -C repo rev-parse HEAD) out && + ! grep $BLOB out +' + +test_expect_success 'rev-list stops traversal at promisor commit, tree, and blob' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo foo && + test_commit -C repo bar && + test_commit -C repo baz && + + COMMIT=$(git -C repo rev-parse foo) && + TREE=$(git -C repo rev-parse bar^{tree}) && + BLOB=$(git hash-object repo/baz.t) && + printf "%s\n%s\n%s\n" $COMMIT $TREE $BLOB | pack_as_from_promisor && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo rev-list --exclude-promisor-objects --objects HEAD >out && + ! grep $COMMIT out && + ! grep $TREE out && + ! 
grep $BLOB out && + grep $(git -C repo rev-parse bar) out # sanity check that some walking was done +' + +test_expect_success 'rev-list accepts missing and promised objects on command line' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo foo && + test_commit -C repo bar && + test_commit -C repo baz && + + COMMIT=$(git -C repo rev-parse foo) && + TREE=$(git -C repo rev-parse bar^{tree}) && + BLOB=$(git hash-object repo/baz.t) && + + promise_and_delete $COMMIT && + promise_and_delete $TREE && + promise_and_delete $BLOB && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo rev-list --exclude-promisor-objects --objects "$COMMIT" "$TREE" "$BLOB" +' + +test_expect_success 'gc does not repack promisor objects' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo my_commit && + + TREE_HASH=$(git -C repo rev-parse HEAD^{tree}) && + HASH=$(printf "$TREE_HASH\n" | pack_as_from_promisor) && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo gc && + + # Ensure that the promisor packfile still exists, and remove it + test -e repo/.git/objects/pack/pack-$HASH.pack && + rm repo/.git/objects/pack/pack-$HASH.* && + + # Ensure that the single other pack contains the commit, but not the tree + ls repo/.git/objects/pack/pack-*.pack >packlist && + test_line_count = 1 packlist && + git verify-pack repo/.git/objects/pack/pack-*.pack -v >out && + grep "$(git -C repo rev-parse HEAD)" out && + ! grep "$TREE_HASH" out +' + +test_expect_success 'gc stops traversal when a missing but promised object is reached' ' + rm -rf repo && + test_create_repo repo && + test_commit -C repo my_commit && + + TREE_HASH=$(git -C repo rev-parse HEAD^{tree}) && + HASH=$(promise_and_delete $TREE_HASH) && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "arbitrary string" && + git -C repo gc && + + # Ensure that the promisor packfile still exists, and remove it + test -e repo/.git/objects/pack/pack-$HASH.pack && + rm repo/.git/objects/pack/pack-$HASH.* && + + # Ensure that the single other pack contains the commit, but not the tree + ls repo/.git/objects/pack/pack-*.pack >packlist && + test_line_count = 1 packlist && + git verify-pack repo/.git/objects/pack/pack-*.pack -v >out && + grep "$(git -C repo rev-parse HEAD)" out && + ! grep "$TREE_HASH" out +' + +LIB_HTTPD_PORT=12345 # default port, 410, cannot be used as non-root +. 
"$TEST_DIRECTORY"/lib-httpd.sh +start_httpd + +test_expect_success 'fetching of missing objects from an HTTP server' ' + rm -rf repo && + SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" && + test_create_repo "$SERVER" && + test_commit -C "$SERVER" foo && + git -C "$SERVER" repack -a -d --write-bitmap-index && + + git clone $HTTPD_URL/smart/server repo && + HASH=$(git -C repo rev-parse foo) && + rm -rf repo/.git/objects/* && + + git -C repo config core.repositoryformatversion 1 && + git -C repo config extensions.partialclone "origin" && + git -C repo cat-file -p "$HASH" && + + # Ensure that the .promisor file is written, and check that its + # associated packfile contains the object + ls repo/.git/objects/pack/pack-*.promisor >promisorlist && + test_line_count = 1 promisorlist && + IDX=$(cat promisorlist | sed "s/promisor$/idx/") && + git verify-pack --verbose "$IDX" | grep "$HASH" +' + +stop_httpd + +test_done diff --git a/t/t1300-repo-config.sh b/t/t1300-repo-config.sh index cbeb9bebee..4f8e6f5fde 100755 --- a/t/t1300-repo-config.sh +++ b/t/t1300-repo-config.sh @@ -1206,6 +1206,29 @@ test_expect_success 'git -c is not confused by empty environment' ' GIT_CONFIG_PARAMETERS="" git -c x.one=1 config --list ' +sq="'" +test_expect_success 'detect bogus GIT_CONFIG_PARAMETERS' ' + cat >expect <<-\EOF && + env.one one + env.two two + EOF + GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq} ${sq}env.two=two${sq}" \ + git config --get-regexp "env.*" >actual && + test_cmp expect actual && + + cat >expect <<-EOF && + env.one one${sq} + env.two two + EOF + GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq$sq$sq ${sq}env.two=two${sq}" \ + git config --get-regexp "env.*" >actual && + test_cmp expect actual && + + test_must_fail env \ + GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq ${sq}env.two=two${sq}" \ + git config --get-regexp "env.*" +' + test_expect_success 'git config --edit works' ' git config -f tmp test.value no && echo test.value=yes >expect && diff --git a/t/t1506-rev-parse-diagnosis.sh b/t/t1506-rev-parse-diagnosis.sh index 79a0251efa..4ee009da66 100755 --- a/t/t1506-rev-parse-diagnosis.sh +++ b/t/t1506-rev-parse-diagnosis.sh @@ -157,7 +157,7 @@ test_expect_success 'relative path not found' ' test_expect_success 'relative path outside worktree' ' test_must_fail git rev-parse HEAD:../file.txt >output 2>error && test -z "$(cat output)" && - grep "outside repository" error + test_i18ngrep "outside repository" error ' test_expect_success 'relative path when cwd is outside worktree' ' diff --git a/t/t1507-rev-parse-upstream.sh b/t/t1507-rev-parse-upstream.sh index b23c4e3fab..2ce68cc277 100755 --- a/t/t1507-rev-parse-upstream.sh +++ b/t/t1507-rev-parse-upstream.sh @@ -42,7 +42,7 @@ commit_subject () { error_message () { (cd clone && - test_must_fail git rev-parse --verify "$@") + test_must_fail git rev-parse --verify "$@" 2>../error) } test_expect_success '@{upstream} resolves to correct full name' ' @@ -159,8 +159,8 @@ test_expect_success 'branch@{u} error message when no upstream' ' cat >expect <<-EOF && fatal: no upstream configured for branch ${sq}non-tracking${sq} EOF - error_message non-tracking@{u} 2>actual && - test_i18ncmp expect actual + error_message non-tracking@{u} && + test_i18ncmp expect error ' test_expect_success '@{u} error message when no upstream' ' @@ -175,8 +175,8 @@ test_expect_success 'branch@{u} error message with misspelt branch' ' cat >expect <<-EOF && fatal: no such branch: ${sq}no-such-branch${sq} EOF - error_message no-such-branch@{u} 2>actual && - test_i18ncmp expect 
actual + error_message no-such-branch@{u} && + test_i18ncmp expect error ' test_expect_success '@{u} error message when not on a branch' ' @@ -192,8 +192,8 @@ test_expect_success 'branch@{u} error message if upstream branch not fetched' ' cat >expect <<-EOF && fatal: upstream branch ${sq}refs/heads/side${sq} not stored as a remote-tracking branch EOF - error_message bad-upstream@{u} 2>actual && - test_i18ncmp expect actual + error_message bad-upstream@{u} && + test_i18ncmp expect error ' test_expect_success 'pull works when tracking a local branch' ' diff --git a/t/t1510-repo-setup.sh b/t/t1510-repo-setup.sh index 13ae12dfa7..e6854b828e 100755 --- a/t/t1510-repo-setup.sh +++ b/t/t1510-repo-setup.sh @@ -39,6 +39,10 @@ A few rules for repo setup: 11. When user's cwd is outside worktree, cwd remains unchanged, prefix is NULL. " + +# This test heavily relies on the standard error of nested function calls. +test_untraceable=UnfortunatelyYes + . ./test-lib.sh here=$(pwd) diff --git a/t/t2025-worktree-add.sh b/t/t2025-worktree-add.sh index 2b95944973..d0d2e4f7ec 100755 --- a/t/t2025-worktree-add.sh +++ b/t/t2025-worktree-add.sh @@ -451,32 +451,68 @@ test_expect_success 'git worktree --no-guess-remote option overrides config' ' ' post_checkout_hook () { - test_when_finished "rm -f .git/hooks/post-checkout" && - mkdir -p .git/hooks && - write_script .git/hooks/post-checkout <<-\EOF - echo $* >hook.actual + gitdir=${1:-.git} + test_when_finished "rm -f $gitdir/hooks/post-checkout" && + mkdir -p $gitdir/hooks && + write_script $gitdir/hooks/post-checkout <<-\EOF + { + echo $* + git rev-parse --git-dir --show-toplevel + } >hook.actual EOF } test_expect_success '"add" invokes post-checkout hook (branch)' ' post_checkout_hook && - printf "%s %s 1\n" $_z40 $(git rev-parse HEAD) >hook.expect && + { + echo $_z40 $(git rev-parse HEAD) 1 && + echo $(pwd)/.git/worktrees/gumby && + echo $(pwd)/gumby + } >hook.expect && git worktree add gumby && - test_cmp hook.expect hook.actual + test_cmp hook.expect gumby/hook.actual ' test_expect_success '"add" invokes post-checkout hook (detached)' ' post_checkout_hook && - printf "%s %s 1\n" $_z40 $(git rev-parse HEAD) >hook.expect && + { + echo $_z40 $(git rev-parse HEAD) 1 && + echo $(pwd)/.git/worktrees/grumpy && + echo $(pwd)/grumpy + } >hook.expect && git worktree add --detach grumpy && - test_cmp hook.expect hook.actual + test_cmp hook.expect grumpy/hook.actual ' test_expect_success '"add --no-checkout" suppresses post-checkout hook' ' post_checkout_hook && rm -f hook.actual && git worktree add --no-checkout gloopy && - test_path_is_missing hook.actual + test_path_is_missing gloopy/hook.actual +' + +test_expect_success '"add" in other worktree invokes post-checkout hook' ' + post_checkout_hook && + { + echo $_z40 $(git rev-parse HEAD) 1 && + echo $(pwd)/.git/worktrees/guppy && + echo $(pwd)/guppy + } >hook.expect && + git -C gloopy worktree add --detach ../guppy && + test_cmp hook.expect guppy/hook.actual +' + +test_expect_success '"add" in bare repo invokes post-checkout hook' ' + rm -rf bare && + git clone --bare . 
bare && + { + echo $_z40 $(git --git-dir=bare rev-parse HEAD) 1 && + echo $(pwd)/bare/worktrees/goozy && + echo $(pwd)/goozy + } >hook.expect && + post_checkout_hook bare && + git -C bare worktree add --detach ../goozy && + test_cmp hook.expect goozy/hook.actual ' test_done diff --git a/t/t2028-worktree-move.sh b/t/t2028-worktree-move.sh index 8298aaf97f..5d5b3632ba 100755 --- a/t/t2028-worktree-move.sh +++ b/t/t2028-worktree-move.sh @@ -7,7 +7,8 @@ test_description='test git worktree move, remove, lock and unlock' test_expect_success 'setup' ' test_commit init && git worktree add source && - git worktree list --porcelain | grep "^worktree" >actual && + git worktree list --porcelain >out && + grep "^worktree" out >actual && cat <<-EOF >expected && worktree $(pwd) worktree $(pwd)/source @@ -59,4 +60,86 @@ test_expect_success 'unlock worktree twice' ' test_path_is_missing .git/worktrees/source/locked ' +test_expect_success 'move non-worktree' ' + mkdir abc && + test_must_fail git worktree move abc def +' + +test_expect_success 'move locked worktree' ' + git worktree lock source && + test_when_finished "git worktree unlock source" && + test_must_fail git worktree move source destination +' + +test_expect_success 'move worktree' ' + toplevel="$(pwd)" && + git worktree move source destination && + test_path_is_missing source && + git worktree list --porcelain >out && + grep "^worktree.*/destination" out && + ! grep "^worktree.*/source" out && + git -C destination log --format=%s >actual2 && + echo init >expected2 && + test_cmp expected2 actual2 +' + +test_expect_success 'move main worktree' ' + test_must_fail git worktree move . def +' + +test_expect_success 'move worktree to another dir' ' + mkdir some-dir && + git worktree move destination some-dir && + test_when_finished "git worktree move some-dir/destination destination" && + test_path_is_missing destination && + git worktree list --porcelain >out && + grep "^worktree.*/some-dir/destination" out && + git -C some-dir/destination log --format=%s >actual2 && + echo init >expected2 && + test_cmp expected2 actual2 +' + +test_expect_success 'remove main worktree' ' + test_must_fail git worktree remove . 
+' + +test_expect_success 'remove locked worktree' ' + git worktree lock destination && + test_when_finished "git worktree unlock destination" && + test_must_fail git worktree remove destination +' + +test_expect_success 'remove worktree with dirty tracked file' ' + echo dirty >>destination/init.t && + test_when_finished "git -C destination checkout init.t" && + test_must_fail git worktree remove destination +' + +test_expect_success 'remove worktree with untracked file' ' + : >destination/untracked && + test_must_fail git worktree remove destination +' + +test_expect_success 'force remove worktree with untracked file' ' + git worktree remove --force destination && + test_path_is_missing destination +' + +test_expect_success 'remove missing worktree' ' + git worktree add to-be-gone && + test -d .git/worktrees/to-be-gone && + mv to-be-gone gone && + git worktree remove to-be-gone && + test_path_is_missing .git/worktrees/to-be-gone +' + +test_expect_success 'NOT remove missing-but-locked worktree' ' + git worktree add gone-but-locked && + git worktree lock gone-but-locked && + test -d .git/worktrees/gone-but-locked && + mv gone-but-locked really-gone-now && + test_must_fail git worktree remove gone-but-locked && + test_path_is_dir .git/worktrees/gone-but-locked +' + test_done diff --git a/t/t3030-merge-recursive.sh b/t/t3030-merge-recursive.sh index cdc38fe5d1..3563e77b37 100755 --- a/t/t3030-merge-recursive.sh +++ b/t/t3030-merge-recursive.sh @@ -525,20 +525,22 @@ test_expect_success 'merge-recursive w/ empty work tree - ours has rename' ' GIT_INDEX_FILE="$PWD/ours-has-rename-index" && export GIT_INDEX_FILE && mkdir "$GIT_WORK_TREE" && - git read-tree -i -m $c7 && - git update-index --ignore-missing --refresh && - git merge-recursive $c0 -- $c7 $c3 && - git ls-files -s >actual-files - ) 2>actual-err && - >expected-err && + git read-tree -i -m $c7 2>actual-err && + test_must_be_empty actual-err && + git update-index --ignore-missing --refresh 2>actual-err && + test_must_be_empty actual-err && + git merge-recursive $c0 -- $c7 $c3 2>actual-err && + test_must_be_empty actual-err && + git ls-files -s >actual-files 2>actual-err && + test_must_be_empty actual-err + ) && cat >expected-files <<-EOF && 100644 $o3 0 b/c 100644 $o0 0 c 100644 $o0 0 d/e 100644 $o0 0 e EOF - test_cmp expected-files actual-files && - test_cmp expected-err actual-err + test_cmp expected-files actual-files ' test_expect_success 'merge-recursive w/ empty work tree - theirs has rename' ' @@ -548,20 +550,22 @@ test_expect_success 'merge-recursive w/ empty work tree - theirs has rename' ' GIT_INDEX_FILE="$PWD/theirs-has-rename-index" && export GIT_INDEX_FILE && mkdir "$GIT_WORK_TREE" && - git read-tree -i -m $c3 && - git update-index --ignore-missing --refresh && - git merge-recursive $c0 -- $c3 $c7 && - git ls-files -s >actual-files - ) 2>actual-err && - >expected-err && + git read-tree -i -m $c3 2>actual-err && + test_must_be_empty actual-err && + git update-index --ignore-missing --refresh 2>actual-err && + test_must_be_empty actual-err && + git merge-recursive $c0 -- $c3 $c7 2>actual-err && + test_must_be_empty actual-err && + git ls-files -s >actual-files 2>actual-err && + test_must_be_empty actual-err + ) && cat >expected-files <<-EOF && 100644 $o3 0 b/c 100644 $o0 0 c 100644 $o0 0 d/e 100644 $o0 0 e EOF - test_cmp expected-files actual-files && - test_cmp expected-err actual-err + test_cmp expected-files actual-files ' test_expect_success 'merge removes empty directories' ' diff --git a/t/t3070-wildmatch.sh 
b/t/t3070-wildmatch.sh index 163a14a1c2..c1fc6ca730 100755 --- a/t/t3070-wildmatch.sh +++ b/t/t3070-wildmatch.sh @@ -4,266 +4,431 @@ test_description='wildmatch tests' . ./test-lib.sh -match() { - if [ $1 = 1 ]; then - test_expect_success "wildmatch: match '$3' '$4'" " - test-wildmatch wildmatch '$3' '$4' - " - else - test_expect_success "wildmatch: no match '$3' '$4'" " - ! test-wildmatch wildmatch '$3' '$4' - " - fi +should_create_test_file() { + file=$1 + + case $file in + # `touch .` will succeed but obviously not do what we intend + # here. + ".") + return 1 + ;; + # We cannot create a file with an empty filename. + "") + return 1 + ;; + # The tests that are testing that e.g. foo//bar is matched by + # foo/*/bar can't be tested on filesystems since there's no + # way we're getting a double slash. + *//*) + return 1 + ;; + # When testing the difference between foo/bar and foo/bar/ we + # can't test the latter. + */) + return 1 + ;; + # On Windows, \ in paths is silently converted to /, which + # would result in the "touch" below working, but the test + # itself failing. See 6fd1106aa4 ("t3700: Skip a test with + # backslashes in pathspec", 2009-03-13) for prior art and + # details. + *\\*) + if ! test_have_prereq BSLASHPSPEC + then + return 1 + fi + # NOTE: The ;;& bash extension is not portable, so + # this test needs to be at the end of the pattern + # list. + # + # If we want to add more conditional returns we either + # need a new case statement, or turn this whole thing + # into a series of "if" tests. + ;; + esac + + + # On Windows proper (i.e. not Cygwin) many file names which + # under Cygwin would be emulated don't work. + if test_have_prereq MINGW + then + case $file in + " ") + # Files called " " are forbidden on Windows + return 1 + ;; + *\<*|*\>*|*:*|*\"*|*\|*|*\?*|*\**) + # Files with various special characters aren't + # allowed on Windows. Sourced from + # https://stackoverflow.com/a/31976060 + return 1 + ;; + esac + fi + + return 0 } -imatch() { - if [ $1 = 1 ]; then - test_expect_success "iwildmatch: match '$2' '$3'" " - test-wildmatch iwildmatch '$2' '$3' - " - else - test_expect_success "iwildmatch: no match '$2' '$3'" " - ! test-wildmatch iwildmatch '$2' '$3' - " - fi +match_with_function() { + text=$1 + pattern=$2 + match_expect=$3 + match_function=$4 + + if test "$match_expect" = 1 + then + test_expect_success "$match_function: match '$text' '$pattern'" " + test-wildmatch $match_function '$text' '$pattern' + " + elif test "$match_expect" = 0 + then + test_expect_success "$match_function: no match '$text' '$pattern'" " + test_must_fail test-wildmatch $match_function '$text' '$pattern' + " + else + test_expect_success "PANIC: Test framework error. 
Unknown matches value $match_expect" 'false' + fi + +} + +match_with_ls_files() { + text=$1 + pattern=$2 + match_expect=$3 + match_function=$4 + ls_files_args=$5 + + match_stdout_stderr_cmp=" + tr -d '\0' <actual.raw >actual && + >expect.err && + test_cmp expect.err actual.err && + test_cmp expect actual" + + if test "$match_expect" = 'E' + then + if test -e .git/created_test_file + then + test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match dies on '$pattern' '$text'" " + printf '%s' '$text' >expect && + test_must_fail git$ls_files_args ls-files -z -- '$pattern' + " + else + test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false' + fi + elif test "$match_expect" = 1 + then + if test -e .git/created_test_file + then + test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match '$pattern' '$text'" " + printf '%s' '$text' >expect && + git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err && + $match_stdout_stderr_cmp + " + else + test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false' + fi + elif test "$match_expect" = 0 + then + if test -e .git/created_test_file + then + test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match '$pattern' '$text'" " + >expect && + git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err && + $match_stdout_stderr_cmp + " + else + test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match skip '$pattern' '$text'" 'false' + fi + else + test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false' + fi } -pathmatch() { - if [ $1 = 1 ]; then - test_expect_success "pathmatch: match '$2' '$3'" " - test-wildmatch pathmatch '$2' '$3' - " - else - test_expect_success "pathmatch: no match '$2' '$3'" " - ! test-wildmatch pathmatch '$2' '$3' - " - fi +match() { + if test "$#" = 6 + then + # When test-wildmatch and git ls-files produce the same + # result. 
+ match_glob=$1 + match_file_glob=$match_glob + match_iglob=$2 + match_file_iglob=$match_iglob + match_pathmatch=$3 + match_file_pathmatch=$match_pathmatch + match_pathmatchi=$4 + match_file_pathmatchi=$match_pathmatchi + text=$5 + pattern=$6 + elif test "$#" = 10 + then + match_glob=$1 + match_iglob=$2 + match_pathmatch=$3 + match_pathmatchi=$4 + match_file_glob=$5 + match_file_iglob=$6 + match_file_pathmatch=$7 + match_file_pathmatchi=$8 + text=$9 + pattern=${10} + fi + + test_expect_success EXPENSIVE_ON_WINDOWS 'cleanup after previous file test' ' + if test -e .git/created_test_file + then + git reset && + git clean -df + fi + ' + + printf '%s' "$text" >.git/expected_test_file + + test_expect_success EXPENSIVE_ON_WINDOWS "setup match file test for $text" ' + file=$(cat .git/expected_test_file) && + if should_create_test_file "$file" + then + dirs=${file%/*} + if test "$file" != "$dirs" + then + mkdir -p -- "$dirs" && + touch -- "./$text" + else + touch -- "./$file" + fi && + git add -A && + printf "%s" "$file" >.git/created_test_file + elif test -e .git/created_test_file + then + rm .git/created_test_file + fi + ' + + # $1: Case sensitive glob match: test-wildmatch & ls-files + match_with_function "$text" "$pattern" $match_glob "wildmatch" + match_with_ls_files "$text" "$pattern" $match_file_glob "wildmatch" " --glob-pathspecs" + + # $2: Case insensitive glob match: test-wildmatch & ls-files + match_with_function "$text" "$pattern" $match_iglob "iwildmatch" + match_with_ls_files "$text" "$pattern" $match_file_iglob "iwildmatch" " --glob-pathspecs --icase-pathspecs" + + # $3: Case sensitive path match: test-wildmatch & ls-files + match_with_function "$text" "$pattern" $match_pathmatch "pathmatch" + match_with_ls_files "$text" "$pattern" $match_file_pathmatch "pathmatch" "" + + # $4: Case insensitive path match: test-wildmatch & ls-files + match_with_function "$text" "$pattern" $match_pathmatchi "ipathmatch" + match_with_ls_files "$text" "$pattern" $match_file_pathmatchi "ipathmatch" " --icase-pathspecs" } -# Basic wildmat features -match 1 1 foo foo -match 0 0 foo bar -match 1 1 '' "" -match 1 1 foo '???' -match 0 0 foo '??' -match 1 1 foo '*' -match 1 1 foo 'f*' -match 0 0 foo '*f' -match 1 1 foo '*foo*' -match 1 1 foobar '*ob*a*r*' -match 1 1 aaaaaaabababab '*ab' -match 1 1 'foo*' 'foo\*' -match 0 0 foobar 'foo\*bar' -match 1 1 'f\oo' 'f\\oo' -match 1 1 ball '*[al]?' -match 0 0 ten '[ten]' -match 0 1 ten '**[!te]' -match 0 0 ten '**[!ten]' -match 1 1 ten 't[a-g]n' -match 0 0 ten 't[!a-g]n' -match 1 1 ton 't[!a-g]n' -match 1 1 ton 't[^a-g]n' -match 1 x 'a]b' 'a[]]b' -match 1 x a-b 'a[]-]b' -match 1 x 'a]b' 'a[]-]b' -match 0 x aab 'a[]-]b' -match 1 x aab 'a[]a-]b' -match 1 1 ']' ']' +# Basic wildmatch features +match 1 1 1 1 foo foo +match 0 0 0 0 foo bar +match 1 1 1 1 '' "" +match 1 1 1 1 foo '???' +match 0 0 0 0 foo '??' +match 1 1 1 1 foo '*' +match 1 1 1 1 foo 'f*' +match 0 0 0 0 foo '*f' +match 1 1 1 1 foo '*foo*' +match 1 1 1 1 foobar '*ob*a*r*' +match 1 1 1 1 aaaaaaabababab '*ab' +match 1 1 1 1 'foo*' 'foo\*' +match 0 0 0 0 foobar 'foo\*bar' +match 1 1 1 1 'f\oo' 'f\\oo' +match 1 1 1 1 ball '*[al]?' 
+match 0 0 0 0 ten '[ten]' +match 0 0 1 1 ten '**[!te]' +match 0 0 0 0 ten '**[!ten]' +match 1 1 1 1 ten 't[a-g]n' +match 0 0 0 0 ten 't[!a-g]n' +match 1 1 1 1 ton 't[!a-g]n' +match 1 1 1 1 ton 't[^a-g]n' +match 1 1 1 1 'a]b' 'a[]]b' +match 1 1 1 1 a-b 'a[]-]b' +match 1 1 1 1 'a]b' 'a[]-]b' +match 0 0 0 0 aab 'a[]-]b' +match 1 1 1 1 aab 'a[]a-]b' +match 1 1 1 1 ']' ']' # Extended slash-matching features -match 0 0 'foo/baz/bar' 'foo*bar' -match 0 0 'foo/baz/bar' 'foo**bar' -match 0 1 'foobazbar' 'foo**bar' -match 1 1 'foo/baz/bar' 'foo/**/bar' -match 1 0 'foo/baz/bar' 'foo/**/**/bar' -match 1 0 'foo/b/a/z/bar' 'foo/**/bar' -match 1 0 'foo/b/a/z/bar' 'foo/**/**/bar' -match 1 0 'foo/bar' 'foo/**/bar' -match 1 0 'foo/bar' 'foo/**/**/bar' -match 0 0 'foo/bar' 'foo?bar' -match 0 0 'foo/bar' 'foo[/]bar' -match 0 0 'foo/bar' 'foo[^a-z]bar' -match 0 0 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r' -match 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r' -match 1 0 'foo' '**/foo' -match 1 x 'XXX/foo' '**/foo' -match 1 0 'bar/baz/foo' '**/foo' -match 0 0 'bar/baz/foo' '*/foo' -match 0 0 'foo/bar/baz' '**/bar*' -match 1 0 'deep/foo/bar/baz' '**/bar/*' -match 0 0 'deep/foo/bar/baz/' '**/bar/*' -match 1 0 'deep/foo/bar/baz/' '**/bar/**' -match 0 0 'deep/foo/bar' '**/bar/*' -match 1 0 'deep/foo/bar/' '**/bar/**' -match 0 0 'foo/bar/baz' '**/bar**' -match 1 0 'foo/bar/baz/x' '*/bar/**' -match 0 0 'deep/foo/bar/baz/x' '*/bar/**' -match 1 0 'deep/foo/bar/baz/x' '**/bar/*/*' +match 0 0 1 1 'foo/baz/bar' 'foo*bar' +match 0 0 1 1 'foo/baz/bar' 'foo**bar' +match 0 0 1 1 'foobazbar' 'foo**bar' +match 1 1 1 1 'foo/baz/bar' 'foo/**/bar' +match 1 1 0 0 'foo/baz/bar' 'foo/**/**/bar' +match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/bar' +match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/**/bar' +match 1 1 0 0 'foo/bar' 'foo/**/bar' +match 1 1 0 0 'foo/bar' 'foo/**/**/bar' +match 0 0 1 1 'foo/bar' 'foo?bar' +match 0 0 1 1 'foo/bar' 'foo[/]bar' +match 0 0 1 1 'foo/bar' 'foo[^a-z]bar' +match 0 0 1 1 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r' +match 1 1 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r' +match 1 1 0 0 'foo' '**/foo' +match 1 1 1 1 'XXX/foo' '**/foo' +match 1 1 1 1 'bar/baz/foo' '**/foo' +match 0 0 1 1 'bar/baz/foo' '*/foo' +match 0 0 1 1 'foo/bar/baz' '**/bar*' +match 1 1 1 1 'deep/foo/bar/baz' '**/bar/*' +match 0 0 1 1 'deep/foo/bar/baz/' '**/bar/*' +match 1 1 1 1 'deep/foo/bar/baz/' '**/bar/**' +match 0 0 0 0 'deep/foo/bar' '**/bar/*' +match 1 1 1 1 'deep/foo/bar/' '**/bar/**' +match 0 0 1 1 'foo/bar/baz' '**/bar**' +match 1 1 1 1 'foo/bar/baz/x' '*/bar/**' +match 0 0 1 1 'deep/foo/bar/baz/x' '*/bar/**' +match 1 1 1 1 'deep/foo/bar/baz/x' '**/bar/*/*' # Various additional tests -match 0 0 'acrt' 'a[c-c]st' -match 1 1 'acrt' 'a[c-c]rt' -match 0 0 ']' '[!]-]' -match 1 x 'a' '[!]-]' -match 0 0 '' '\' -match 0 x '\' '\' -match 0 x 'XXX/\' '*/\' -match 1 x 'XXX/\' '*/\\' -match 1 1 'foo' 'foo' -match 1 1 '@foo' '@foo' -match 0 0 'foo' '@foo' -match 1 1 '[ab]' '\[ab]' -match 1 1 '[ab]' '[[]ab]' -match 1 x '[ab]' '[[:]ab]' -match 0 x '[ab]' '[[::]ab]' -match 1 x '[ab]' '[[:digit]ab]' -match 1 x '[ab]' '[\[:]ab]' -match 1 1 '?a?b' '\??\?b' -match 1 1 'abc' '\a\b\c' -match 0 0 'foo' '' -match 1 0 'foo/bar/baz/to' '**/t[o]' +match 0 0 0 0 'acrt' 'a[c-c]st' +match 1 1 1 1 'acrt' 'a[c-c]rt' +match 0 0 0 0 ']' '[!]-]' +match 1 1 1 1 'a' '[!]-]' +match 0 0 0 0 '' '\' +match 0 0 0 0 \ + 1 1 1 1 '\' '\' +match 0 0 0 0 'XXX/\' '*/\' +match 1 1 1 1 'XXX/\' '*/\\' +match 1 1 1 1 'foo' 'foo' +match 1 1 1 1 '@foo' '@foo' +match 0 0 0 0 'foo' 
'@foo' +match 1 1 1 1 '[ab]' '\[ab]' +match 1 1 1 1 '[ab]' '[[]ab]' +match 1 1 1 1 '[ab]' '[[:]ab]' +match 0 0 0 0 '[ab]' '[[::]ab]' +match 1 1 1 1 '[ab]' '[[:digit]ab]' +match 1 1 1 1 '[ab]' '[\[:]ab]' +match 1 1 1 1 '?a?b' '\??\?b' +match 1 1 1 1 'abc' '\a\b\c' +match 0 0 0 0 \ + E E E E 'foo' '' +match 1 1 1 1 'foo/bar/baz/to' '**/t[o]' # Character class tests -match 1 x 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]' -match 0 x 'a' '[[:digit:][:upper:][:space:]]' -match 1 x 'A' '[[:digit:][:upper:][:space:]]' -match 1 x '1' '[[:digit:][:upper:][:space:]]' -match 0 x '1' '[[:digit:][:upper:][:spaci:]]' -match 1 x ' ' '[[:digit:][:upper:][:space:]]' -match 0 x '.' '[[:digit:][:upper:][:space:]]' -match 1 x '.' '[[:digit:][:punct:][:space:]]' -match 1 x '5' '[[:xdigit:]]' -match 1 x 'f' '[[:xdigit:]]' -match 1 x 'D' '[[:xdigit:]]' -match 1 x '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]' -match 1 x '.' '[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]' -match 1 x '5' '[a-c[:digit:]x-z]' -match 1 x 'b' '[a-c[:digit:]x-z]' -match 1 x 'y' '[a-c[:digit:]x-z]' -match 0 x 'q' '[a-c[:digit:]x-z]' - -# Additional tests, including some malformed wildmats -match 1 x ']' '[\\-^]' -match 0 0 '[' '[\\-^]' -match 1 x '-' '[\-_]' -match 1 x ']' '[\]]' -match 0 0 '\]' '[\]]' -match 0 0 '\' '[\]]' -match 0 0 'ab' 'a[]b' -match 0 x 'a[]b' 'a[]b' -match 0 x 'ab[' 'ab[' -match 0 0 'ab' '[!' -match 0 0 'ab' '[-' -match 1 1 '-' '[-]' -match 0 0 '-' '[a-' -match 0 0 '-' '[!a-' -match 1 x '-' '[--A]' -match 1 x '5' '[--A]' -match 1 1 ' ' '[ --]' -match 1 1 '$' '[ --]' -match 1 1 '-' '[ --]' -match 0 0 '0' '[ --]' -match 1 x '-' '[---]' -match 1 x '-' '[------]' -match 0 0 'j' '[a-e-n]' -match 1 x '-' '[a-e-n]' -match 1 x 'a' '[!------]' -match 0 0 '[' '[]-a]' -match 1 x '^' '[]-a]' -match 0 0 '^' '[!]-a]' -match 1 x '[' '[!]-a]' -match 1 1 '^' '[a^bc]' -match 1 x '-b]' '[a-]b]' -match 0 0 '\' '[\]' -match 1 1 '\' '[\\]' -match 0 0 '\' '[!\\]' -match 1 1 'G' '[A-\\]' -match 0 0 'aaabbb' 'b*a' -match 0 0 'aabcaa' '*ba*' -match 1 1 ',' '[,]' -match 1 1 ',' '[\\,]' -match 1 1 '\' '[\\,]' -match 1 1 '-' '[,-.]' -match 0 0 '+' '[,-.]' -match 0 0 '-.]' '[,-.]' -match 1 1 '2' '[\1-\3]' -match 1 1 '3' '[\1-\3]' -match 0 0 '4' '[\1-\3]' -match 1 1 '\' '[[-\]]' -match 1 1 '[' '[[-\]]' -match 1 1 ']' '[[-\]]' -match 0 0 '-' '[[-\]]' +match 1 1 1 1 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]' +match 0 1 0 1 'a' '[[:digit:][:upper:][:space:]]' +match 1 1 1 1 'A' '[[:digit:][:upper:][:space:]]' +match 1 1 1 1 '1' '[[:digit:][:upper:][:space:]]' +match 0 0 0 0 '1' '[[:digit:][:upper:][:spaci:]]' +match 1 1 1 1 ' ' '[[:digit:][:upper:][:space:]]' +match 0 0 0 0 '.' '[[:digit:][:upper:][:space:]]' +match 1 1 1 1 '.' '[[:digit:][:punct:][:space:]]' +match 1 1 1 1 '5' '[[:xdigit:]]' +match 1 1 1 1 'f' '[[:xdigit:]]' +match 1 1 1 1 'D' '[[:xdigit:]]' +match 1 1 1 1 '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]' +match 1 1 1 1 '.' 
'[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]' +match 1 1 1 1 '5' '[a-c[:digit:]x-z]' +match 1 1 1 1 'b' '[a-c[:digit:]x-z]' +match 1 1 1 1 'y' '[a-c[:digit:]x-z]' +match 0 0 0 0 'q' '[a-c[:digit:]x-z]' -# Test recursion and the abort code (use "wildtest -i" to see iteration counts) -match 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*' -match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*' -match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*' -match 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*' -match 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*' -match 1 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t' -match 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t' -match 0 x foo '*/*/*' -match 0 x foo/bar '*/*/*' -match 1 x foo/bba/arr '*/*/*' -match 0 x foo/bb/aa/rr '*/*/*' -match 1 x foo/bb/aa/rr '**/**/**' -match 1 x abcXdefXghi '*X*i' -match 0 x ab/cXd/efXg/hi '*X*i' -match 1 x ab/cXd/efXg/hi '*/*X*/*/*i' -match 1 x ab/cXd/efXg/hi '**/*X*/**/*i' +# Additional tests, including some malformed wildmatch patterns +match 1 1 1 1 ']' '[\\-^]' +match 0 0 0 0 '[' '[\\-^]' +match 1 1 1 1 '-' '[\-_]' +match 1 1 1 1 ']' '[\]]' +match 0 0 0 0 '\]' '[\]]' +match 0 0 0 0 '\' '[\]]' +match 0 0 0 0 'ab' 'a[]b' +match 0 0 0 0 \ + 1 1 1 1 'a[]b' 'a[]b' +match 0 0 0 0 \ + 1 1 1 1 'ab[' 'ab[' +match 0 0 0 0 'ab' '[!' +match 0 0 0 0 'ab' '[-' +match 1 1 1 1 '-' '[-]' +match 0 0 0 0 '-' '[a-' +match 0 0 0 0 '-' '[!a-' +match 1 1 1 1 '-' '[--A]' +match 1 1 1 1 '5' '[--A]' +match 1 1 1 1 ' ' '[ --]' +match 1 1 1 1 '$' '[ --]' +match 1 1 1 1 '-' '[ --]' +match 0 0 0 0 '0' '[ --]' +match 1 1 1 1 '-' '[---]' +match 1 1 1 1 '-' '[------]' +match 0 0 0 0 'j' '[a-e-n]' +match 1 1 1 1 '-' '[a-e-n]' +match 1 1 1 1 'a' '[!------]' +match 0 0 0 0 '[' '[]-a]' +match 1 1 1 1 '^' '[]-a]' +match 0 0 0 0 '^' '[!]-a]' +match 1 1 1 1 '[' '[!]-a]' +match 1 1 1 1 '^' '[a^bc]' +match 1 1 1 1 '-b]' '[a-]b]' +match 0 0 0 0 '\' '[\]' +match 1 1 1 1 '\' '[\\]' +match 0 0 0 0 '\' '[!\\]' +match 1 1 1 1 'G' '[A-\\]' +match 0 0 0 0 'aaabbb' 'b*a' +match 0 0 0 0 'aabcaa' '*ba*' +match 1 1 1 1 ',' '[,]' +match 1 1 1 1 ',' '[\\,]' +match 1 1 1 1 '\' '[\\,]' +match 1 1 1 1 '-' '[,-.]' +match 0 0 0 0 '+' '[,-.]' +match 0 0 0 0 '-.]' '[,-.]' +match 1 1 1 1 '2' '[\1-\3]' +match 1 1 1 1 '3' '[\1-\3]' +match 0 0 0 0 '4' '[\1-\3]' +match 1 1 1 1 '\' '[[-\]]' +match 1 1 1 1 '[' '[[-\]]' +match 1 1 1 1 ']' '[[-\]]' +match 0 0 0 0 '-' '[[-\]]' -pathmatch 1 foo foo -pathmatch 0 foo fo -pathmatch 1 foo/bar foo/bar -pathmatch 1 foo/bar 'foo/*' -pathmatch 1 foo/bba/arr 'foo/*' -pathmatch 1 foo/bba/arr 'foo/**' -pathmatch 1 foo/bba/arr 'foo*' -pathmatch 1 foo/bba/arr 'foo**' -pathmatch 1 foo/bba/arr 'foo/*arr' -pathmatch 1 foo/bba/arr 'foo/**arr' -pathmatch 0 foo/bba/arr 'foo/*z' -pathmatch 0 foo/bba/arr 'foo/**z' -pathmatch 1 foo/bar 'foo?bar' -pathmatch 1 foo/bar 'foo[/]bar' -pathmatch 1 foo/bar 'foo[^a-z]bar' -pathmatch 0 foo '*/*/*' -pathmatch 0 foo/bar '*/*/*' -pathmatch 1 foo/bba/arr '*/*/*' -pathmatch 1 foo/bb/aa/rr '*/*/*' -pathmatch 1 abcXdefXghi '*X*i' -pathmatch 1 ab/cXd/efXg/hi '*/*X*/*/*i' -pathmatch 1 ab/cXd/efXg/hi '*Xg*i' +# Test recursion +match 1 1 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' 
'-*-*-*-*-*-*-12-*-*-*-m-*-*-*' +match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*' +match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*' +match 1 1 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*' +match 0 0 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*' +match 1 1 1 1 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t' +match 0 0 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t' +match 0 0 0 0 foo '*/*/*' +match 0 0 0 0 foo/bar '*/*/*' +match 1 1 1 1 foo/bba/arr '*/*/*' +match 0 0 1 1 foo/bb/aa/rr '*/*/*' +match 1 1 1 1 foo/bb/aa/rr '**/**/**' +match 1 1 1 1 abcXdefXghi '*X*i' +match 0 0 1 1 ab/cXd/efXg/hi '*X*i' +match 1 1 1 1 ab/cXd/efXg/hi '*/*X*/*/*i' +match 1 1 1 1 ab/cXd/efXg/hi '**/*X*/**/*i' -# Case-sensitivity features -match 0 x 'a' '[A-Z]' -match 1 x 'A' '[A-Z]' -match 0 x 'A' '[a-z]' -match 1 x 'a' '[a-z]' -match 0 x 'a' '[[:upper:]]' -match 1 x 'A' '[[:upper:]]' -match 0 x 'A' '[[:lower:]]' -match 1 x 'a' '[[:lower:]]' -match 0 x 'A' '[B-Za]' -match 1 x 'a' '[B-Za]' -match 0 x 'A' '[B-a]' -match 1 x 'a' '[B-a]' -match 0 x 'z' '[Z-y]' -match 1 x 'Z' '[Z-y]' +# Extra pathmatch tests +match 0 0 0 0 foo fo +match 1 1 1 1 foo/bar foo/bar +match 1 1 1 1 foo/bar 'foo/*' +match 0 0 1 1 foo/bba/arr 'foo/*' +match 1 1 1 1 foo/bba/arr 'foo/**' +match 0 0 1 1 foo/bba/arr 'foo*' +match 0 0 1 1 \ + 1 1 1 1 foo/bba/arr 'foo**' +match 0 0 1 1 foo/bba/arr 'foo/*arr' +match 0 0 1 1 foo/bba/arr 'foo/**arr' +match 0 0 0 0 foo/bba/arr 'foo/*z' +match 0 0 0 0 foo/bba/arr 'foo/**z' +match 0 0 1 1 foo/bar 'foo?bar' +match 0 0 1 1 foo/bar 'foo[/]bar' +match 0 0 1 1 foo/bar 'foo[^a-z]bar' +match 0 0 1 1 ab/cXd/efXg/hi '*Xg*i' -imatch 1 'a' '[A-Z]' -imatch 1 'A' '[A-Z]' -imatch 1 'A' '[a-z]' -imatch 1 'a' '[a-z]' -imatch 1 'a' '[[:upper:]]' -imatch 1 'A' '[[:upper:]]' -imatch 1 'A' '[[:lower:]]' -imatch 1 'a' '[[:lower:]]' -imatch 1 'A' '[B-Za]' -imatch 1 'a' '[B-Za]' -imatch 1 'A' '[B-a]' -imatch 1 'a' '[B-a]' -imatch 1 'z' '[Z-y]' -imatch 1 'Z' '[Z-y]' +# Extra case-sensitivity tests +match 0 1 0 1 'a' '[A-Z]' +match 1 1 1 1 'A' '[A-Z]' +match 0 1 0 1 'A' '[a-z]' +match 1 1 1 1 'a' '[a-z]' +match 0 1 0 1 'a' '[[:upper:]]' +match 1 1 1 1 'A' '[[:upper:]]' +match 0 1 0 1 'A' '[[:lower:]]' +match 1 1 1 1 'a' '[[:lower:]]' +match 0 1 0 1 'A' '[B-Za]' +match 1 1 1 1 'a' '[B-Za]' +match 0 1 0 1 'A' '[B-a]' +match 1 1 1 1 'a' '[B-a]' +match 0 1 0 1 'z' '[Z-y]' +match 1 1 1 1 'Z' '[Z-y]' test_done diff --git a/t/t3200-branch.sh b/t/t3200-branch.sh index 503a88d029..6c0b7ea4ad 100755 --- a/t/t3200-branch.sh +++ b/t/t3200-branch.sh @@ -528,7 +528,7 @@ test_expect_success 'git branch -c -f o/q o/p should work when o/p exists' ' git branch -c -f o/q o/p ' -test_expect_success 'git branch -c qq rr/qq should fail when r exists' ' +test_expect_success 'git branch -c qq rr/qq should fail when rr exists' ' git branch qq && git branch rr && test_must_fail git branch -c qq rr/qq diff --git a/t/t3400-rebase.sh b/t/t3400-rebase.sh index 8ac58d5ea5..72d9564747 100755 --- a/t/t3400-rebase.sh +++ b/t/t3400-rebase.sh @@ -277,4 +277,38 @@ EOF test_cmp From_.msg out ' +test_expect_success 'rebase--am.sh and --show-current-patch' ' + test_create_repo conflict-apply && + ( + cd conflict-apply && + test_commit init && + echo one >>init.t && + git commit -a -m one && + echo two >>init.t 
&& + git commit -a -m two && + git tag two && + test_must_fail git rebase --onto init HEAD^ && + GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr && + grep "show.*$(git rev-parse two)" stderr + ) +' + +test_expect_success 'rebase--merge.sh and --show-current-patch' ' + test_create_repo conflict-merge && + ( + cd conflict-merge && + test_commit init && + echo one >>init.t && + git commit -a -m one && + echo two >>init.t && + git commit -a -m two && + git tag two && + test_must_fail git rebase --merge --onto init HEAD^ && + git rebase --show-current-patch >actual.patch && + GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr && + grep "show.*REBASE_HEAD" stderr && + test "$(git rev-parse REBASE_HEAD)" = "$(git rev-parse two)" + ) +' + test_done diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh index 481a350090..3b905406df 100755 --- a/t/t3404-rebase-interactive.sh +++ b/t/t3404-rebase-interactive.sh @@ -225,6 +225,14 @@ test_expect_success 'stop on conflicting pick' ' test 0 = $(grep -c "^[^#]" < .git/rebase-merge/git-rebase-todo) ' +test_expect_success 'show conflicted patch' ' + GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr && + grep "show.*REBASE_HEAD" stderr && + # the original stopped-sha1 is abbreviated + stopped_sha1="$(git rev-parse $(cat ".git/rebase-merge/stopped-sha"))" && + test "$(git rev-parse REBASE_HEAD)" = "$stopped_sha1" +' + test_expect_success 'abort' ' git rebase --abort && test $(git rev-parse new-branch1) = $(git rev-parse HEAD) && @@ -453,6 +461,10 @@ test_expect_success C_LOCALE_OUTPUT 'squash and fixup generate correct log messa git rebase -i $base && git cat-file commit HEAD | sed -e 1,/^\$/d > actual-squash-fixup && test_cmp expect-squash-fixup actual-squash-fixup && + git cat-file commit HEAD@{2} | + grep "^# This is a combination of 3 commits\." && + git cat-file commit HEAD@{3} | + grep "^# This is a combination of 2 commits\." && git checkout to-be-rebased && git branch -D squash-fixup ' @@ -1336,6 +1348,16 @@ test_expect_success 'editor saves as CR/LF' ' SQ="'" test_expect_success 'rebase -i --gpg-sign=<key-id>' ' + test_when_finished "test_might_fail git rebase --abort" && + set_fake_editor && + FAKE_LINES="edit 1" git rebase -i --gpg-sign="\"S I Gner\"" HEAD^ \ + >out 2>err && + test_i18ngrep "$SQ-S\"S I Gner\"$SQ" err +' + +test_expect_success 'rebase -i --gpg-sign=<key-id> overrides commit.gpgSign' ' + test_when_finished "test_might_fail git rebase --abort" && + test_config commit.gpgsign true && set_fake_editor && FAKE_LINES="edit 1" git rebase -i --gpg-sign="\"S I Gner\"" HEAD^ \ >out 2>err && diff --git a/t/t3405-rebase-malformed.sh b/t/t3405-rebase-malformed.sh index ff8c360cd5..cb7c6de84a 100755 --- a/t/t3405-rebase-malformed.sh +++ b/t/t3405-rebase-malformed.sh @@ -3,6 +3,7 @@ test_description='rebase should handle arbitrary git message' . ./test-lib.sh +. 
"$TEST_DIRECTORY"/lib-rebase.sh cat >F <<\EOF This is an example of a commit log message @@ -25,6 +26,7 @@ test_expect_success setup ' test_tick && git commit -m "Initial commit" && git branch diff-in-message && + git branch empty-message-merge && git checkout -b multi-line-subject && cat F >file2 && @@ -45,6 +47,11 @@ test_expect_success setup ' git cat-file commit HEAD | sed -e "1,/^\$/d" >G0 && + git checkout empty-message-merge && + echo file3 >file3 && + git add file3 && + git commit --allow-empty-message -m "" && + git checkout master && echo One >file1 && @@ -69,4 +76,20 @@ test_expect_success 'rebase commit with diff in message' ' test_cmp G G0 ' +test_expect_success 'rebase -m commit with empty message' ' + test_must_fail git rebase -m master empty-message-merge && + git rebase --abort && + git rebase -m --allow-empty-message master empty-message-merge +' + +test_expect_success 'rebase -i commit with empty message' ' + git checkout diff-in-message && + set_fake_editor && + test_must_fail env FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \ + git rebase -i HEAD^ && + git rebase --abort && + FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \ + git rebase -i --allow-empty-message HEAD^ +' + test_done diff --git a/t/t3408-rebase-multi-line.sh b/t/t3408-rebase-multi-line.sh index 6b84e6042a..e7292f5b9b 100755 --- a/t/t3408-rebase-multi-line.sh +++ b/t/t3408-rebase-multi-line.sh @@ -24,8 +24,23 @@ But otherwise with a sane description." && >elif && git add elif && test_tick && - git commit -m second + git commit -m second && + git checkout -b side2 && + >afile && + git add afile && + test_tick && + git commit -m third && + echo hello >afile && + test_tick && + git commit -a -m fourth && + git checkout -b side-merge && + git reset --hard HEAD^^ && + git merge --no-ff -m "A merge commit log message that has a long +summary that spills over multiple lines. + +But otherwise with a sane description." side2 && + git branch side-merge-original ' test_expect_success rebase ' @@ -37,5 +52,14 @@ test_expect_success rebase ' test_cmp expect actual ' +test_expect_success rebasep ' + + git checkout side-merge && + git rebase -p side && + git cat-file commit HEAD | sed -e "1,/^\$/d" >actual && + git cat-file commit side-merge-original | sed -e "1,/^\$/d" >expect && + test_cmp expect actual + +' test_done diff --git a/t/t3501-revert-cherry-pick.sh b/t/t3501-revert-cherry-pick.sh index 4f2a263b63..783bdbf59d 100755 --- a/t/t3501-revert-cherry-pick.sh +++ b/t/t3501-revert-cherry-pick.sh @@ -141,7 +141,7 @@ test_expect_success 'cherry-pick "-" works with arguments' ' test_cmp expect actual ' -test_expect_success 'cherry-pick works with dirty renamed file' ' +test_expect_failure 'cherry-pick works with dirty renamed file' ' test_commit to-rename && git checkout -b unrelated && test_commit unrelated && @@ -150,7 +150,10 @@ test_expect_success 'cherry-pick works with dirty renamed file' ' test_tick && git commit -m renamed && echo modified >renamed && - git cherry-pick refs/heads/unrelated + test_must_fail git cherry-pick refs/heads/unrelated >out && + test_i18ngrep "Refusing to lose dirty file at renamed" out && + test $(git rev-parse :0:renamed) = $(git rev-parse HEAD^:to-rename.t) && + grep -q "^modified$" renamed ' test_done diff --git a/t/t3512-cherry-pick-submodule.sh b/t/t3512-cherry-pick-submodule.sh index ce48c4fcca..bd78287841 100755 --- a/t/t3512-cherry-pick-submodule.sh +++ b/t/t3512-cherry-pick-submodule.sh @@ -5,7 +5,6 @@ test_description='cherry-pick can handle submodules' . ./test-lib.sh . 
"$TEST_DIRECTORY"/lib-submodule-update.sh -KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT=1 KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1 KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1 test_submodule_switch "git cherry-pick" diff --git a/t/t3513-revert-submodule.sh b/t/t3513-revert-submodule.sh index db9378142a..5e39fcdb66 100755 --- a/t/t3513-revert-submodule.sh +++ b/t/t3513-revert-submodule.sh @@ -25,7 +25,6 @@ git_revert () { git revert HEAD } -KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT=1 KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1 test_submodule_switch "git_revert" diff --git a/t/t3701-add-interactive.sh b/t/t3701-add-interactive.sh index 058698df6a..b170fb02b8 100755 --- a/t/t3701-add-interactive.sh +++ b/t/t3701-add-interactive.sh @@ -10,6 +10,19 @@ then test_done fi +diff_cmp () { + for x + do + sed -e '/^index/s/[0-9a-f]*[1-9a-f][0-9a-f]*\.\./1234567../' \ + -e '/^index/s/\.\.[0-9a-f]*[1-9a-f][0-9a-f]*/..9abcdef/' \ + -e '/^index/s/ 00*\.\./ 0000000../' \ + -e '/^index/s/\.\.00*$/..0000000/' \ + -e '/^index/s/\.\.00* /..0000000 /' \ + "$x" >"$x.filtered" + done + test_cmp "$1.filtered" "$2.filtered" +} + test_expect_success 'setup (initial)' ' echo content >file && git add file && @@ -22,20 +35,20 @@ test_expect_success 'status works (initial)' ' ' test_expect_success 'setup expected' ' -cat >expected <<EOF -new file mode 100644 -index 0000000..d95f3ad ---- /dev/null -+++ b/file -@@ -0,0 +1 @@ -+content -EOF + cat >expected <<-\EOF + new file mode 100644 + index 0000000..d95f3ad + --- /dev/null + +++ b/file + @@ -0,0 +1 @@ + +content + EOF ' test_expect_success 'diff works (initial)' ' (echo d; echo 1) | git add -i >output && sed -ne "/new file/,/content/p" <output >diff && - test_cmp expected diff + diff_cmp expected diff ' test_expect_success 'revert works (initial)' ' git add file && @@ -59,20 +72,20 @@ test_expect_success 'status works (commit)' ' ' test_expect_success 'setup expected' ' -cat >expected <<EOF -index 180b47c..b6f2c08 100644 ---- a/file -+++ b/file -@@ -1 +1,2 @@ - baseline -+content -EOF + cat >expected <<-\EOF + index 180b47c..b6f2c08 100644 + --- a/file + +++ b/file + @@ -1 +1,2 @@ + baseline + +content + EOF ' test_expect_success 'diff works (commit)' ' (echo d; echo 1) | git add -i >output && sed -ne "/^index/,/content/p" <output >diff && - test_cmp expected diff + diff_cmp expected diff ' test_expect_success 'revert works (commit)' ' git add file && @@ -83,39 +96,32 @@ test_expect_success 'revert works (commit)' ' test_expect_success 'setup expected' ' -cat >expected <<EOF -EOF -' - -test_expect_success 'setup fake editor' ' - >fake_editor.sh && - chmod a+x fake_editor.sh && - test_set_editor "$(pwd)/fake_editor.sh" + cat >expected <<-\EOF + EOF ' test_expect_success 'dummy edit works' ' + test_set_editor : && (echo e; echo a) | git add -p && git diff > diff && - test_cmp expected diff + diff_cmp expected diff ' test_expect_success 'setup patch' ' -cat >patch <<EOF -@@ -1,1 +1,4 @@ - this -+patch --does not - apply -EOF + cat >patch <<-\EOF + @@ -1,1 +1,4 @@ + this + +patch + -does not + apply + EOF ' test_expect_success 'setup fake editor' ' - echo "#!$SHELL_PATH" >fake_editor.sh && - cat >>fake_editor.sh <<\EOF && -mv -f "$1" oldpatch && -mv -f patch "$1" -EOF - chmod a+x fake_editor.sh && + write_script "fake_editor.sh" <<-\EOF && + mv -f "$1" oldpatch && + mv -f patch "$1" + EOF test_set_editor "$(pwd)/fake_editor.sh" ' @@ -126,10 +132,10 @@ test_expect_success 'bad edit rejected' ' ' 
test_expect_success 'setup patch' ' -cat >patch <<EOF -this patch -is garbage -EOF + cat >patch <<-\EOF + this patch + is garbage + EOF ' test_expect_success 'garbage edit rejected' ' @@ -139,34 +145,34 @@ test_expect_success 'garbage edit rejected' ' ' test_expect_success 'setup patch' ' -cat >patch <<EOF -@@ -1,0 +1,0 @@ - baseline -+content -+newcontent -+lines -EOF + cat >patch <<-\EOF + @@ -1,0 +1,0 @@ + baseline + +content + +newcontent + +lines + EOF ' test_expect_success 'setup expected' ' -cat >expected <<EOF -diff --git a/file b/file -index b5dd6c9..f910ae9 100644 ---- a/file -+++ b/file -@@ -1,4 +1,4 @@ - baseline - content --newcontent -+more - lines -EOF + cat >expected <<-\EOF + diff --git a/file b/file + index b5dd6c9..f910ae9 100644 + --- a/file + +++ b/file + @@ -1,4 +1,4 @@ + baseline + content + -newcontent + +more + lines + EOF ' test_expect_success 'real edit works' ' (echo e; echo n; echo d) | git add -p && git diff >output && - test_cmp expected output + diff_cmp expected output ' test_expect_success 'skip files similarly as commit -a' ' @@ -178,7 +184,7 @@ test_expect_success 'skip files similarly as commit -a' ' git reset && git commit -am commit && git diff >expected && - test_cmp expected output && + diff_cmp expected output && git reset --hard HEAD^ ' rm -f .gitignore @@ -222,52 +228,67 @@ test_expect_success 'setup again' ' # Write the patch file with a new line at the top and bottom test_expect_success 'setup patch' ' -cat >patch <<EOF -index 180b47c..b6f2c08 100644 ---- a/file -+++ b/file -@@ -1,2 +1,4 @@ -+firstline - baseline - content -+lastline -EOF -' - -# Expected output, similar to the patch but w/ diff at the top + cat >patch <<-\EOF + index 180b47c..b6f2c08 100644 + --- a/file + +++ b/file + @@ -1,2 +1,4 @@ + +firstline + baseline + content + +lastline + \ No newline at end of file + EOF +' + +# Expected output, diff is similar to the patch but w/ diff at the top test_expect_success 'setup expected' ' -cat >expected <<EOF -diff --git a/file b/file -index b6f2c08..61b9053 100755 ---- a/file -+++ b/file -@@ -1,2 +1,4 @@ -+firstline - baseline - content -+lastline -EOF + echo diff --git a/file b/file >expected && + cat patch |sed "/^index/s/ 100644/ 100755/" >>expected && + cat >expected-output <<-\EOF + --- a/file + +++ b/file + @@ -1,2 +1,4 @@ + +firstline + baseline + content + +lastline + \ No newline at end of file + @@ -1,2 +1,3 @@ + +firstline + baseline + content + @@ -1,2 +2,3 @@ + baseline + content + +lastline + \ No newline at end of file + EOF ' # Test splitting the first patch, then adding both -test_expect_success 'add first line works' ' +test_expect_success C_LOCALE_OUTPUT 'add first line works' ' git commit -am "clear local changes" && git apply patch && - (echo s; echo y; echo y) | git add -p file && - git diff --cached > diff && - test_cmp expected diff + printf "%s\n" s y y | git add -p file 2>error | + sed -n -e "s/^Stage this hunk[^@]*\(@@ .*\)/\1/" \ + -e "/^[-+@ \\\\]"/p >output && + test_must_be_empty error && + git diff --cached >diff && + diff_cmp expected diff && + test_cmp expected-output output ' test_expect_success 'setup expected' ' -cat >expected <<EOF -diff --git a/non-empty b/non-empty -deleted file mode 100644 -index d95f3ad..0000000 ---- a/non-empty -+++ /dev/null -@@ -1 +0,0 @@ --content -EOF + cat >expected <<-\EOF + diff --git a/non-empty b/non-empty + deleted file mode 100644 + index d95f3ad..0000000 + --- a/non-empty + +++ /dev/null + @@ -1 +0,0 @@ + -content + EOF ' test_expect_success 'deleting a non-empty 
file' ' @@ -278,15 +299,15 @@ test_expect_success 'deleting a non-empty file' ' rm non-empty && echo y | git add -p non-empty && git diff --cached >diff && - test_cmp expected diff + diff_cmp expected diff ' test_expect_success 'setup expected' ' -cat >expected <<EOF -diff --git a/empty b/empty -deleted file mode 100644 -index e69de29..0000000 -EOF + cat >expected <<-\EOF + diff --git a/empty b/empty + deleted file mode 100644 + index e69de29..0000000 + EOF ' test_expect_success 'deleting an empty file' ' @@ -297,23 +318,17 @@ test_expect_success 'deleting an empty file' ' rm empty && echo y | git add -p empty && git diff --cached >diff && - test_cmp expected diff + diff_cmp expected diff ' test_expect_success 'split hunk setup' ' git reset --hard && - for i in 10 20 30 40 50 60 - do - echo $i - done >test && + test_write_lines 10 20 30 40 50 60 >test && git add test && test_tick && git commit -m test && - for i in 10 15 20 21 22 23 24 30 40 50 60 - do - echo $i - done >test + test_write_lines 10 15 20 21 22 23 24 30 40 50 60 >test ' test_expect_success 'split hunk "add -p (edit)"' ' @@ -334,17 +349,7 @@ test_expect_success 'split hunk "add -p (edit)"' ' ' test_expect_failure 'split hunk "add -p (no, yes, edit)"' ' - cat >test <<-\EOF && - 5 - 10 - 20 - 21 - 30 - 31 - 40 - 50 - 60 - EOF + test_write_lines 5 10 20 21 30 31 40 50 60 >test && git reset && # test sequence is s(plit), n(o), y(es), e(dit) # q n q q is there to make sure we exit at the end. @@ -378,7 +383,7 @@ test_expect_success 'patch mode ignores unmerged entries' ' +changed EOF git diff --cached >diff && - test_cmp expected diff + diff_cmp expected diff ' test_expect_success TTY 'diffs can be colorized' ' @@ -392,6 +397,26 @@ test_expect_success TTY 'diffs can be colorized' ' grep "$(printf "\\033")" output ' +test_expect_success TTY 'diffFilter filters diff' ' + git reset --hard && + + echo content >test && + test_config interactive.diffFilter "sed s/^/foo:/" && + printf y | test_terminal git add -p >output 2>&1 && + + # avoid depending on the exact coloring or content of the prompts, + # and just make sure we saw our diff prefixed + grep foo:.*content output +' + +test_expect_success TTY 'detect bogus diffFilter output' ' + git reset --hard && + + echo content >test && + test_config interactive.diffFilter "echo too-short" && + printf y | test_must_fail test_terminal git add -p +' + test_expect_success 'patch-mode via -i prompts for files' ' git reset --hard && @@ -407,7 +432,7 @@ test_expect_success 'patch-mode via -i prompts for files' ' echo test >expect && git diff --cached --name-only >actual && - test_cmp expect actual + diff_cmp expect actual ' test_expect_success 'add -p handles globs' ' @@ -541,4 +566,34 @@ test_expect_success 'status ignores dirty submodules (except HEAD)' ' ! 
grep dirty-otherwise output ' +test_expect_success 'set up pathological context' ' + git reset --hard && + test_write_lines a a a a a a a a a a a >a && + git add a && + git commit -m a && + test_write_lines c b a a a a a a a b a a a a >a && + test_write_lines a a a a a a a b a a a a >expected-1 && + test_write_lines b a a a a a a a b a a a a >expected-2 && + # check editing can cope with missing header and deleted context lines + # as well as changes to other lines + test_write_lines +b " a" >patch +' + +test_expect_success 'add -p works with pathological context lines' ' + git reset && + printf "%s\n" n y | + git add -p && + git cat-file blob :a >actual && + test_cmp expected-1 actual +' + +test_expect_success 'add -p patch editing works with pathological context lines' ' + git reset && + # n q q below is in case edit fails + printf "%s\n" e y n q q | + git add -p && + git cat-file blob :a >actual && + test_cmp expected-2 actual +' + test_done diff --git a/t/t4013-diff-various.sh b/t/t4013-diff-various.sh index f10798b2df..3f9a24fd56 100755 --- a/t/t4013-diff-various.sh +++ b/t/t4013-diff-various.sh @@ -361,6 +361,11 @@ diff --no-index --raw dir2 dir diff --no-index --raw --abbrev=4 dir2 dir :noellipses diff --no-index --raw --abbrev=4 dir2 dir diff --no-index --raw --no-abbrev dir2 dir + +diff-tree --pretty --root --stat --compact-summary initial +diff-tree --pretty -R --root --stat --compact-summary initial +diff-tree --stat --compact-summary initial mode +diff-tree -R --stat --compact-summary initial mode EOF test_expect_success 'log -S requires an argument' ' diff --git a/t/t4013/diff.diff-tree_--pretty_--root_--stat_--compact-summary_initial b/t/t4013/diff.diff-tree_--pretty_--root_--stat_--compact-summary_initial new file mode 100644 index 0000000000..d6451ff7cc --- /dev/null +++ b/t/t4013/diff.diff-tree_--pretty_--root_--stat_--compact-summary_initial @@ -0,0 +1,12 @@ +$ git diff-tree --pretty --root --stat --compact-summary initial +commit 444ac553ac7612cc88969031b02b3767fb8a353a +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:00:00 2006 +0000 + + Initial + + dir/sub (new) | 2 ++ + file0 (new) | 3 +++ + file2 (new) | 3 +++ + 3 files changed, 8 insertions(+) +$ diff --git a/t/t4013/diff.diff-tree_--pretty_-R_--root_--stat_--compact-summary_initial b/t/t4013/diff.diff-tree_--pretty_-R_--root_--stat_--compact-summary_initial new file mode 100644 index 0000000000..1989e55cd0 --- /dev/null +++ b/t/t4013/diff.diff-tree_--pretty_-R_--root_--stat_--compact-summary_initial @@ -0,0 +1,12 @@ +$ git diff-tree --pretty -R --root --stat --compact-summary initial +commit 444ac553ac7612cc88969031b02b3767fb8a353a +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:00:00 2006 +0000 + + Initial + + dir/sub (gone) | 2 -- + file0 (gone) | 3 --- + file2 (gone) | 3 --- + 3 files changed, 8 deletions(-) +$ diff --git a/t/t4013/diff.diff-tree_--stat_--compact-summary_initial_mode b/t/t4013/diff.diff-tree_--stat_--compact-summary_initial_mode new file mode 100644 index 0000000000..9c7c8f63af --- /dev/null +++ b/t/t4013/diff.diff-tree_--stat_--compact-summary_initial_mode @@ -0,0 +1,4 @@ +$ git diff-tree --stat --compact-summary initial mode + file0 (mode +x) | 0 + 1 file changed, 0 insertions(+), 0 deletions(-) +$ diff --git a/t/t4013/diff.diff-tree_-R_--stat_--compact-summary_initial_mode b/t/t4013/diff.diff-tree_-R_--stat_--compact-summary_initial_mode new file mode 100644 index 0000000000..e38f3d3bfb --- /dev/null +++ b/t/t4013/diff.diff-tree_-R_--stat_--compact-summary_initial_mode 
@@ -0,0 +1,4 @@ +$ git diff-tree -R --stat --compact-summary initial mode + file0 (mode -x) | 0 + 1 file changed, 0 insertions(+), 0 deletions(-) +$ diff --git a/t/t4018-diff-funcname.sh b/t/t4018-diff-funcname.sh index 1795ffc3aa..22f9f88f0a 100755 --- a/t/t4018-diff-funcname.sh +++ b/t/t4018-diff-funcname.sh @@ -33,6 +33,7 @@ diffpatterns=" css fortran fountain + golang html java matlab diff --git a/t/t4018/golang-complex-function b/t/t4018/golang-complex-function new file mode 100644 index 0000000000..e057dcefed --- /dev/null +++ b/t/t4018/golang-complex-function @@ -0,0 +1,8 @@ +type Test struct { + a Type +} + +func (t *Test) RIGHT(a Type) (Type, error) { + t.a = a + return ChangeMe, nil +} diff --git a/t/t4018/golang-func b/t/t4018/golang-func new file mode 100644 index 0000000000..8e9c9ac7c3 --- /dev/null +++ b/t/t4018/golang-func @@ -0,0 +1,4 @@ +func RIGHT() { + a := 5 + b := ChangeMe +} diff --git a/t/t4018/golang-interface b/t/t4018/golang-interface new file mode 100644 index 0000000000..553bedec96 --- /dev/null +++ b/t/t4018/golang-interface @@ -0,0 +1,4 @@ +type RIGHT interface { + a() Type + b() ChangeMe +} diff --git a/t/t4018/golang-long-func b/t/t4018/golang-long-func new file mode 100644 index 0000000000..ac3a77b5c4 --- /dev/null +++ b/t/t4018/golang-long-func @@ -0,0 +1,5 @@ +func RIGHT(aVeryVeryVeryLongVariableName AVeryVeryVeryLongType, + anotherLongVariableName AnotherLongType) { + a := 5 + b := ChangeMe +} diff --git a/t/t4018/golang-struct b/t/t4018/golang-struct new file mode 100644 index 0000000000..5deda77fee --- /dev/null +++ b/t/t4018/golang-struct @@ -0,0 +1,4 @@ +type RIGHT struct { + a Type + b ChangeMe +} diff --git a/t/t4052-stat-output.sh b/t/t4052-stat-output.sh index 9f563db20a..6e2cf933f7 100755 --- a/t/t4052-stat-output.sh +++ b/t/t4052-stat-output.sh @@ -19,17 +19,33 @@ test_expect_success 'preparation' ' git commit -m message "$name" ' +cat >expect72 <<-'EOF' + ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + +EOF +test_expect_success "format-patch: small change with long name gives more space to the name" ' + git format-patch -1 --stdout >output && + grep " | " output >actual && + test_cmp expect72 actual +' + while read cmd args do - cat >expect <<-'EOF' + cat >expect80 <<-'EOF' ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + EOF test_expect_success "$cmd: small change with long name gives more space to the name" ' git $cmd $args >output && grep " | " output >actual && - test_cmp expect actual + test_cmp expect80 actual ' +done <<\EOF +diff HEAD^ HEAD --stat +show --stat +log -1 --stat +EOF +while read cmd args +do cat >expect <<-'EOF' ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 + EOF @@ -79,11 +95,11 @@ test_expect_success 'preparation for big change tests' ' git commit -m message abcd ' -cat >expect80 <<'EOF' - abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +cat >expect72 <<'EOF' + abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ EOF -cat >expect80-graph <<'EOF' -| abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +cat >expect72-graph <<'EOF' +| abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ EOF cat >expect200 <<'EOF' abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -107,7 +123,7 @@ do test_cmp "$expect-graph" actual ' done <<\EOF -ignores 
expect80 format-patch -1 --stdout +ignores expect72 format-patch -1 --stdout respects expect200 diff HEAD^ HEAD --stat respects expect200 show --stat respects expect200 log -1 --stat @@ -135,7 +151,7 @@ do test_cmp "$expect-graph" actual ' done <<\EOF -ignores expect80 format-patch -1 --stdout +ignores expect72 format-patch -1 --stdout respects expect40 diff HEAD^ HEAD --stat respects expect40 show --stat respects expect40 log -1 --stat @@ -163,7 +179,7 @@ do test_cmp "$expect-graph" actual ' done <<\EOF -ignores expect80 format-patch -1 --stdout +ignores expect72 format-patch -1 --stdout respects expect40 diff HEAD^ HEAD --stat respects expect40 show --stat respects expect40 log -1 --stat @@ -250,11 +266,11 @@ show --stat log -1 --stat EOF -cat >expect80 <<'EOF' - ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 ++++++++++++++++++++ +cat >expect72 <<'EOF' + ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++ EOF -cat >expect80-graph <<'EOF' -| ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 ++++++++++++++++++++ +cat >expect72-graph <<'EOF' +| ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++ EOF cat >expect200 <<'EOF' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -278,7 +294,7 @@ do test_cmp "$expect-graph" actual ' done <<\EOF -ignores expect80 format-patch -1 --stdout +ignores expect72 format-patch -1 --stdout respects expect200 diff HEAD^ HEAD --stat respects expect200 show --stat respects expect200 log -1 --stat @@ -308,7 +324,7 @@ do test_cmp "$expect-graph" actual ' done <<\EOF -ignores expect80 format-patch -1 --stdout +ignores expect72 format-patch -1 --stdout respects expect1 diff HEAD^ HEAD --stat respects expect1 show --stat respects expect1 log -1 --stat diff --git a/t/t4064-diff-oidfind.sh b/t/t4064-diff-oidfind.sh new file mode 100755 index 0000000000..3bdf317af8 --- /dev/null +++ b/t/t4064-diff-oidfind.sh @@ -0,0 +1,68 @@ +#!/bin/sh + +test_description='test finding specific blobs in the revision walking' +. ./test-lib.sh + +test_expect_success 'setup ' ' + git commit --allow-empty -m "empty initial commit" && + + echo "Hello, world!" 
>greeting && + git add greeting && + git commit -m "add the greeting blob" && # borrowed from Git from the Bottom Up + git tag -m "the blob" greeting $(git rev-parse HEAD:greeting) && + + echo asdf >unrelated && + git add unrelated && + git commit -m "unrelated history" && + + git revert HEAD^ && + + git commit --allow-empty -m "another unrelated commit" +' + +test_expect_success 'find the greeting blob' ' + cat >expect <<-EOF && + Revert "add the greeting blob" + add the greeting blob + EOF + + git log --format=%s --find-object=greeting^{blob} >actual && + + test_cmp expect actual +' + +test_expect_success 'setup a tree' ' + mkdir a && + echo asdf >a/file && + git add a/file && + git commit -m "add a file in a subdirectory" +' + +test_expect_success 'find a tree' ' + cat >expect <<-EOF && + add a file in a subdirectory + EOF + + git log --format=%s -t --find-object=HEAD:a >actual && + + test_cmp expect actual +' + +test_expect_success 'setup a submodule' ' + test_create_repo sub && + test_commit -C sub sub && + git submodule add ./sub sub && + git commit -a -m "add sub" +' + +test_expect_success 'find a submodule' ' + cat >expect <<-EOF && + add sub + EOF + + git log --format=%s --find-object=HEAD:sub >actual && + + test_cmp expect actual +' + +test_done diff --git a/t/t4135-apply-weird-filenames.sh b/t/t4135-apply-weird-filenames.sh index 27cb0009fb..c7c688fcc4 100755 --- a/t/t4135-apply-weird-filenames.sh +++ b/t/t4135-apply-weird-filenames.sh @@ -89,4 +89,21 @@ test_expect_success 'traditional, whitespace-damaged, colon in timezone' ' test_cmp expected "post image.txt" ' +cat >diff-from-svn <<\EOF +Index: Makefile +=================================================================== +diff --git a/branches/Makefile +deleted file mode 100644 +--- a/branches/Makefile (revision 13) ++++ /dev/null (nonexistent) +@@ +1 0,0 @@ +- +EOF + +test_expect_success 'apply handles a diff generated by Subversion' ' + >Makefile && + git apply -p2 diff-from-svn && + test_path_is_missing Makefile +' + test_done diff --git a/t/t4150-am.sh b/t/t4150-am.sh index 73b67b4280..1eccfb71d0 100755 --- a/t/t4150-am.sh +++ b/t/t4150-am.sh @@ -662,6 +662,11 @@ test_expect_success 'am pauses on conflict' ' test -d .git/rebase-apply ' +test_expect_success 'am --show-current-patch' ' + git am --show-current-patch >actual.patch && + test_cmp .git/rebase-apply/0001 actual.patch +' + test_expect_success 'am --skip works' ' echo goodbye >expected && git am --skip && @@ -1045,4 +1050,16 @@ test_expect_success 'am works with multi-line in-body headers' ' git cat-file commit HEAD | grep "^$LONG$" ' +test_expect_success 'am --quit keeps HEAD where it is' ' + mkdir .git/rebase-apply && + >.git/rebase-apply/last && + >.git/rebase-apply/next && + git rev-parse HEAD^ >.git/ORIG_HEAD && + git rev-parse HEAD >expected && + git am --quit && + test_path_is_missing .git/rebase-apply && + git rev-parse HEAD >actual && + test_cmp expected actual +' + test_done diff --git a/t/t4151-am-abort.sh b/t/t4151-am-abort.sh index 9473c2779e..16432781d2 100755 --- a/t/t4151-am-abort.sh +++ b/t/t4151-am-abort.sh @@ -46,9 +46,8 @@ do test_expect_success "am$with3 --skip continue after failed am$with3" ' test_must_fail git am$with3 --skip >output && - test_i18ngrep "^Applying" output >output.applying && - test_i18ngrep "^Applying: 6$" output.applying && - test_i18ncmp file-2-expect file-2 && + test_i18ngrep "^Applying: 6$" output && + test_cmp file-2-expect file-2 && test ! 
-f .git/MERGE_RR ' diff --git a/t/t5302-pack-index.sh b/t/t5302-pack-index.sh index c2fc584dac..d695a6082e 100755 --- a/t/t5302-pack-index.sh +++ b/t/t5302-pack-index.sh @@ -262,4 +262,9 @@ EOF grep "^warning:.* expected .tagger. line" err ' +test_expect_success 'index-pack --fsck-objects also warns upon missing tagger in tag' ' + git index-pack --fsck-objects tag-test-${pack1}.pack 2>err && + grep "^warning:.* expected .tagger. line" err +' + test_done diff --git a/t/t5500-fetch-pack.sh b/t/t5500-fetch-pack.sh index 80a1a3239a..0680dec808 100755 --- a/t/t5500-fetch-pack.sh +++ b/t/t5500-fetch-pack.sh @@ -482,24 +482,24 @@ test_expect_success 'set up tests of missing reference' ' test_expect_success 'test lonely missing ref' ' ( cd client && - test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy - ) >/dev/null 2>error-m && + test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy 2>../error-m + ) && test_i18ncmp expect-error error-m ' test_expect_success 'test missing ref after existing' ' ( cd client && - test_must_fail git fetch-pack --no-progress .. refs/heads/A refs/heads/xyzzy - ) >/dev/null 2>error-em && + test_must_fail git fetch-pack --no-progress .. refs/heads/A refs/heads/xyzzy 2>../error-em + ) && test_i18ncmp expect-error error-em ' test_expect_success 'test missing ref before existing' ' ( cd client && - test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy refs/heads/A - ) >/dev/null 2>error-me && + test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy refs/heads/A 2>../error-me + ) && test_i18ncmp expect-error error-me ' @@ -755,4 +755,67 @@ test_expect_success 'fetching deepen' ' ) ' +test_expect_success 'filtering by size' ' + rm -rf server client && + test_create_repo server && + test_commit -C server one && + test_config -C server uploadpack.allowfilter 1 && + + test_create_repo client && + git -C client fetch-pack --filter=blob:limit=0 ../server HEAD && + + # Ensure that object is not inadvertently fetched + test_must_fail git -C client cat-file -e $(git hash-object server/one.t) +' + +test_expect_success 'filtering by size has no effect if support for it is not advertised' ' + rm -rf server client && + test_create_repo server && + test_commit -C server one && + + test_create_repo client && + git -C client fetch-pack --filter=blob:limit=0 ../server HEAD 2> err && + + # Ensure that object is fetched + git -C client cat-file -e $(git hash-object server/one.t) && + + test_i18ngrep "filtering not recognized by server" err +' + +fetch_filter_blob_limit_zero () { + SERVER="$1" + URL="$2" + + rm -rf "$SERVER" client && + test_create_repo "$SERVER" && + test_commit -C "$SERVER" one && + test_config -C "$SERVER" uploadpack.allowfilter 1 && + + git clone "$URL" client && + test_config -C client extensions.partialclone origin && + + test_commit -C "$SERVER" two && + + git -C client fetch --filter=blob:limit=0 origin HEAD:somewhere && + + # Ensure that commit is fetched, but blob is not + test_config -C client extensions.partialclone "arbitrary string" && + git -C client cat-file -e $(git -C "$SERVER" rev-parse two) && + test_must_fail git -C client cat-file -e $(git hash-object "$SERVER/two.t") +} + +test_expect_success 'fetch with --filter=blob:limit=0' ' + fetch_filter_blob_limit_zero server server +' + +. 
"$TEST_DIRECTORY"/lib-httpd.sh +start_httpd + +test_expect_success 'fetch with --filter=blob:limit=0 and HTTP' ' + fetch_filter_blob_limit_zero "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server" +' + +stop_httpd + + test_done diff --git a/t/t5510-fetch.sh b/t/t5510-fetch.sh index 3debc87d4a..da9ac00557 100755 --- a/t/t5510-fetch.sh +++ b/t/t5510-fetch.sh @@ -540,82 +540,232 @@ test_expect_success "should be able to fetch with duplicate refspecs" ' set_config_tristate () { # var=$1 val=$2 case "$2" in - unset) test_unconfig "$1" ;; - *) git config "$1" "$2" ;; + unset) + test_unconfig "$1" + ;; + *) + git config "$1" "$2" + key=$(echo $1 | sed -e 's/^remote\.origin/fetch/') + git_fetch_c="$git_fetch_c -c $key=$2" + ;; esac } test_configured_prune () { - fetch_prune=$1 remote_origin_prune=$2 cmdline=$3 expected=$4 + test_configured_prune_type "$@" "name" + test_configured_prune_type "$@" "link" +} - test_expect_success "prune fetch.prune=$1 remote.origin.prune=$2${3:+ $3}; $4" ' +test_configured_prune_type () { + fetch_prune=$1 + remote_origin_prune=$2 + fetch_prune_tags=$3 + remote_origin_prune_tags=$4 + expected_branch=$5 + expected_tag=$6 + cmdline=$7 + mode=$8 + + if test -z "$cmdline_setup" + then + test_expect_success 'setup cmdline_setup variable for subsequent test' ' + remote_url="file://$(git -C one config remote.origin.url)" && + remote_fetch="$(git -C one config remote.origin.fetch)" && + cmdline_setup="\"$remote_url\" \"$remote_fetch\"" + ' + fi + + if test "$mode" = 'link' + then + new_cmdline="" + + if test "$cmdline" = "" + then + new_cmdline=$cmdline_setup + else + new_cmdline=$(printf "%s" "$cmdline" | perl -pe 's[origin(?!/)]["'"$remote_url"'"]g') + fi + + if test "$fetch_prune_tags" = 'true' || + test "$remote_origin_prune_tags" = 'true' + then + if ! printf '%s' "$cmdline\n" | grep -q refs/remotes/origin/ + then + new_cmdline="$new_cmdline refs/tags/*:refs/tags/*" + fi + fi + + cmdline="$new_cmdline" + fi + + test_expect_success "$mode prune fetch.prune=$1 remote.origin.prune=$2 fetch.pruneTags=$3 remote.origin.pruneTags=$4${7:+ $7}; branch:$5 tag:$6" ' # make sure a newbranch is there in . 
and also in one git branch -f newbranch && + git tag -f newtag && ( cd one && test_unconfig fetch.prune && + test_unconfig fetch.pruneTags && test_unconfig remote.origin.prune && - git fetch && - git rev-parse --verify refs/remotes/origin/newbranch + test_unconfig remote.origin.pruneTags && + git fetch '"$cmdline_setup"' && + git rev-parse --verify refs/remotes/origin/newbranch && + git rev-parse --verify refs/tags/newtag ) && # now remove it git branch -d newbranch && + git tag -d newtag && # then test ( cd one && + git_fetch_c="" && set_config_tristate fetch.prune $fetch_prune && + set_config_tristate fetch.pruneTags $fetch_prune_tags && set_config_tristate remote.origin.prune $remote_origin_prune && - - git fetch $cmdline && - case "$expected" in + set_config_tristate remote.origin.pruneTags $remote_origin_prune_tags && + + if test "$mode" != "link" + then + git_fetch_c="" + fi && + git$git_fetch_c fetch '"$cmdline"' && + case "$expected_branch" in pruned) test_must_fail git rev-parse --verify refs/remotes/origin/newbranch ;; kept) git rev-parse --verify refs/remotes/origin/newbranch ;; + esac && + case "$expected_tag" in + pruned) + test_must_fail git rev-parse --verify refs/tags/newtag + ;; + kept) + git rev-parse --verify refs/tags/newtag + ;; esac ) ' } -test_configured_prune unset unset "" kept -test_configured_prune unset unset "--no-prune" kept -test_configured_prune unset unset "--prune" pruned - -test_configured_prune false unset "" kept -test_configured_prune false unset "--no-prune" kept -test_configured_prune false unset "--prune" pruned - -test_configured_prune true unset "" pruned -test_configured_prune true unset "--prune" pruned -test_configured_prune true unset "--no-prune" kept - -test_configured_prune unset false "" kept -test_configured_prune unset false "--no-prune" kept -test_configured_prune unset false "--prune" pruned - -test_configured_prune false false "" kept -test_configured_prune false false "--no-prune" kept -test_configured_prune false false "--prune" pruned - -test_configured_prune true false "" kept -test_configured_prune true false "--prune" pruned -test_configured_prune true false "--no-prune" kept - -test_configured_prune unset true "" pruned -test_configured_prune unset true "--no-prune" kept -test_configured_prune unset true "--prune" pruned - -test_configured_prune false true "" pruned -test_configured_prune false true "--no-prune" kept -test_configured_prune false true "--prune" pruned - -test_configured_prune true true "" pruned -test_configured_prune true true "--prune" pruned -test_configured_prune true true "--no-prune" kept +# $1 config: fetch.prune +# $2 config: remote.<name>.prune +# $3 config: fetch.pruneTags +# $4 config: remote.<name>.pruneTags +# $5 expect: branch to be pruned? +# $6 expect: tag to be pruned? 
+# $7 git-fetch $cmdline:
+#
+# $1 $2 $3 $4 $5 $6 $7
+test_configured_prune unset unset unset unset kept kept ""
+test_configured_prune unset unset unset unset kept kept "--no-prune"
+test_configured_prune unset unset unset unset pruned kept "--prune"
+test_configured_prune unset unset unset unset kept pruned \
+	"--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune unset unset unset unset pruned pruned \
+	"--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+test_configured_prune false unset unset unset kept kept ""
+test_configured_prune false unset unset unset kept kept "--no-prune"
+test_configured_prune false unset unset unset pruned kept "--prune"
+
+test_configured_prune true unset unset unset pruned kept ""
+test_configured_prune true unset unset unset pruned kept "--prune"
+test_configured_prune true unset unset unset kept kept "--no-prune"
+
+test_configured_prune unset false unset unset kept kept ""
+test_configured_prune unset false unset unset kept kept "--no-prune"
+test_configured_prune unset false unset unset pruned kept "--prune"
+
+test_configured_prune false false unset unset kept kept ""
+test_configured_prune false false unset unset kept kept "--no-prune"
+test_configured_prune false false unset unset pruned kept "--prune"
+test_configured_prune false false unset unset kept pruned \
+	"--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune false false unset unset pruned pruned \
+	"--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+test_configured_prune true false unset unset kept kept ""
+test_configured_prune true false unset unset pruned kept "--prune"
+test_configured_prune true false unset unset kept kept "--no-prune"
+
+test_configured_prune unset true unset unset pruned kept ""
+test_configured_prune unset true unset unset kept kept "--no-prune"
+test_configured_prune unset true unset unset pruned kept "--prune"
+
+test_configured_prune false true unset unset pruned kept ""
+test_configured_prune false true unset unset kept kept "--no-prune"
+test_configured_prune false true unset unset pruned kept "--prune"
+
+test_configured_prune true true unset unset pruned kept ""
+test_configured_prune true true unset unset pruned kept "--prune"
+test_configured_prune true true unset unset kept kept "--no-prune"
+test_configured_prune true true unset unset kept pruned \
+	"--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune true true unset unset pruned pruned \
+	"--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+# --prune-tags on its own does nothing, needs --prune as well, same
+# for fetch.pruneTags without fetch.prune
+test_configured_prune unset unset unset unset kept kept "--prune-tags"
+test_configured_prune unset unset true unset kept kept ""
+test_configured_prune unset unset unset true kept kept ""
+
+# These will prune the tags
+test_configured_prune unset unset unset unset pruned pruned "--prune --prune-tags"
+test_configured_prune true unset true unset pruned pruned ""
+test_configured_prune unset true unset true pruned pruned ""
+
+# remote.<name>.pruneTags overrides fetch.pruneTags, just like
+# remote.<name>.prune overrides fetch.prune if set.
+test_configured_prune true unset true unset pruned pruned ""
+test_configured_prune false true false true pruned pruned ""
+test_configured_prune true false true false kept kept ""
+
+# When --prune-tags is supplied it's ignored if an explicit refspec is
+# given, same for the configuration options.
+test_configured_prune unset unset unset unset pruned kept \
+	"--prune --prune-tags origin +refs/heads/*:refs/remotes/origin/*"
+test_configured_prune unset unset true unset pruned kept \
+	"--prune origin +refs/heads/*:refs/remotes/origin/*"
+test_configured_prune unset unset unset true pruned kept \
+	"--prune origin +refs/heads/*:refs/remotes/origin/*"
+
+# Pruning also takes place if a file:// url replaces a named
+# remote. However, because there's no implicit
+# +refs/heads/*:refs/remotes/origin/* refspec and supplying it on the
+# command-line negates --prune-tags, the branches will not be pruned.
+test_configured_prune_type unset unset unset unset kept kept "origin --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept kept "origin --prune-tags" "link"
+test_configured_prune_type unset unset unset unset pruned pruned "origin --prune --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "link"
+test_configured_prune_type unset unset unset unset pruned pruned "--prune --prune-tags origin" "name"
+test_configured_prune_type unset unset unset unset kept pruned "--prune --prune-tags origin" "link"
+test_configured_prune_type unset unset true unset pruned pruned "--prune origin" "name"
+test_configured_prune_type unset unset true unset kept pruned "--prune origin" "link"
+test_configured_prune_type unset unset unset true pruned pruned "--prune origin" "name"
+test_configured_prune_type unset unset unset true kept pruned "--prune origin" "link"
+test_configured_prune_type true unset true unset pruned pruned "origin" "name"
+test_configured_prune_type true unset true unset kept pruned "origin" "link"
+test_configured_prune_type unset true true unset pruned pruned "origin" "name"
+test_configured_prune_type unset true true unset kept pruned "origin" "link"
+test_configured_prune_type unset true unset true pruned pruned "origin" "name"
+test_configured_prune_type unset true unset true kept pruned "origin" "link"
+
+# When all remote.origin.fetch settings are deleted, a --prune
+# --prune-tags still implicitly supplies refs/tags/*:refs/tags/* so
+# tags, but not tracking branches, will be deleted.
+test_expect_success 'remove remote.origin.fetch "one"' ' + ( + cd one && + git config --unset-all remote.origin.fetch + ) +' +test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "name" +test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "link" test_expect_success 'all boundary commits are excluded' ' test_commit base && diff --git a/t/t5526-fetch-submodules.sh b/t/t5526-fetch-submodules.sh index 74486c73b0..9cc4b569c0 100755 --- a/t/t5526-fetch-submodules.sh +++ b/t/t5526-fetch-submodules.sh @@ -85,7 +85,7 @@ test_expect_success "fetch --recurse-submodules -j2 has the same output behaviou add_upstream_commit && ( cd downstream && - GIT_TRACE=$(pwd)/../trace.out git fetch --recurse-submodules -j2 2>../actual.err + GIT_TRACE="$TRASH_DIRECTORY/trace.out" git fetch --recurse-submodules -j2 2>../actual.err ) && test_must_be_empty actual.out && test_i18ncmp expect.err actual.err && diff --git a/t/t5536-fetch-conflicts.sh b/t/t5536-fetch-conflicts.sh index 644736b8a3..91f28c2f78 100755 --- a/t/t5536-fetch-conflicts.sh +++ b/t/t5536-fetch-conflicts.sh @@ -18,14 +18,6 @@ setup_repository () { ) } -verify_stderr () { - cat >expected && - # We're not interested in the error - # "fatal: The remote end hung up unexpectedly": - test_i18ngrep -E '^(fatal|warning):' error | grep -v 'hung up' >actual | sort && - test_i18ncmp expected actual -} - test_expect_success 'setup' ' git commit --allow-empty -m "Initial" && git branch branch1 && @@ -48,9 +40,7 @@ test_expect_success 'fetch conflict: config vs. config' ' "+refs/heads/branch2:refs/remotes/origin/branch1" && ( cd ccc && test_must_fail git fetch origin 2>error && - verify_stderr <<-\EOF - fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1 - EOF + test_i18ngrep "fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1" error ) ' @@ -77,9 +67,7 @@ test_expect_success 'fetch conflict: arg vs. 
arg' ' test_must_fail git fetch origin \ refs/heads/*:refs/remotes/origin/* \ refs/heads/branch2:refs/remotes/origin/branch1 2>error && - verify_stderr <<-\EOF - fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1 - EOF + test_i18ngrep "fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1" error ) ' @@ -90,10 +78,8 @@ test_expect_success 'fetch conflict: criss-cross args' ' git fetch origin \ refs/heads/branch1:refs/remotes/origin/branch2 \ refs/heads/branch2:refs/remotes/origin/branch1 2>error && - verify_stderr <<-\EOF - warning: refs/remotes/origin/branch1 usually tracks refs/heads/branch1, not refs/heads/branch2 - warning: refs/remotes/origin/branch2 usually tracks refs/heads/branch2, not refs/heads/branch1 - EOF + test_i18ngrep "warning: refs/remotes/origin/branch1 usually tracks refs/heads/branch1, not refs/heads/branch2" error && + test_i18ngrep "warning: refs/remotes/origin/branch2 usually tracks refs/heads/branch2, not refs/heads/branch1" error ) ' diff --git a/t/t5545-push-options.sh b/t/t5545-push-options.sh index 463783789c..b47a95871c 100755 --- a/t/t5545-push-options.sh +++ b/t/t5545-push-options.sh @@ -217,17 +217,32 @@ test_expect_success 'invalid push option in config' ' test_refs master HEAD@{1} ' +test_expect_success 'push options keep quoted characters intact (direct)' ' + mk_repo_pair && + git -C upstream config receive.advertisePushOptions true && + test_commit -C workbench one && + git -C workbench push --push-option="\"embedded quotes\"" up master && + echo "\"embedded quotes\"" >expect && + test_cmp expect upstream/.git/hooks/pre-receive.push_options +' + . "$TEST_DIRECTORY"/lib-httpd.sh start_httpd -test_expect_success 'push option denied properly by http server' ' +# set up http repository for fetching/pushing, with push options config +# bool set to $1 +mk_http_pair () { test_when_finished "rm -rf test_http_clone" && - test_when_finished "rm -rf \"$HTTPD_DOCUMENT_ROOT_PATH\"/upstream.git" && + test_when_finished 'rm -rf "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git' && mk_repo_pair && - git -C upstream config receive.advertisePushOptions false && + git -C upstream config receive.advertisePushOptions "$1" && git -C upstream config http.receivepack true && cp -R upstream/.git "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git && - git clone "$HTTPD_URL"/smart/upstream test_http_clone && + git clone "$HTTPD_URL"/smart/upstream test_http_clone +} + +test_expect_success 'push option denied properly by http server' ' + mk_http_pair false && test_commit -C test_http_clone one && test_must_fail git -C test_http_clone push --push-option=asdf origin master 2>actual && test_i18ngrep "the receiving end does not support push options" actual && @@ -235,13 +250,7 @@ test_expect_success 'push option denied properly by http server' ' ' test_expect_success 'push options work properly across http' ' - test_when_finished "rm -rf test_http_clone" && - test_when_finished "rm -rf \"$HTTPD_DOCUMENT_ROOT_PATH\"/upstream.git" && - mk_repo_pair && - git -C upstream config receive.advertisePushOptions true && - git -C upstream config http.receivepack true && - cp -R upstream/.git "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git && - git clone "$HTTPD_URL"/smart/upstream test_http_clone && + mk_http_pair true && test_commit -C test_http_clone one && git -C test_http_clone push origin master && @@ -260,6 +269,15 @@ test_expect_success 'push options work properly across http' ' test_cmp expect actual ' +test_expect_success 
'push options keep quoted characters intact (http)' ' + mk_http_pair true && + + test_commit -C test_http_clone one && + git -C test_http_clone push --push-option="\"embedded quotes\"" origin master && + echo "\"embedded quotes\"" >expect && + test_cmp expect "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git/hooks/pre-receive.push_options +' + stop_httpd test_done diff --git a/t/t5570-git-daemon.sh b/t/t5570-git-daemon.sh index 755b05a8ae..0d4c52016b 100755 --- a/t/t5570-git-daemon.sh +++ b/t/t5570-git-daemon.sh @@ -50,7 +50,7 @@ test_expect_success 'no-op fetch -v stderr is as expected' ' ' test_expect_success 'no-op fetch without "-v" is quiet' ' - (cd clone && git fetch) 2>stderr && + (cd clone && git fetch 2>../stderr) && ! test -s stderr ' diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh index 8c437bf872..0b62037744 100755 --- a/t/t5601-clone.sh +++ b/t/t5601-clone.sh @@ -628,4 +628,105 @@ test_expect_success 'clone on case-insensitive fs' ' ) ' +partial_clone () { + SERVER="$1" && + URL="$2" && + + rm -rf "$SERVER" client && + test_create_repo "$SERVER" && + test_commit -C "$SERVER" one && + HASH1=$(git hash-object "$SERVER/one.t") && + git -C "$SERVER" revert HEAD && + test_commit -C "$SERVER" two && + HASH2=$(git hash-object "$SERVER/two.t") && + test_config -C "$SERVER" uploadpack.allowfilter 1 && + test_config -C "$SERVER" uploadpack.allowanysha1inwant 1 && + + git clone --filter=blob:limit=0 "$URL" client && + + git -C client fsck && + + # Ensure that unneeded blobs are not inadvertently fetched. + test_config -C client extensions.partialclone "not a remote" && + test_must_fail git -C client cat-file -e "$HASH1" && + + # But this blob was fetched, because clone performs an initial checkout + git -C client cat-file -e "$HASH2" +} + +test_expect_success 'partial clone' ' + partial_clone server "file://$(pwd)/server" +' + +test_expect_success 'partial clone: warn if server does not support object filtering' ' + rm -rf server client && + test_create_repo server && + test_commit -C server one && + + git clone --filter=blob:limit=0 "file://$(pwd)/server" client 2> err && + + test_i18ngrep "filtering not recognized by server" err +' + +test_expect_success 'batch missing blob request during checkout' ' + rm -rf server client && + + test_create_repo server && + echo a >server/a && + echo b >server/b && + git -C server add a b && + + git -C server commit -m x && + echo aa >server/a && + echo bb >server/b && + git -C server add a b && + git -C server commit -m x && + + test_config -C server uploadpack.allowfilter 1 && + test_config -C server uploadpack.allowanysha1inwant 1 && + + git clone --filter=blob:limit=0 "file://$(pwd)/server" client && + + # Ensure that there is only one negotiation by checking that there is + # only "done" line sent. ("done" marks the end of negotiation.) 
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client checkout HEAD^ && + grep "git> done" trace >done_lines && + test_line_count = 1 done_lines +' + +test_expect_success 'batch missing blob request does not inadvertently try to fetch gitlinks' ' + rm -rf server client && + + test_create_repo repo_for_submodule && + test_commit -C repo_for_submodule x && + + test_create_repo server && + echo a >server/a && + echo b >server/b && + git -C server add a b && + git -C server commit -m x && + + echo aa >server/a && + echo bb >server/b && + # Also add a gitlink pointing to an arbitrary repository + git -C server submodule add "$(pwd)/repo_for_submodule" c && + git -C server add a b c && + git -C server commit -m x && + + test_config -C server uploadpack.allowfilter 1 && + test_config -C server uploadpack.allowanysha1inwant 1 && + + # Make sure that it succeeds + git clone --filter=blob:limit=0 "file://$(pwd)/server" client +' + +. "$TEST_DIRECTORY"/lib-httpd.sh +start_httpd + +test_expect_success 'partial clone using HTTP' ' + partial_clone "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server" +' + +stop_httpd + test_done diff --git a/t/t5616-partial-clone.sh b/t/t5616-partial-clone.sh new file mode 100755 index 0000000000..cee5565367 --- /dev/null +++ b/t/t5616-partial-clone.sh @@ -0,0 +1,157 @@ +#!/bin/sh + +test_description='git partial clone' + +. ./test-lib.sh + +# create a normal "src" repo where we can later create new commits. +# expect_1.oids will contain a list of the OIDs of all blobs. +test_expect_success 'setup normal src repo' ' + echo "{print \$1}" >print_1.awk && + echo "{print \$2}" >print_2.awk && + + git init src && + for n in 1 2 3 4 + do + echo "This is file: $n" > src/file.$n.txt + git -C src add file.$n.txt + git -C src commit -m "file $n" + git -C src ls-files -s file.$n.txt >>temp + done && + awk -f print_2.awk <temp | sort >expect_1.oids && + test_line_count = 4 expect_1.oids +' + +# bare clone "src" giving "srv.bare" for use as our server. +test_expect_success 'setup bare clone for server' ' + git clone --bare "file://$(pwd)/src" srv.bare && + git -C srv.bare config --local uploadpack.allowfilter 1 && + git -C srv.bare config --local uploadpack.allowanysha1inwant 1 +' + +# do basic partial clone from "srv.bare" +# confirm we are missing all of the known blobs. +# confirm partial clone was registered in the local config. +test_expect_success 'do partial clone 1' ' + git clone --no-checkout --filter=blob:none "file://$(pwd)/srv.bare" pc1 && + git -C pc1 rev-list HEAD --quiet --objects --missing=print \ + | awk -f print_1.awk \ + | sed "s/?//" \ + | sort >observed.oids && + test_cmp expect_1.oids observed.oids && + test "$(git -C pc1 config --local core.repositoryformatversion)" = "1" && + test "$(git -C pc1 config --local extensions.partialclone)" = "origin" && + test "$(git -C pc1 config --local core.partialclonefilter)" = "blob:none" +' + +# checkout master to force dynamic object fetch of blobs at HEAD. +test_expect_success 'verify checkout with dynamic object fetch' ' + git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed && + test_line_count = 4 observed && + git -C pc1 checkout master && + git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed && + test_line_count = 0 observed +' + +# create new commits in "src" repo to establish a blame history on file.1.txt +# and push to "srv.bare". 
+test_expect_success 'push new commits to server' ' + git -C src remote add srv "file://$(pwd)/srv.bare" && + for x in a b c d e + do + echo "Mod file.1.txt $x" >>src/file.1.txt + git -C src add file.1.txt + git -C src commit -m "mod $x" + done && + git -C src blame master -- file.1.txt >expect.blame && + git -C src push -u srv master +' + +# (partial) fetch in the partial clone repo from the promisor remote. +# verify that fetch inherited the filter-spec from the config and DOES NOT +# have the new blobs. +test_expect_success 'partial fetch inherits filter settings' ' + git -C pc1 fetch origin && + git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed && + test_line_count = 5 observed +' + +# force dynamic object fetch using diff. +# we should only get 1 new blob (for the file in origin/master). +test_expect_success 'verify diff causes dynamic object fetch' ' + git -C pc1 diff master..origin/master -- file.1.txt && + git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed && + test_line_count = 4 observed +' + +# force full dynamic object fetch of the file's history using blame. +# we should get the intermediate blobs for the file. +test_expect_success 'verify blame causes dynamic object fetch' ' + git -C pc1 blame origin/master -- file.1.txt >observed.blame && + test_cmp expect.blame observed.blame && + git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed && + test_line_count = 0 observed +' + +# create new commits in "src" repo to establish a history on file.2.txt +# and push to "srv.bare". +test_expect_success 'push new commits to server for file.2.txt' ' + for x in a b c d e f + do + echo "Mod file.2.txt $x" >>src/file.2.txt + git -C src add file.2.txt + git -C src commit -m "mod $x" + done && + git -C src push -u srv master +' + +# Do FULL fetch by disabling inherited filter-spec using --no-filter. +# Verify we have all the new blobs. +test_expect_success 'override inherited filter-spec using --no-filter' ' + git -C pc1 fetch --no-filter origin && + git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed && + test_line_count = 0 observed +' + +# create new commits in "src" repo to establish a history on file.3.txt +# and push to "srv.bare". +test_expect_success 'push new commits to server for file.3.txt' ' + for x in a b c d e f + do + echo "Mod file.3.txt $x" >>src/file.3.txt + git -C src add file.3.txt + git -C src commit -m "mod $x" + done && + git -C src push -u srv master +' + +# Do a partial fetch and then try to manually fetch the missing objects. +# This can be used as the basis of a pre-command hook to bulk fetch objects +# perhaps combined with a command in dry-run mode. 
+test_expect_success 'manual prefetch of missing objects' ' + git -C pc1 fetch --filter=blob:none origin && + git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \ + | awk -f print_1.awk \ + | sed "s/?//" \ + | sort >observed.oids && + test_line_count = 6 observed.oids && + git -C pc1 fetch-pack --stdin "file://$(pwd)/srv.bare" <observed.oids && + git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \ + | awk -f print_1.awk \ + | sed "s/?//" \ + | sort >observed.oids && + test_line_count = 0 observed.oids +' + +test_expect_success 'partial clone with transfer.fsckobjects=1 uses index-pack --fsck-objects' ' + git init src && + test_commit -C src x && + test_config -C src uploadpack.allowfilter 1 && + test_config -C src uploadpack.allowanysha1inwant 1 && + + GIT_TRACE="$(pwd)/trace" git -c transfer.fsckobjects=1 \ + clone --filter="blob:none" "file://$(pwd)/src" dst && + grep "git index-pack.*--fsck-objects" trace +' + +test_done diff --git a/t/t6040-tracking-info.sh b/t/t6040-tracking-info.sh index 8f17fd9da8..716283b274 100755 --- a/t/t6040-tracking-info.sh +++ b/t/t6040-tracking-info.sh @@ -147,6 +147,48 @@ test_expect_success 'status -s -b (diverged from upstream)' ' ' cat >expect <<\EOF +## b1...origin/master [different] +EOF + +test_expect_success 'status -s -b --no-ahead-behind (diverged from upstream)' ' + ( + cd test && + git checkout b1 >/dev/null && + git status -s -b --no-ahead-behind | head -1 + ) >actual && + test_i18ncmp expect actual +' + +cat >expect <<\EOF +On branch b1 +Your branch and 'origin/master' have diverged, +and have 1 and 1 different commits each, respectively. +EOF + +test_expect_success 'status --long --branch' ' + ( + cd test && + git checkout b1 >/dev/null && + git status --long -b | head -3 + ) >actual && + test_i18ncmp expect actual +' + +cat >expect <<\EOF +On branch b1 +Your branch and 'origin/master' refer to different commits. 
+EOF + +test_expect_success 'status --long --branch --no-ahead-behind' ' + ( + cd test && + git checkout b1 >/dev/null && + git status --long -b --no-ahead-behind | head -2 + ) >actual && + test_i18ncmp expect actual +' + +cat >expect <<\EOF ## b5...brokenbase [gone] EOF diff --git a/t/t6120-describe.sh b/t/t6120-describe.sh index a5d9015024..bae78c4e89 100755 --- a/t/t6120-describe.sh +++ b/t/t6120-describe.sh @@ -378,4 +378,12 @@ check_describe tags/A --all A check_describe tags/c --all c check_describe heads/branch_A --all --match='branch_*' branch_A +test_expect_success 'describe complains about tree object' ' + test_must_fail git describe HEAD^{tree} +' + +test_expect_success 'describe complains about missing object' ' + test_must_fail git describe $_z40 +' + test_done diff --git a/t/t6200-fmt-merge-msg.sh b/t/t6200-fmt-merge-msg.sh index 2e2fb0e957..a54a52aaa4 100755 --- a/t/t6200-fmt-merge-msg.sh +++ b/t/t6200-fmt-merge-msg.sh @@ -512,7 +512,7 @@ test_expect_success 'merge-msg with "merging" an annotated tag' ' test_when_finished "git reset --hard" && annote=$(git rev-parse annote) && - git merge --no-commit $annote && + git merge --no-commit --no-ff $annote && { cat <<-EOF Merge tag '\''$annote'\'' diff --git a/t/t6300-for-each-ref.sh b/t/t6300-for-each-ref.sh index c128dfc579..295d1475bd 100755 --- a/t/t6300-for-each-ref.sh +++ b/t/t6300-for-each-ref.sh @@ -373,11 +373,8 @@ test_expect_success 'Quoting style: tcl' ' for i in "--perl --shell" "-s --python" "--python --tcl" "--tcl --perl"; do test_expect_success "more than one quoting style: $i" " - git for-each-ref $i 2>&1 | (read line && - case \$line in - \"error: more than one quoting style\"*) : happy;; - *) false - esac) + test_must_fail git for-each-ref $i 2>err && + grep '^error: more than one quoting style' err " done diff --git a/t/t7004-tag.sh b/t/t7004-tag.sh index a9af2de996..2aac77af70 100755 --- a/t/t7004-tag.sh +++ b/t/t7004-tag.sh @@ -452,6 +452,21 @@ test_expect_success \ test_cmp expect actual ' +get_tag_header annotated-tag-edit $commit commit $time >expect +echo "An edited message" >>expect +test_expect_success 'set up editor' ' + write_script fakeeditor <<-\EOF + sed -e "s/A message/An edited message/g" <"$1" >"$1-" + mv "$1-" "$1" + EOF +' +test_expect_success \ + 'creating an annotated tag with -m message --edit should succeed' ' + GIT_EDITOR=./fakeeditor git tag -m "A message" --edit annotated-tag-edit && + get_tag_msg annotated-tag-edit >actual && + test_cmp expect actual +' + cat >msgfile <<EOF Another message in a file. @@ -465,6 +480,21 @@ test_expect_success \ test_cmp expect actual ' +get_tag_header file-annotated-tag-edit $commit commit $time >expect +sed -e "s/Another message/Another edited message/g" msgfile >>expect +test_expect_success 'set up editor' ' + write_script fakeeditor <<-\EOF + sed -e "s/Another message/Another edited message/g" <"$1" >"$1-" + mv "$1-" "$1" + EOF +' +test_expect_success \ + 'creating an annotated tag with -F messagefile --edit should succeed' ' + GIT_EDITOR=./fakeeditor git tag -F msgfile --edit file-annotated-tag-edit && + get_tag_msg file-annotated-tag-edit >actual && + test_cmp expect actual +' + cat >inputmsg <<EOF A message from the standard input diff --git a/t/t7006-pager.sh b/t/t7006-pager.sh index f5f46a95b4..7541ba5edb 100755 --- a/t/t7006-pager.sh +++ b/t/t7006-pager.sh @@ -110,13 +110,6 @@ test_expect_success TTY 'configuration can disable pager' ' ! 
test -e paginated.out ' -test_expect_success TTY 'git config uses a pager if configured to' ' - rm -f paginated.out && - test_config pager.config true && - test_terminal git config --list && - test -e paginated.out -' - test_expect_success TTY 'configuration can enable pager (from subdir)' ' rm -f paginated.out && mkdir -p subdir && @@ -252,6 +245,48 @@ test_expect_success TTY 'git branch --set-upstream-to ignores pager.branch' ' ! test -e paginated.out ' +test_expect_success TTY 'git config ignores pager.config when setting' ' + rm -f paginated.out && + test_terminal git -c pager.config config foo.bar bar && + ! test -e paginated.out +' + +test_expect_success TTY 'git config --edit ignores pager.config' ' + rm -f paginated.out editor.used && + write_script editor <<-\EOF && + touch editor.used + EOF + EDITOR=./editor test_terminal git -c pager.config config --edit && + ! test -e paginated.out && + test -e editor.used +' + +test_expect_success TTY 'git config --get ignores pager.config' ' + rm -f paginated.out && + test_terminal git -c pager.config config --get foo.bar && + ! test -e paginated.out +' + +test_expect_success TTY 'git config --get-urlmatch defaults to paging' ' + rm -f paginated.out && + test_terminal git -c http."https://foo.com/".bar=foo \ + config --get-urlmatch http https://foo.com && + test -e paginated.out +' + +test_expect_success TTY 'git config --get-all respects pager.config' ' + rm -f paginated.out && + test_terminal git -c pager.config=false config --get-all foo.bar && + ! test -e paginated.out +' + +test_expect_success TTY 'git config --list defaults to paging' ' + rm -f paginated.out && + test_terminal git config --list && + test -e paginated.out +' + + # A colored commit log will begin with an appropriate ANSI escape # for the first color; the text "commit" comes later. colorful() { diff --git a/t/t7063-status-untracked-cache.sh b/t/t7063-status-untracked-cache.sh index e5fb892f95..c61e304e97 100755 --- a/t/t7063-status-untracked-cache.sh +++ b/t/t7063-status-untracked-cache.sh @@ -14,6 +14,9 @@ test_description='test untracked cache' # See <20160803174522.5571-1-pclouds@gmail.com> if you want to know # more. +GIT_FORCE_UNTRACKED_CACHE=true +export GIT_FORCE_UNTRACKED_CACHE + sync_mtime () { find . -type d -ls >/dev/null } @@ -22,6 +25,12 @@ avoid_racy() { sleep 1 } +status_is_clean() { + >../status.expect && + git status --porcelain >../status.actual && + test_cmp ../status.expect ../status.actual +} + test_lazy_prereq UNTRACKED_CACHE ' { git update-index --test-untracked-cache; ret=$?; } && test $ret -ne 1 @@ -683,4 +692,85 @@ test_expect_success 'untracked cache survives a commit' ' test_cmp ../before ../after ' +test_expect_success 'teardown worktree' ' + cd .. 
+' + +test_expect_success SYMLINKS 'setup worktree for symlink test' ' + git init worktree-symlink && + cd worktree-symlink && + git config core.untrackedCache true && + mkdir one two && + touch one/file two/file && + git add one/file two/file && + git commit -m"first commit" && + git rm -rf one && + ln -s two one && + git add one && + git commit -m"second commit" +' + +test_expect_success SYMLINKS '"status" after symlink replacement should be clean with UC=true' ' + git checkout HEAD~ && + status_is_clean && + status_is_clean && + git checkout master && + avoid_racy && + status_is_clean && + status_is_clean +' + +test_expect_success SYMLINKS '"status" after symlink replacement should be clean with UC=false' ' + git config core.untrackedCache false && + git checkout HEAD~ && + status_is_clean && + status_is_clean && + git checkout master && + avoid_racy && + status_is_clean && + status_is_clean +' + +test_expect_success 'setup worktree for non-symlink test' ' + git init worktree-non-symlink && + cd worktree-non-symlink && + git config core.untrackedCache true && + mkdir one two && + touch one/file two/file && + git add one/file two/file && + git commit -m"first commit" && + git rm -rf one && + cp two/file one && + git add one && + git commit -m"second commit" +' + +test_expect_success '"status" after file replacement should be clean with UC=true' ' + git checkout HEAD~ && + status_is_clean && + status_is_clean && + git checkout master && + avoid_racy && + status_is_clean && + test-dump-untracked-cache >../actual && + grep -F "recurse valid" ../actual >../actual.grep && + cat >../expect.grep <<EOF && +/ 0000000000000000000000000000000000000000 recurse valid +/two/ 0000000000000000000000000000000000000000 recurse valid +EOF + status_is_clean && + test_cmp ../expect.grep ../actual.grep +' + +test_expect_success '"status" after file replacement should be clean with UC=false' ' + git config core.untrackedCache false && + git checkout HEAD~ && + status_is_clean && + status_is_clean && + git checkout master && + avoid_racy && + status_is_clean && + status_is_clean +' + test_done diff --git a/t/t7064-wtstatus-pv2.sh b/t/t7064-wtstatus-pv2.sh index e319fa2e84..8f795327a0 100755 --- a/t/t7064-wtstatus-pv2.sh +++ b/t/t7064-wtstatus-pv2.sh @@ -390,6 +390,68 @@ test_expect_success 'verify upstream fields in branch header' ' ) ' +test_expect_success 'verify --[no-]ahead-behind with V2 format' ' + git checkout master && + test_when_finished "rm -rf sub_repo" && + git clone . sub_repo && + ( + ## Confirm local master tracks remote master. + cd sub_repo && + HUF=$(git rev-parse HEAD) && + + # Confirm --no-ahead-behind reports traditional branch.ab with 0/0 for equal branches. + cat >expect <<-EOF && + # branch.oid $HUF + # branch.head master + # branch.upstream origin/master + # branch.ab +0 -0 + EOF + + git status --no-ahead-behind --porcelain=v2 --branch --untracked-files=all >actual && + test_cmp expect actual && + + # Confirm --ahead-behind reports traditional branch.ab with 0/0. + cat >expect <<-EOF && + # branch.oid $HUF + # branch.head master + # branch.upstream origin/master + # branch.ab +0 -0 + EOF + + git status --ahead-behind --porcelain=v2 --branch --untracked-files=all >actual && + test_cmp expect actual && + + ## Test non-equal ahead/behind. + echo xyz >file_xyz && + git add file_xyz && + git commit -m xyz && + + HUF=$(git rev-parse HEAD) && + + # Confirm --no-ahead-behind reports branch.ab with ?/? for non-equal branches. 
+ cat >expect <<-EOF && + # branch.oid $HUF + # branch.head master + # branch.upstream origin/master + # branch.ab +? -? + EOF + + git status --no-ahead-behind --porcelain=v2 --branch --untracked-files=all >actual && + test_cmp expect actual && + + # Confirm --ahead-behind reports traditional branch.ab with 1/0. + cat >expect <<-EOF && + # branch.oid $HUF + # branch.head master + # branch.upstream origin/master + # branch.ab +1 -0 + EOF + + git status --ahead-behind --porcelain=v2 --branch --untracked-files=all >actual && + test_cmp expect actual + ) +' + test_expect_success 'create and add submodule, submodule appears clean (A. S...)' ' git checkout master && git clone . sub_repo && diff --git a/t/t7505-prepare-commit-msg-hook.sh b/t/t7505-prepare-commit-msg-hook.sh index b13f72975e..1f43b3cd4c 100755 --- a/t/t7505-prepare-commit-msg-hook.sh +++ b/t/t7505-prepare-commit-msg-hook.sh @@ -4,6 +4,38 @@ test_description='prepare-commit-msg hook' . ./test-lib.sh +test_expect_success 'set up commits for rebasing' ' + test_commit root && + test_commit a a a && + test_commit b b b && + git checkout -b rebase-me root && + test_commit rebase-a a aa && + test_commit rebase-b b bb && + for i in $(test_seq 1 13) + do + test_commit rebase-$i c $i + done && + git checkout master && + + cat >rebase-todo <<-EOF + pick $(git rev-parse rebase-a) + pick $(git rev-parse rebase-b) + fixup $(git rev-parse rebase-1) + fixup $(git rev-parse rebase-2) + pick $(git rev-parse rebase-3) + fixup $(git rev-parse rebase-4) + squash $(git rev-parse rebase-5) + reword $(git rev-parse rebase-6) + squash $(git rev-parse rebase-7) + fixup $(git rev-parse rebase-8) + fixup $(git rev-parse rebase-9) + edit $(git rev-parse rebase-10) + squash $(git rev-parse rebase-11) + squash $(git rev-parse rebase-12) + edit $(git rev-parse rebase-13) + EOF +' + test_expect_success 'with no hook' ' echo "foo" > file && @@ -31,17 +63,41 @@ mkdir -p "$HOOKDIR" echo "#!$SHELL_PATH" > "$HOOK" cat >> "$HOOK" <<'EOF' -if test "$2" = commit; then - source=$(git rev-parse "$3") +GIT_DIR=$(git rev-parse --git-dir) +if test -d "$GIT_DIR/rebase-merge" +then + rebasing=1 else - source=${2-default} + rebasing=0 fi -if test "$GIT_EDITOR" = :; then - sed -e "1s/.*/$source (no editor)/" "$1" > msg.tmp + +get_last_cmd () { + tail -n1 "$GIT_DIR/rebase-merge/done" | { + read cmd id _ + git log --pretty="[$cmd %s]" -n1 $id + } +} + +if test "$2" = commit +then + if test $rebasing = 1 + then + source="$3" + else + source=$(git rev-parse "$3") + fi else - sed -e "1s/.*/$source/" "$1" > msg.tmp + source=${2-default} +fi +test "$GIT_EDITOR" = : && source="$source (no editor)" + +if test $rebasing = 1 +then + echo "$source $(get_last_cmd)" >"$1" +else + sed -e "1s/.*/$source/" "$1" >msg.tmp + mv msg.tmp "$1" fi -mv msg.tmp "$1" exit 0 EOF chmod +x "$HOOK" @@ -156,6 +212,63 @@ test_expect_success 'with hook and editor (merge)' ' test "$(git log -1 --pretty=format:%s)" = "merge" ' +test_rebase () { + expect=$1 && + mode=$2 && + test_expect_$expect C_LOCALE_OUTPUT "with hook (rebase $mode)" ' + test_when_finished "\ + git rebase --abort + git checkout -f master + git branch -D tmp" && + git checkout -b tmp rebase-me && + GIT_SEQUENCE_EDITOR="cp rebase-todo" && + GIT_EDITOR="\"$FAKE_EDITOR\"" && + ( + export GIT_SEQUENCE_EDITOR GIT_EDITOR && + test_must_fail git rebase $mode b && + echo x >a && + git add a && + test_must_fail git rebase --continue && + echo x >b && + git add b && + git commit && + git rebase --continue && + echo y >a && + git add a && + git commit && + 
git rebase --continue && + echo y >b && + git add b && + git rebase --continue + ) && + if test $mode = -p # reword amended after pick + then + n=18 + else + n=17 + fi && + git log --pretty=%s -g -n$n HEAD@{1} >actual && + test_cmp "$TEST_DIRECTORY/t7505/expected-rebase$mode" actual + ' +} + +test_rebase success -i +test_rebase success -p + +test_expect_success 'with hook (cherry-pick)' ' + test_when_finished "git checkout -f master" && + git checkout -B other b && + git cherry-pick rebase-1 && + test "$(git log -1 --pretty=format:%s)" = "message (no editor)" +' + +test_expect_success 'with hook and editor (cherry-pick)' ' + test_when_finished "git checkout -f master" && + git checkout -B other b && + git cherry-pick -e rebase-1 && + test "$(git log -1 --pretty=format:%s)" = merge +' + cat > "$HOOK" <<'EOF' #!/bin/sh exit 1 @@ -197,4 +310,11 @@ test_expect_success 'with failing hook (merge)' ' ' +test_expect_success C_LOCALE_OUTPUT 'with failing hook (cherry-pick)' ' + test_when_finished "git checkout -f master" && + git checkout -B other b && + test_must_fail git cherry-pick rebase-1 2>actual && + test $(grep -c prepare-commit-msg actual) = 1 +' + test_done diff --git a/t/t7505/expected-rebase-i b/t/t7505/expected-rebase-i new file mode 100644 index 0000000000..c514bdbb94 --- /dev/null +++ b/t/t7505/expected-rebase-i @@ -0,0 +1,17 @@ +message [edit rebase-13] +message (no editor) [edit rebase-13] +message [squash rebase-12] +message (no editor) [squash rebase-11] +default [edit rebase-10] +message (no editor) [edit rebase-10] +message [fixup rebase-9] +message (no editor) [fixup rebase-8] +message (no editor) [squash rebase-7] +message [reword rebase-6] +message [squash rebase-5] +message (no editor) [fixup rebase-4] +message (no editor) [pick rebase-3] +message (no editor) [fixup rebase-2] +message (no editor) [fixup rebase-1] +merge [pick rebase-b] +message [pick rebase-a] diff --git a/t/t7505/expected-rebase-p b/t/t7505/expected-rebase-p new file mode 100644 index 0000000000..93bada596e --- /dev/null +++ b/t/t7505/expected-rebase-p @@ -0,0 +1,18 @@ +message [edit rebase-13] +message (no editor) [edit rebase-13] +message [squash rebase-12] +message (no editor) [squash rebase-11] +default [edit rebase-10] +message (no editor) [edit rebase-10] +message [fixup rebase-9] +message (no editor) [fixup rebase-8] +message (no editor) [squash rebase-7] +HEAD [reword rebase-6] +message (no editor) [reword rebase-6] +message [squash rebase-5] +message (no editor) [fixup rebase-4] +message (no editor) [pick rebase-3] +message (no editor) [fixup rebase-2] +message (no editor) [fixup rebase-1] +merge [pick rebase-b] +message [pick rebase-a] diff --git a/t/t7519-status-fsmonitor.sh b/t/t7519-status-fsmonitor.sh index eb2d13bbcf..756beb0d8e 100755 --- a/t/t7519-status-fsmonitor.sh +++ b/t/t7519-status-fsmonitor.sh @@ -314,4 +314,43 @@ test_expect_success 'splitting the index results in the same state' ' test_cmp expect actual ' +test_expect_success UNTRACKED_CACHE 'ignore .git changes when invalidating UNTR' ' + test_create_repo dot-git && + ( + cd dot-git && + mkdir -p .git/hooks && + : >tracked && + : >modified && + mkdir dir1 && + : >dir1/tracked && + : >dir1/modified && + mkdir dir2 && + : >dir2/tracked && + : >dir2/modified && + write_integration_script && + git config core.fsmonitor .git/hooks/fsmonitor-test && + git update-index --untracked-cache && + git update-index --fsmonitor && + GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-before" \ + git status && + test-dump-untracked-cache 
>../before + ) && + cat >>dot-git/.git/hooks/fsmonitor-test <<-\EOF && + printf ".git\0" + printf ".git/index\0" + printf "dir1/.git\0" + printf "dir1/.git/index\0" + EOF + ( + cd dot-git && + GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-after" \ + git status && + test-dump-untracked-cache >../after + ) && + grep "directory invalidation" trace-before >>before && + grep "directory invalidation" trace-after >>after && + # UNTR extension unchanged, dir invalidation count unchanged + test_cmp before after +' + test_done diff --git a/t/t7600-merge.sh b/t/t7600-merge.sh index dfde6a675a..6736d8d131 100755 --- a/t/t7600-merge.sh +++ b/t/t7600-merge.sh @@ -700,6 +700,42 @@ test_expect_success 'merge --no-ff --edit' ' test_cmp expected actual ' +test_expect_success 'merge annotated/signed tag w/o tracking' ' + test_when_finished "rm -rf dst; git tag -d anno1" && + git tag -a -m "anno c1" anno1 c1 && + git init dst && + git rev-parse c1 >dst/expect && + ( + # c0 fast-forwards to c1 but because this repository + # is not a "downstream" whose refs/tags follows along + # tag from the "upstream", this pull defaults to --no-ff + cd dst && + git pull .. c0 && + git pull .. anno1 && + git rev-parse HEAD^2 >actual && + test_cmp expect actual + ) +' + +test_expect_success 'merge annotated/signed tag w/ tracking' ' + test_when_finished "rm -rf dst; git tag -d anno1" && + git tag -a -m "anno c1" anno1 c1 && + git init dst && + git rev-parse c1 >dst/expect && + ( + # c0 fast-forwards to c1 and because this repository + # is a "downstream" whose refs/tags follows along + # tag from the "upstream", this pull defaults to --ff + cd dst && + git remote add origin .. && + git pull origin c0 && + git fetch origin && + git merge anno1 && + git rev-parse HEAD >actual && + test_cmp expect actual + ) +' + test_expect_success GPG 'merge --ff-only tag' ' git reset --hard c0 && git commit --allow-empty -m "A newer commit" && @@ -718,7 +754,7 @@ test_expect_success GPG 'merge --no-edit tag should skip editor' ' git tag -f -s -m "A newer commit" signed && git reset --hard c0 && - EDITOR=false git merge --no-edit signed && + EDITOR=false git merge --no-edit --no-ff signed && git rev-parse signed^0 >expect && git rev-parse HEAD^2 >actual && test_cmp expect actual diff --git a/t/t7607-merge-overwrite.sh b/t/t7607-merge-overwrite.sh index 9444d6a9b9..9c422bcd7c 100755 --- a/t/t7607-merge-overwrite.sh +++ b/t/t7607-merge-overwrite.sh @@ -97,7 +97,10 @@ test_expect_failure 'will not overwrite unstaged changes in renamed file' ' git mv c1.c other.c && git commit -m rename && cp important other.c && - git merge c1a && + test_must_fail git merge c1a >out && + test_i18ngrep "Refusing to lose dirty file at other.c" out && + test_path_is_file other.c~HEAD && + test $(git hash-object other.c~HEAD) = $(git rev-parse c1a:c1.c) && test_cmp important other.c ' diff --git a/t/t9000-addresses.sh b/t/t9000-addresses.sh deleted file mode 100755 index a1ebef6de2..0000000000 --- a/t/t9000-addresses.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh - -test_description='compare address parsing with and without Mail::Address' -. ./test-lib.sh - -if ! 
test_have_prereq PERL; then - skip_all='skipping perl interface tests, perl not available' - test_done -fi - -perl -MTest::More -e 0 2>/dev/null || { - skip_all="Perl Test::More unavailable, skipping test" - test_done -} - -perl -MMail::Address -e 0 2>/dev/null || { - skip_all="Perl Mail::Address unavailable, skipping test" - test_done -} - -test_external_has_tap=1 - -test_external_without_stderr \ - 'Perl address parsing function' \ - perl "$TEST_DIRECTORY"/t9000/test.pl - -test_done diff --git a/t/t9000/test.pl b/t/t9000/test.pl deleted file mode 100755 index dfeaa9c655..0000000000 --- a/t/t9000/test.pl +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/perl -use lib (split(/:/, $ENV{GITPERLLIB})); - -use 5.008; -use warnings; -use strict; - -use Test::More qw(no_plan); -use Mail::Address; - -BEGIN { use_ok('Git') } - -my @success_list = (q[Jane], - q[jdoe@example.com], - q[<jdoe@example.com>], - q[Jane <jdoe@example.com>], - q[Jane Doe <jdoe@example.com>], - q["Jane" <jdoe@example.com>], - q["Doe, Jane" <jdoe@example.com>], - q["Jane@:;\>.,()<Doe" <jdoe@example.com>], - q[Jane!#$%&'*+-/=?^_{|}~Doe' <jdoe@example.com>], - q["<jdoe@example.com>"], - q["Jane jdoe@example.com"], - q[Jane Doe <jdoe @ example.com >], - q[Jane Doe < jdoe@example.com >], - q[Jane @ Doe @ Jane @ Doe], - q["Jane, 'Doe'" <jdoe@example.com>], - q['Doe, "Jane' <jdoe@example.com>], - q["Jane" "Do"e <jdoe@example.com>], - q["Jane' Doe" <jdoe@example.com>], - q["Jane Doe <jdoe@example.com>" <jdoe@example.com>], - q["Jane\" Doe" <jdoe@example.com>], - q[Doe, jane <jdoe@example.com>], - q["Jane Doe <jdoe@example.com>], - q['Jane 'Doe' <jdoe@example.com>], - q[Jane@:;\.,()<>Doe <jdoe@example.com>], - q[Jane <jdoe@example.com> Doe], - q[<jdoe@example.com> Jane Doe]); - -my @known_failure_list = (q[Jane\ Doe <jdoe@example.com>], - q["Doe, Ja"ne <jdoe@example.com>], - q["Doe, Katarina" Jane <jdoe@example.com>], - q[Jane jdoe@example.com], - q["Jane "Kat"a" ri"na" ",Doe" <jdoe@example.com>], - q[Jane Doe], - q[Jane "Doe <jdoe@example.com>"], - q[\"Jane Doe <jdoe@example.com>], - q[Jane\"\" Doe <jdoe@example.com>], - q['Jane "Katarina\" \' Doe' <jdoe@example.com>]); - -foreach my $str (@success_list) { - my @expected = map { $_->format } Mail::Address->parse("$str"); - my @actual = Git::parse_mailboxes("$str"); - is_deeply(\@expected, \@actual, qq[same output : $str]); -} - -TODO: { - local $TODO = "known breakage"; - foreach my $str (@known_failure_list) { - my @expected = map { $_->format } Mail::Address->parse("$str"); - my @actual = Git::parse_mailboxes("$str"); - is_deeply(\@expected, \@actual, qq[same output : $str]); - } -} - -my $is_passing = eval { Test::More->is_passing }; -exit($is_passing ? 
0 : 1) unless $@ =~ /Can't locate object method/; diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh index 81869d8913..e80eacbb1b 100755 --- a/t/t9001-send-email.sh +++ b/t/t9001-send-email.sh @@ -178,6 +178,25 @@ test_expect_success $PREREQ 'cc trailer with various syntax' ' test_cmp expected-cc commandline1 ' +test_expect_success $PREREQ 'setup fake get_maintainer.pl script for cc trailer' " + write_script expected-cc-script.sh <<-EOF + echo 'One Person <one@example.com> (supporter:THIS (FOO/bar))' + echo 'Two Person <two@example.com> (maintainer:THIS THING)' + echo 'Third List <three@example.com> (moderated list:THIS THING (FOO/bar))' + echo '<four@example.com> (moderated list:FOR THING)' + echo 'five@example.com (open list:FOR THING (FOO/bar))' + echo 'six@example.com (open list)' + EOF +" + +test_expect_success $PREREQ 'cc trailer with get_maintainer.pl output' ' + clean_fake_sendmail && + git send-email -1 --to=recipient@example.com \ + --cc-cmd=./expected-cc-script.sh \ + --smtp-server="$(pwd)/fake.sendmail" && + test_cmp expected-cc commandline1 +' + test_expect_success $PREREQ 'setup expect' " cat >expected-show-all-headers <<\EOF 0001-Second.patch @@ -205,6 +224,7 @@ Message-Id: MESSAGE-ID-STRING X-Mailer: X-MAILER-STRING In-Reply-To: <unique-message-id@example.com> References: <unique-message-id@example.com> +Reply-To: Reply <reply@example.com> Result: OK EOF @@ -297,6 +317,7 @@ test_expect_success $PREREQ 'Show all headers' ' --dry-run \ --suppress-cc=sob \ --from="Example <from@example.com>" \ + --reply-to="Reply <reply@example.com>" \ --to=to@example.com \ --cc=cc@example.com \ --bcc=bcc@example.com \ diff --git a/t/t9400-git-cvsserver-server.sh b/t/t9400-git-cvsserver-server.sh index c30660d606..06742748e9 100755 --- a/t/t9400-git-cvsserver-server.sh +++ b/t/t9400-git-cvsserver-server.sh @@ -447,12 +447,10 @@ test_expect_success 'cvs update (-p)' ' git push gitcvs.git >/dev/null && cd cvswork && GIT_CONFIG="$git_config" cvs update && - rm -f failures && for i in merge no-lf empty really-empty; do - GIT_CONFIG="$git_config" cvs update -p "$i" >$i.out - test_cmp $i.out ../$i >>failures 2>&1 - done && - test -z "$(cat failures)" + GIT_CONFIG="$git_config" cvs update -p "$i" >$i.out && + test_cmp $i.out ../$i || return 1 + done ' cd "$WORKDIR" diff --git a/t/t9402-git-cvsserver-refs.sh b/t/t9402-git-cvsserver-refs.sh index 6d2d3c8739..cf31ace667 100755 --- a/t/t9402-git-cvsserver-refs.sh +++ b/t/t9402-git-cvsserver-refs.sh @@ -455,20 +455,20 @@ test_expect_success 'cvs up -r $(git rev-parse v1)' ' ' test_expect_success 'cvs diff -r v1 -u' ' - ( cd cvswork && cvs -f diff -r v1 -u ) >cvsDiff.out 2>cvs.log && + ( cd cvswork && cvs -f diff -r v1 -u >../cvsDiff.out 2>../cvs.log ) && test_must_be_empty cvsDiff.out && test_must_be_empty cvs.log ' test_expect_success 'cvs diff -N -r v2 -u' ' - ( cd cvswork && ! cvs -f diff -N -r v2 -u ) >cvsDiff.out 2>cvs.log && + ( cd cvswork && ! cvs -f diff -N -r v2 -u >../cvsDiff.out 2>../cvs.log ) && test_must_be_empty cvs.log && test -s cvsDiff.out && check_diff cvsDiff.out v2 v1 >check_diff.out 2>&1 ' test_expect_success 'cvs diff -N -r v2 -r v1.2' ' - ( cd cvswork && ! cvs -f diff -N -r v2 -r v1.2 -u ) >cvsDiff.out 2>cvs.log && + ( cd cvswork && ! 
cvs -f diff -N -r v2 -r v1.2 -u >../cvsDiff.out 2>../cvs.log ) && test_must_be_empty cvs.log && test -s cvsDiff.out && check_diff cvsDiff.out v2 v1.2 >check_diff.out 2>&1 @@ -487,7 +487,7 @@ test_expect_success 'apply early [cvswork3] diff to b3' ' ' test_expect_success 'check [cvswork3] diff' ' - ( cd cvswork3 && ! cvs -f diff -N -u ) >"$WORKDIR/cvsDiff.out" 2>cvs.log && + ( cd cvswork3 && ! cvs -f diff -N -u >"$WORKDIR/cvsDiff.out" 2>../cvs.log ) && test_must_be_empty cvs.log && test -s cvsDiff.out && test $(grep Index: cvsDiff.out | wc -l) = 3 && diff --git a/t/t9902-completion.sh b/t/t9902-completion.sh index fc614dcbfa..e6485feb0a 100755 --- a/t/t9902-completion.sh +++ b/t/t9902-completion.sh @@ -1237,17 +1237,19 @@ test_expect_success 'double dash "git" itself' ' test_expect_success 'double dash "git checkout"' ' test_completion "git checkout --" <<-\EOF --quiet Z + --detach Z + --track Z + --orphan=Z --ours Z --theirs Z - --track Z - --no-track Z --merge Z - --conflict= - --orphan Z + --conflict=Z --patch Z - --detach Z --ignore-skip-worktree-bits Z + --ignore-other-worktrees Z --recurse-submodules Z + --progress Z + --no-track Z --no-recurse-submodules Z EOF ' diff --git a/t/t9903-bash-prompt.sh b/t/t9903-bash-prompt.sh index 97c9b32c2e..8f5c811dd7 100755 --- a/t/t9903-bash-prompt.sh +++ b/t/t9903-bash-prompt.sh @@ -735,22 +735,12 @@ test_expect_success 'prompt - hide if pwd ignored - env var set, config unset, p test_cmp expected "$actual" ' -test_expect_success 'prompt - hide if pwd ignored - inside gitdir (stdout)' ' +test_expect_success 'prompt - hide if pwd ignored - inside gitdir' ' printf " (GIT_DIR!)" >expected && ( GIT_PS1_HIDE_IF_PWD_IGNORED=y && cd .git && - __git_ps1 >"$actual" 2>/dev/null - ) && - test_cmp expected "$actual" -' - -test_expect_success 'prompt - hide if pwd ignored - inside gitdir (stderr)' ' - printf "" >expected && - ( - GIT_PS1_HIDE_IF_PWD_IGNORED=y && - cd .git && - __git_ps1 >/dev/null 2>"$actual" + __git_ps1 >"$actual" ) && test_cmp expected "$actual" ' diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh index 8a8a9329ee..b895366fee 100644 --- a/t/test-lib-functions.sh +++ b/t/test-lib-functions.sh @@ -629,30 +629,30 @@ test_must_fail () { _test_ok= ;; esac - "$@" + "$@" 2>&7 exit_code=$? if test $exit_code -eq 0 && ! list_contains "$_test_ok" success then - echo >&2 "test_must_fail: command succeeded: $*" + echo >&4 "test_must_fail: command succeeded: $*" return 1 elif test_match_signal 13 $exit_code && list_contains "$_test_ok" sigpipe then return 0 elif test $exit_code -gt 129 && test $exit_code -le 192 then - echo >&2 "test_must_fail: died by signal $(($exit_code - 128)): $*" + echo >&4 "test_must_fail: died by signal $(($exit_code - 128)): $*" return 1 elif test $exit_code -eq 127 then - echo >&2 "test_must_fail: command not found: $*" + echo >&4 "test_must_fail: command not found: $*" return 1 elif test $exit_code -eq 126 then - echo >&2 "test_must_fail: valgrind error: $*" + echo >&4 "test_must_fail: valgrind error: $*" return 1 fi return 0 -} +} 7>&2 2>&4 # Similar to test_must_fail, but tolerates success, too. This is # meant to be used in contexts like: @@ -668,8 +668,8 @@ test_must_fail () { # Accepts the same options as test_must_fail. test_might_fail () { - test_must_fail ok=success "$@" -} + test_must_fail ok=success "$@" 2>&7 +} 7>&2 2>&4 # Similar to test_must_fail and test_might_fail, but check that a # given command exited with a given exit code. 
Meant to be used as: @@ -681,16 +681,16 @@ test_might_fail () { test_expect_code () { want_code=$1 shift - "$@" + "$@" 2>&7 exit_code=$? if test $exit_code = $want_code then return 0 fi - echo >&2 "test_expect_code: command exited with $exit_code, we wanted $want_code $*" + echo >&4 "test_expect_code: command exited with $exit_code, we wanted $want_code $*" return 1 -} +} 7>&2 2>&4 # test_cmp is a helper function to compare actual and expected output. # You can use it like: @@ -752,18 +752,18 @@ test_i18ngrep () { shift ! grep "$@" && return 0 - echo >&2 "error: '! grep $@' did find a match in:" + echo >&4 "error: '! grep $@' did find a match in:" else grep "$@" && return 0 - echo >&2 "error: 'grep $@' didn't find a match in:" + echo >&4 "error: 'grep $@' didn't find a match in:" fi if test -s "$last_arg" then - cat >&2 "$last_arg" + cat >&4 "$last_arg" else - echo >&2 "<File '$last_arg' is empty>" + echo >&4 "<File '$last_arg' is empty>" fi return 1 @@ -774,7 +774,7 @@ test_i18ngrep () { # not output anything when they fail. verbose () { "$@" && return 0 - echo >&2 "command failed: $(git rev-parse --sq-quote "$@")" + echo >&4 "command failed: $(git rev-parse --sq-quote "$@")" return 1 } @@ -782,7 +782,11 @@ verbose () { # otherwise. test_must_be_empty () { - if test -s "$1" + if ! test -f "$1" + then + echo "'$1' is missing" + return 1 + elif test -s "$1" then echo "'$1' is not empty, it contains:" cat "$1" @@ -892,8 +896,8 @@ test_write_lines () { } perl () { - command "$PERL_PATH" "$@" -} + command "$PERL_PATH" "$@" 2>&7 +} 7>&2 2>&4 # Is the value one of the various ways to spell a boolean true/false? test_normalize_bool () { @@ -1033,13 +1037,13 @@ test_env () { shift ;; *) - "$@" + "$@" 2>&7 exit ;; esac done ) -} +} 7>&2 2>&4 # Returns true if the numeric exit code in "$2" represents the expected signal # in "$1". Signals should be given numerically. @@ -1081,9 +1085,9 @@ nongit () { GIT_CEILING_DIRECTORIES=$(pwd) && export GIT_CEILING_DIRECTORIES && cd non-repo && - "$@" + "$@" 2>&7 ) -} +} 7>&2 2>&4 # convert stdin to pktline representation; note that empty input becomes an # empty packet, not a flush packet (for that you can just print 0000 yourself). diff --git a/t/test-lib.sh b/t/test-lib.sh index 816e692391..7740d511d2 100644 --- a/t/test-lib.sh +++ b/t/test-lib.sh @@ -264,7 +264,24 @@ do GIT_TEST_CHAIN_LINT=0 shift ;; -x) - trace=t + # Some test scripts can't be reliably traced with '-x', + # unless the test is run with a Bash version supporting + # BASH_XTRACEFD (introduced in Bash v4.1). Check whether + # this test is marked as such, and ignore '-x' if it + # isn't executed with a suitable Bash version. + if test -z "$test_untraceable" || { + test -n "$BASH_VERSION" && { + test ${BASH_VERSINFO[0]} -gt 4 || { + test ${BASH_VERSINFO[0]} -eq 4 && + test ${BASH_VERSINFO[1]} -ge 1 + } + } + } + then + trace=t + else + echo >&2 "warning: ignoring -x; '$0' is untraceable without BASH_XTRACEFD" + fi shift ;; --verbose-log) verbose_log=t @@ -940,7 +957,7 @@ then fi fi -GITPERLLIB="$GIT_BUILD_DIR"/perl/blib/lib:"$GIT_BUILD_DIR"/perl/blib/arch/auto/Git +GITPERLLIB="$GIT_BUILD_DIR"/perl/build/lib export GITPERLLIB test -d "$GIT_BUILD_DIR"/templates/blt || { error "You haven't built things yet, have you?" 
@@ -1107,6 +1124,10 @@ test_lazy_prereq EXPENSIVE ' test -n "$GIT_TEST_LONG" ' +test_lazy_prereq EXPENSIVE_ON_WINDOWS ' + test_have_prereq EXPENSIVE || test_have_prereq !MINGW,!CYGWIN +' + test_lazy_prereq USR_BIN_TIME ' test -x /usr/bin/time ' @@ -47,7 +47,7 @@ int gpg_verify_tag(const struct object_id *oid, const char *name_to_report, name_to_report ? name_to_report : find_unique_abbrev(oid->hash, DEFAULT_ABBREV), - typename(type)); + type_name(type)); buf = read_sha1_file(oid->hash, &type, &size); if (!buf) diff --git a/tempfile.c b/tempfile.c index 5fdafdd2d2..139ecd97f8 100644 --- a/tempfile.c +++ b/tempfile.c @@ -165,11 +165,11 @@ struct tempfile *register_tempfile(const char *path) return tempfile; } -struct tempfile *mks_tempfile_sm(const char *template, int suffixlen, int mode) +struct tempfile *mks_tempfile_sm(const char *filename_template, int suffixlen, int mode) { struct tempfile *tempfile = new_tempfile(); - strbuf_add_absolute_path(&tempfile->filename, template); + strbuf_add_absolute_path(&tempfile->filename, filename_template); tempfile->fd = git_mkstemps_mode(tempfile->filename.buf, suffixlen, mode); if (tempfile->fd < 0) { deactivate_tempfile(tempfile); @@ -179,7 +179,7 @@ struct tempfile *mks_tempfile_sm(const char *template, int suffixlen, int mode) return tempfile; } -struct tempfile *mks_tempfile_tsm(const char *template, int suffixlen, int mode) +struct tempfile *mks_tempfile_tsm(const char *filename_template, int suffixlen, int mode) { struct tempfile *tempfile = new_tempfile(); const char *tmpdir; @@ -188,7 +188,7 @@ struct tempfile *mks_tempfile_tsm(const char *template, int suffixlen, int mode) if (!tmpdir) tmpdir = "/tmp"; - strbuf_addf(&tempfile->filename, "%s/%s", tmpdir, template); + strbuf_addf(&tempfile->filename, "%s/%s", tmpdir, filename_template); tempfile->fd = git_mkstemps_mode(tempfile->filename.buf, suffixlen, mode); if (tempfile->fd < 0) { deactivate_tempfile(tempfile); @@ -198,12 +198,12 @@ struct tempfile *mks_tempfile_tsm(const char *template, int suffixlen, int mode) return tempfile; } -struct tempfile *xmks_tempfile_m(const char *template, int mode) +struct tempfile *xmks_tempfile_m(const char *filename_template, int mode) { struct tempfile *tempfile; struct strbuf full_template = STRBUF_INIT; - strbuf_add_absolute_path(&full_template, template); + strbuf_add_absolute_path(&full_template, filename_template); tempfile = mks_tempfile_m(full_template.buf, mode); if (!tempfile) die_errno("Unable to create temporary file '%s'", diff --git a/tempfile.h b/tempfile.h index 450908b2e0..8959c5f1b5 100644 --- a/tempfile.h +++ b/tempfile.h @@ -135,58 +135,58 @@ extern struct tempfile *register_tempfile(const char *path); */ /* See "mks_tempfile functions" above. */ -extern struct tempfile *mks_tempfile_sm(const char *template, +extern struct tempfile *mks_tempfile_sm(const char *filename_template, int suffixlen, int mode); /* See "mks_tempfile functions" above. */ -static inline struct tempfile *mks_tempfile_s(const char *template, +static inline struct tempfile *mks_tempfile_s(const char *filename_template, int suffixlen) { - return mks_tempfile_sm(template, suffixlen, 0600); + return mks_tempfile_sm(filename_template, suffixlen, 0600); } /* See "mks_tempfile functions" above. 
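
The EXPENSIVE_ON_WINDOWS prerequisite added to t/test-lib.sh above is a lazy prereq: it is evaluated the first time a test asks for it, and it is satisfied either when expensive tests are enabled (EXPENSIVE, i.e. GIT_TEST_LONG is set) or when the test is not running under MinGW/Cygwin. A test requests it the usual way; the title and body below are made up purely for illustration:

    test_expect_success EXPENSIVE_ON_WINDOWS 'operation that is only slow on Windows' '
    	git repack -ad &&
    	git fsck
    '
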
*/ -static inline struct tempfile *mks_tempfile_m(const char *template, int mode) +static inline struct tempfile *mks_tempfile_m(const char *filename_template, int mode) { - return mks_tempfile_sm(template, 0, mode); + return mks_tempfile_sm(filename_template, 0, mode); } /* See "mks_tempfile functions" above. */ -static inline struct tempfile *mks_tempfile(const char *template) +static inline struct tempfile *mks_tempfile(const char *filename_template) { - return mks_tempfile_sm(template, 0, 0600); + return mks_tempfile_sm(filename_template, 0, 0600); } /* See "mks_tempfile functions" above. */ -extern struct tempfile *mks_tempfile_tsm(const char *template, +extern struct tempfile *mks_tempfile_tsm(const char *filename_template, int suffixlen, int mode); /* See "mks_tempfile functions" above. */ -static inline struct tempfile *mks_tempfile_ts(const char *template, +static inline struct tempfile *mks_tempfile_ts(const char *filename_template, int suffixlen) { - return mks_tempfile_tsm(template, suffixlen, 0600); + return mks_tempfile_tsm(filename_template, suffixlen, 0600); } /* See "mks_tempfile functions" above. */ -static inline struct tempfile *mks_tempfile_tm(const char *template, int mode) +static inline struct tempfile *mks_tempfile_tm(const char *filename_template, int mode) { - return mks_tempfile_tsm(template, 0, mode); + return mks_tempfile_tsm(filename_template, 0, mode); } /* See "mks_tempfile functions" above. */ -static inline struct tempfile *mks_tempfile_t(const char *template) +static inline struct tempfile *mks_tempfile_t(const char *filename_template) { - return mks_tempfile_tsm(template, 0, 0600); + return mks_tempfile_tsm(filename_template, 0, 0600); } /* See "mks_tempfile functions" above. */ -extern struct tempfile *xmks_tempfile_m(const char *template, int mode); +extern struct tempfile *xmks_tempfile_m(const char *filename_template, int mode); /* See "mks_tempfile functions" above. 
*/ -static inline struct tempfile *xmks_tempfile(const char *template) +static inline struct tempfile *xmks_tempfile(const char *filename_template) { - return xmks_tempfile_m(template, 0600); + return xmks_tempfile_m(filename_template, 0600); } /* @@ -131,7 +131,6 @@ static void print_trace_line(struct trace_key *key, struct strbuf *buf) { strbuf_complete_line(buf); trace_write(key, buf->buf, buf->len); - strbuf_release(buf); } static void trace_vprintf_fl(const char *file, int line, struct trace_key *key, @@ -144,6 +143,7 @@ static void trace_vprintf_fl(const char *file, int line, struct trace_key *key, strbuf_vaddf(&buf, format, ap); print_trace_line(key, &buf); + strbuf_release(&buf); } static void trace_argv_vprintf_fl(const char *file, int line, @@ -157,8 +157,9 @@ static void trace_argv_vprintf_fl(const char *file, int line, strbuf_vaddf(&buf, format, ap); - sq_quote_argv(&buf, argv, 0); + sq_quote_argv_pretty(&buf, argv); print_trace_line(&trace_default_key, &buf); + strbuf_release(&buf); } void trace_strbuf_fl(const char *file, int line, struct trace_key *key, @@ -171,6 +172,7 @@ void trace_strbuf_fl(const char *file, int line, struct trace_key *key, strbuf_addbuf(&buf, data); print_trace_line(key, &buf); + strbuf_release(&buf); } static void trace_performance_vprintf_fl(const char *file, int line, @@ -190,6 +192,7 @@ static void trace_performance_vprintf_fl(const char *file, int line, } print_trace_line(&trace_perf_key, &buf); + strbuf_release(&buf); } #ifndef HAVE_VARIADIC_MACROS @@ -426,6 +429,6 @@ void trace_command_performance(const char **argv) atexit(print_command_performance_atexit); strbuf_reset(&command_line); - sq_quote_argv(&command_line, argv, 0); + sq_quote_argv_pretty(&command_line, argv); command_start_time = getnanotime(); } @@ -174,12 +174,12 @@ static void print_all(FILE *outfile, struct list_head *head, static struct trailer_item *trailer_from_arg(struct arg_item *arg_tok) { - struct trailer_item *new = xcalloc(sizeof(*new), 1); - new->token = arg_tok->token; - new->value = arg_tok->value; + struct trailer_item *new_item = xcalloc(sizeof(*new_item), 1); + new_item->token = arg_tok->token; + new_item->value = arg_tok->value; arg_tok->token = arg_tok->value = NULL; free_arg_item(arg_tok); - return new; + return new_item; } static void add_arg_to_input_list(struct trailer_item *on_tok, @@ -666,30 +666,30 @@ static void parse_trailer(struct strbuf *tok, struct strbuf *val, static struct trailer_item *add_trailer_item(struct list_head *head, char *tok, char *val) { - struct trailer_item *new = xcalloc(sizeof(*new), 1); - new->token = tok; - new->value = val; - list_add_tail(&new->list, head); - return new; + struct trailer_item *new_item = xcalloc(sizeof(*new_item), 1); + new_item->token = tok; + new_item->value = val; + list_add_tail(&new_item->list, head); + return new_item; } static void add_arg_item(struct list_head *arg_head, char *tok, char *val, const struct conf_info *conf, const struct new_trailer_item *new_trailer_item) { - struct arg_item *new = xcalloc(sizeof(*new), 1); - new->token = tok; - new->value = val; - duplicate_conf(&new->conf, conf); + struct arg_item *new_item = xcalloc(sizeof(*new_item), 1); + new_item->token = tok; + new_item->value = val; + duplicate_conf(&new_item->conf, conf); if (new_trailer_item) { if (new_trailer_item->where != WHERE_DEFAULT) - new->conf.where = new_trailer_item->where; + new_item->conf.where = new_trailer_item->where; if (new_trailer_item->if_exists != EXISTS_DEFAULT) - new->conf.if_exists = 
new_trailer_item->if_exists; + new_item->conf.if_exists = new_trailer_item->if_exists; if (new_trailer_item->if_missing != MISSING_DEFAULT) - new->conf.if_missing = new_trailer_item->if_missing; + new_item->conf.if_missing = new_trailer_item->if_missing; } - list_add_tail(&new->list, arg_head); + list_add_tail(&new_item->list, arg_head); } static void process_command_line_args(struct list_head *arg_head, @@ -1000,7 +1000,7 @@ static struct tempfile *trailers_tempfile; static FILE *create_in_place_tempfile(const char *file) { struct stat st; - struct strbuf template = STRBUF_INIT; + struct strbuf filename_template = STRBUF_INIT; const char *tail; FILE *outfile; @@ -1014,11 +1014,11 @@ static FILE *create_in_place_tempfile(const char *file) /* Create temporary file in the same directory as the original */ tail = strrchr(file, '/'); if (tail != NULL) - strbuf_add(&template, file, tail - file + 1); - strbuf_addstr(&template, "git-interpret-trailers-XXXXXX"); + strbuf_add(&filename_template, file, tail - file + 1); + strbuf_addstr(&filename_template, "git-interpret-trailers-XXXXXX"); - trailers_tempfile = xmks_tempfile_m(template.buf, st.st_mode); - strbuf_release(&template); + trailers_tempfile = xmks_tempfile_m(filename_template.buf, st.st_mode); + strbuf_release(&filename_template); outfile = fdopen_tempfile(trailers_tempfile, "w"); if (!outfile) die_errno(_("could not open temporary file")); diff --git a/transport-helper.c b/transport-helper.c index 5080150231..3f380d87d9 100644 --- a/transport-helper.c +++ b/transport-helper.c @@ -672,6 +672,11 @@ static int fetch(struct transport *transport, if (data->transport_options.update_shallow) set_helper_option(transport, "update-shallow", "true"); + if (data->transport_options.filter_options.choice) + set_helper_option( + transport, "filter", + data->transport_options.filter_options.filter_spec); + if (data->fetch) return fetch_with_fetch(transport, nr_heads, to_fetch); diff --git a/transport.c b/transport.c index fc802260f6..00d48b5b56 100644 --- a/transport.c +++ b/transport.c @@ -161,6 +161,15 @@ static int set_git_option(struct git_transport_options *opts, } else if (!strcmp(name, TRANS_OPT_DEEPEN_RELATIVE)) { opts->deepen_relative = !!value; return 0; + } else if (!strcmp(name, TRANS_OPT_FROM_PROMISOR)) { + opts->from_promisor = !!value; + return 0; + } else if (!strcmp(name, TRANS_OPT_NO_DEPENDENTS)) { + opts->no_dependents = !!value; + return 0; + } else if (!strcmp(name, TRANS_OPT_LIST_OBJECTS_FILTER)) { + parse_list_objects_filter(&opts->filter_options, value); + return 0; } return 1; } @@ -229,6 +238,9 @@ static int fetch_refs_via_pack(struct transport *transport, data->options.check_self_contained_and_connected; args.cloning = transport->cloning; args.update_shallow = data->options.update_shallow; + args.from_promisor = data->options.from_promisor; + args.no_dependents = data->options.no_dependents; + args.filter_options = data->options.filter_options; if (!data->got_remote_heads) { connect_setup(transport, 0); diff --git a/transport.h b/transport.h index 731c78b679..3c68d73b21 100644 --- a/transport.h +++ b/transport.h @@ -4,6 +4,7 @@ #include "cache.h" #include "run-command.h" #include "remote.h" +#include "list-objects-filter-options.h" struct string_list; @@ -15,12 +16,15 @@ struct git_transport_options { unsigned self_contained_and_connected : 1; unsigned update_shallow : 1; unsigned deepen_relative : 1; + unsigned from_promisor : 1; + unsigned no_dependents : 1; int depth; const char *deepen_since; const struct string_list 
*deepen_not; const char *uploadpack; const char *receivepack; struct push_cas_option *cas; + struct list_objects_filter_options filter_options; }; enum transport_family { @@ -159,6 +163,18 @@ void transport_check_allowed(const char *type); /* Send push certificates */ #define TRANS_OPT_PUSH_CERT "pushcert" +/* Indicate that these objects are being fetched by a promisor */ +#define TRANS_OPT_FROM_PROMISOR "from-promisor" + +/* + * Indicate that only the objects wanted need to be fetched, not their + * dependents + */ +#define TRANS_OPT_NO_DEPENDENTS "no-dependents" + +/* Filter objects for partial clone and fetch */ +#define TRANS_OPT_LIST_OBJECTS_FILTER "filter" + /** * Returns 0 if the option was used, non-zero otherwise. Prints a * message to stderr if the option is not used. diff --git a/unpack-trees.c b/unpack-trees.c index 96c3327f19..d5685891a5 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -15,6 +15,7 @@ #include "submodule.h" #include "submodule-config.h" #include "fsmonitor.h" +#include "fetch-object.h" /* * Error messages expected by scripts out of plumbing commands such as @@ -194,10 +195,10 @@ static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce, static struct cache_entry *dup_entry(const struct cache_entry *ce) { unsigned int size = ce_size(ce); - struct cache_entry *new = xmalloc(size); + struct cache_entry *new_entry = xmalloc(size); - memcpy(new, ce, size); - return new; + memcpy(new_entry, ce, size); + return new_entry; } static void add_entry(struct unpack_trees_options *o, @@ -370,6 +371,27 @@ static int check_updates(struct unpack_trees_options *o) load_gitmodules_file(index, &state); enable_delayed_checkout(&state); + if (repository_format_partial_clone && o->update && !o->dry_run) { + /* + * Prefetch the objects that are to be checked out in the loop + * below. 
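
Together with the upload-pack.c changes further below, these transport options let a client ask the server to omit objects ("filter") and let later code, such as the prefetch loop in unpack-trees.c that follows, fetch missing objects on demand. A rough end-to-end sketch of how this is meant to be used (the config key comes from the upload-pack.c hunk below; the client-side --filter plumbing is added elsewhere in this series, so treat the exact invocations as illustrative):

    # server side: allow clients to request object filtering
    git -C /srv/repo.git config uploadpack.allowfilter true

    # client side: clone without blobs; missing blobs are then fetched
    # lazily, e.g. in bulk by the checkout prefetch added below
    git clone --filter=blob:none file:///srv/repo.git partial-copy
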
+ */ + struct oid_array to_fetch = OID_ARRAY_INIT; + int fetch_if_missing_store = fetch_if_missing; + fetch_if_missing = 0; + for (i = 0; i < index->cache_nr; i++) { + struct cache_entry *ce = index->cache[i]; + if ((ce->ce_flags & CE_UPDATE) && + !S_ISGITLINK(ce->ce_mode)) { + if (!has_object_file(&ce->oid)) + oid_array_append(&to_fetch, &ce->oid); + } + } + if (to_fetch.nr) + fetch_objects(repository_format_partial_clone, + &to_fetch); + fetch_if_missing = fetch_if_missing_store; + } for (i = 0; i < index->cache_nr; i++) { struct cache_entry *ce = index->cache[i]; @@ -1506,7 +1528,7 @@ static void invalidate_ce_path(const struct cache_entry *ce, if (!ce) return; cache_tree_invalidate_path(o->src_index, ce->name); - untracked_cache_invalidate_path(o->src_index, ce->name); + untracked_cache_invalidate_path(o->src_index, ce->name, 1); } /* diff --git a/upload-pack.c b/upload-pack.c index d5de18127c..f51b6cfca9 100644 --- a/upload-pack.c +++ b/upload-pack.c @@ -10,6 +10,8 @@ #include "diff.h" #include "revision.h" #include "list-objects.h" +#include "list-objects-filter.h" +#include "list-objects-filter-options.h" #include "run-command.h" #include "connect.h" #include "sigchain.h" @@ -19,6 +21,7 @@ #include "argv-array.h" #include "prio-queue.h" #include "protocol.h" +#include "quote.h" static const char * const upload_pack_usage[] = { N_("git upload-pack [<options>] <dir>"), @@ -65,6 +68,10 @@ static int advertise_refs; static int stateless_rpc; static const char *pack_objects_hook; +static int filter_capability_requested; +static int filter_advertise; +static struct list_objects_filter_options filter_options; + static void reset_timeout(void) { alarm(timeout); @@ -132,6 +139,17 @@ static void create_pack_file(void) argv_array_push(&pack_objects.args, "--delta-base-offset"); if (use_include_tag) argv_array_push(&pack_objects.args, "--include-tag"); + if (filter_options.filter_spec) { + if (pack_objects.use_shell) { + struct strbuf buf = STRBUF_INIT; + sq_quote_buf(&buf, filter_options.filter_spec); + argv_array_pushf(&pack_objects.args, "--filter=%s", buf.buf); + strbuf_release(&buf); + } else { + argv_array_pushf(&pack_objects.args, "--filter=%s", + filter_options.filter_spec); + } + } pack_objects.in = -1; pack_objects.out = -1; @@ -795,6 +813,12 @@ static void receive_needs(void) deepen_rev_list = 1; continue; } + if (skip_prefix(line, "filter ", &arg)) { + if (!filter_capability_requested) + die("git upload-pack: filtering capability not negotiated"); + parse_list_objects_filter(&filter_options, arg); + continue; + } if (!skip_prefix(line, "want ", &arg) || get_oid_hex(arg, &oid_buf)) die("git upload-pack: protocol error, " @@ -822,6 +846,8 @@ static void receive_needs(void) no_progress = 1; if (parse_feature_request(features, "include-tag")) use_include_tag = 1; + if (parse_feature_request(features, "filter")) + filter_capability_requested = 1; o = parse_object(&oid_buf); if (!o) { @@ -941,7 +967,7 @@ static int send_ref(const char *refname, const struct object_id *oid, struct strbuf symref_info = STRBUF_INIT; format_symref_info(&symref_info, cb_data); - packet_write_fmt(1, "%s %s%c%s%s%s%s%s agent=%s\n", + packet_write_fmt(1, "%s %s%c%s%s%s%s%s%s agent=%s\n", oid_to_hex(oid), refname_nons, 0, capabilities, (allow_unadvertised_object_request & ALLOW_TIP_SHA1) ? @@ -950,6 +976,7 @@ static int send_ref(const char *refname, const struct object_id *oid, " allow-reachable-sha1-in-want" : "", stateless_rpc ? " no-done" : "", symref_info.buf, + filter_advertise ? 
" filter" : "", git_user_agent_sanitized()); strbuf_release(&symref_info); } else { @@ -1028,6 +1055,8 @@ static int upload_pack_config(const char *var, const char *value, void *unused) } else if (current_config_scope() != CONFIG_SCOPE_REPO) { if (!strcmp("uploadpack.packobjectshook", var)) return git_config_string(&pack_objects_hook, var, value); + } else if (!strcmp("uploadpack.allowfilter", var)) { + filter_advertise = git_config_bool(var, value); } return parse_hide_refs_config(var, value, "uploadpack"); } diff --git a/userdiff.c b/userdiff.c index dbfb4e13cd..a69241b25d 100644 --- a/userdiff.c +++ b/userdiff.c @@ -38,6 +38,15 @@ IPATTERN("fortran", "|//|\\*\\*|::|[/<>=]="), IPATTERN("fountain", "^((\\.[^.]|(int|ext|est|int\\.?/ext|i/e)[. ]).*)$", "[^ \t-]+"), +PATTERNS("golang", + /* Functions */ + "^[ \t]*(func[ \t]*.*(\\{[ \t]*)?)\n" + /* Structs and interfaces */ + "^[ \t]*(type[ \t].*(struct|interface)[ \t]*(\\{[ \t]*)?)", + /* -- */ + "[a-zA-Z_][a-zA-Z0-9_]*" + "|[-+0-9.eE]+i?|0[xX]?[0-9a-fA-F]+i?" + "|[-+*/<>%&^|=!:]=|--|\\+\\+|<<=?|>>=?|&\\^=?|&&|\\|\\||<-|\\.{3}"), PATTERNS("html", "^[ \t]*(<[Hh][1-6]([ \t].*)?>.*)$", "[^<>= \t]+"), PATTERNS("java", @@ -138,7 +147,7 @@ PATTERNS("csharp", /* Keywords */ "!^[ \t]*(do|while|for|if|else|instanceof|new|return|switch|case|throw|catch|using)\n" /* Methods and constructors */ - "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[<>@._[:alnum:]]+[ \t]*\\(.*\\))[ \t]*$\n" + "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe|async)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[<>@._[:alnum:]]+[ \t]*\\(.*\\))[ \t]*$\n" /* Properties */ "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[@._[:alnum:]]+)[ \t]*$\n" /* Type definitions */ @@ -22,7 +22,7 @@ void walker_say(struct walker *walker, const char *fmt, ...) static void report_missing(const struct object *obj) { fprintf(stderr, "Cannot obtain needed %s %s\n", - obj->type ? typename(obj->type): "object", + obj->type ? type_name(obj->type): "object", oid_to_hex(&obj->oid)); if (!is_null_oid(¤t_commit_oid)) fprintf(stderr, "while processing commit %s.\n", @@ -134,7 +134,7 @@ static int process_object(struct walker *walker, struct object *obj) } return error("Unable to determine requirements " "of type %s for %s", - typename(obj->type), oid_to_hex(&obj->oid)); + type_name(obj->type), oid_to_hex(&obj->oid)); } static int process(struct walker *walker, struct object *obj) diff --git a/worktree.c b/worktree.c index f5da7d286d..28989cf06e 100644 --- a/worktree.c +++ b/worktree.c @@ -254,6 +254,102 @@ const char *is_worktree_locked(struct worktree *wt) return wt->lock_reason; } +/* convenient wrapper to deal with NULL strbuf */ +static void strbuf_addf_gently(struct strbuf *buf, const char *fmt, ...) +{ + va_list params; + + if (!buf) + return; + + va_start(params, fmt); + strbuf_vaddf(buf, fmt, params); + va_end(params); +} + +int validate_worktree(const struct worktree *wt, struct strbuf *errmsg, + unsigned flags) +{ + struct strbuf wt_path = STRBUF_INIT; + char *path = NULL; + int err, ret = -1; + + strbuf_addf(&wt_path, "%s/.git", wt->path); + + if (is_main_worktree(wt)) { + if (is_directory(wt_path.buf)) { + ret = 0; + goto done; + } + /* + * Main worktree using .git file to point to the + * repository would make it impossible to know where + * the actual worktree is if this function is executed + * from another worktree. 
No .git file support for now. + */ + strbuf_addf_gently(errmsg, + _("'%s' at main working tree is not the repository directory"), + wt_path.buf); + goto done; + } + + /* + * Make sure "gitdir" file points to a real .git file and that + * file points back here. + */ + if (!is_absolute_path(wt->path)) { + strbuf_addf_gently(errmsg, + _("'%s' file does not contain absolute path to the working tree location"), + git_common_path("worktrees/%s/gitdir", wt->id)); + goto done; + } + + if (flags & WT_VALIDATE_WORKTREE_MISSING_OK && + !file_exists(wt->path)) { + ret = 0; + goto done; + } + + if (!file_exists(wt_path.buf)) { + strbuf_addf_gently(errmsg, _("'%s' does not exist"), wt_path.buf); + goto done; + } + + path = xstrdup_or_null(read_gitfile_gently(wt_path.buf, &err)); + if (!path) { + strbuf_addf_gently(errmsg, _("'%s' is not a .git file, error code %d"), + wt_path.buf, err); + goto done; + } + + ret = fspathcmp(path, real_path(git_common_path("worktrees/%s", wt->id))); + + if (ret) + strbuf_addf_gently(errmsg, _("'%s' does not point back to '%s'"), + wt->path, git_common_path("worktrees/%s", wt->id)); +done: + free(path); + strbuf_release(&wt_path); + return ret; +} + +void update_worktree_location(struct worktree *wt, const char *path_) +{ + struct strbuf path = STRBUF_INIT; + + if (is_main_worktree(wt)) + die("BUG: can't relocate main worktree"); + + strbuf_realpath(&path, path_, 1); + if (fspathcmp(wt->path, path.buf)) { + write_file(git_common_path("worktrees/%s/gitdir", wt->id), + "%s/.git", path.buf); + free(wt->path); + wt->path = strbuf_detach(&path, NULL); + } + strbuf_release(&path); +} + int is_worktree_being_rebased(const struct worktree *wt, const char *target) { diff --git a/worktree.h b/worktree.h index c28a880e18..fe38ce10c3 100644 --- a/worktree.h +++ b/worktree.h @@ -3,6 +3,8 @@ #include "refs.h" +struct strbuf; + struct worktree { char *path; char *id; @@ -59,6 +61,22 @@ extern int is_main_worktree(const struct worktree *wt); */ extern const char *is_worktree_locked(struct worktree *wt); +#define WT_VALIDATE_WORKTREE_MISSING_OK (1 << 0) + +/* + * Return zero if the worktree is in good condition. Error message is + * returned if "errmsg" is not NULL. + */ +extern int validate_worktree(const struct worktree *wt, + struct strbuf *errmsg, + unsigned flags); + +/* + * Update worktrees/xxx/gitdir with the new path. 
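
validate_worktree() and update_worktree_location(), declared here, rely on the back-and-forth linkage between a linked worktree and the main repository: the worktree's .git file names the main repository's .git/worktrees/<id> directory, and worktrees/<id>/gitdir points back at the worktree's .git file. validate_worktree() checks that linkage, and update_worktree_location() rewrites the gitdir file when the worktree is moved. An illustrative session (paths and the worktree name are examples only):

    $ git worktree add ../wt-topic
    $ cat ../wt-topic/.git
    gitdir: /path/to/repo/.git/worktrees/wt-topic
    $ cat .git/worktrees/wt-topic/gitdir
    /path/to/wt-topic/.git
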
+ */ +extern void update_worktree_location(struct worktree *wt, + const char *path_); + /* * Free up the memory for worktree(s) */ diff --git a/wrap-for-bin.sh b/wrap-for-bin.sh index 22b6e4948f..5842408817 100644 --- a/wrap-for-bin.sh +++ b/wrap-for-bin.sh @@ -14,7 +14,7 @@ else GIT_TEMPLATE_DIR='@@BUILD_DIR@@/templates/blt' export GIT_TEMPLATE_DIR fi -GITPERLLIB='@@BUILD_DIR@@/perl/blib/lib'"${GITPERLLIB:+:$GITPERLLIB}" +GITPERLLIB='@@BUILD_DIR@@/perl/build/lib'"${GITPERLLIB:+:$GITPERLLIB}" GIT_TEXTDOMAINDIR='@@BUILD_DIR@@/po/build/locale' PATH='@@BUILD_DIR@@/bin-wrappers:'"$PATH" @@ -445,21 +445,21 @@ FILE *fopen_or_warn(const char *path, const char *mode) return NULL; } -int xmkstemp(char *template) +int xmkstemp(char *filename_template) { int fd; char origtemplate[PATH_MAX]; - strlcpy(origtemplate, template, sizeof(origtemplate)); + strlcpy(origtemplate, filename_template, sizeof(origtemplate)); - fd = mkstemp(template); + fd = mkstemp(filename_template); if (fd < 0) { int saved_errno = errno; const char *nonrelative_template; - if (strlen(template) != strlen(origtemplate)) - template = origtemplate; + if (strlen(filename_template) != strlen(origtemplate)) + filename_template = origtemplate; - nonrelative_template = absolute_path(template); + nonrelative_template = absolute_path(filename_template); errno = saved_errno; die_errno("Unable to create temporary file '%s'", nonrelative_template); @@ -481,7 +481,7 @@ int git_mkstemps_mode(char *pattern, int suffix_len, int mode) static const int num_letters = 62; uint64_t value; struct timeval tv; - char *template; + char *filename_template; size_t len; int fd, count; @@ -503,16 +503,16 @@ int git_mkstemps_mode(char *pattern, int suffix_len, int mode) */ gettimeofday(&tv, NULL); value = ((size_t)(tv.tv_usec << 16)) ^ tv.tv_sec ^ getpid(); - template = &pattern[len - 6 - suffix_len]; + filename_template = &pattern[len - 6 - suffix_len]; for (count = 0; count < TMP_MAX; ++count) { uint64_t v = value; /* Fill in the random bits. 
*/ - template[0] = letters[v % num_letters]; v /= num_letters; - template[1] = letters[v % num_letters]; v /= num_letters; - template[2] = letters[v % num_letters]; v /= num_letters; - template[3] = letters[v % num_letters]; v /= num_letters; - template[4] = letters[v % num_letters]; v /= num_letters; - template[5] = letters[v % num_letters]; v /= num_letters; + filename_template[0] = letters[v % num_letters]; v /= num_letters; + filename_template[1] = letters[v % num_letters]; v /= num_letters; + filename_template[2] = letters[v % num_letters]; v /= num_letters; + filename_template[3] = letters[v % num_letters]; v /= num_letters; + filename_template[4] = letters[v % num_letters]; v /= num_letters; + filename_template[5] = letters[v % num_letters]; v /= num_letters; fd = open(pattern, O_CREAT | O_EXCL | O_RDWR, mode); if (fd >= 0) @@ -541,21 +541,21 @@ int git_mkstemp_mode(char *pattern, int mode) return git_mkstemps_mode(pattern, 0, mode); } -int xmkstemp_mode(char *template, int mode) +int xmkstemp_mode(char *filename_template, int mode) { int fd; char origtemplate[PATH_MAX]; - strlcpy(origtemplate, template, sizeof(origtemplate)); + strlcpy(origtemplate, filename_template, sizeof(origtemplate)); - fd = git_mkstemp_mode(template, mode); + fd = git_mkstemp_mode(filename_template, mode); if (fd < 0) { int saved_errno = errno; const char *nonrelative_template; - if (!template[0]) - template = origtemplate; + if (!filename_template[0]) + filename_template = origtemplate; - nonrelative_template = absolute_path(template); + nonrelative_template = absolute_path(filename_template); errno = saved_errno; die_errno("Unable to create temporary file '%s'", nonrelative_template); diff --git a/wt-status.c b/wt-status.c index f5debcd2b4..66f4234af1 100644 --- a/wt-status.c +++ b/wt-status.c @@ -136,6 +136,7 @@ void wt_status_prepare(struct wt_status *s) s->ignored.strdup_strings = 1; s->show_branch = -1; /* unspecified */ s->show_stash = 0; + s->ahead_behind_flags = AHEAD_BEHIND_UNSPECIFIED; s->display_comment_prefix = 0; } @@ -1032,7 +1033,7 @@ static void wt_longstatus_print_tracking(struct wt_status *s) if (!skip_prefix(s->branch, "refs/heads/", &branch_name)) return; branch = branch_get(branch_name); - if (!format_tracking_info(branch, &sb)) + if (!format_tracking_info(branch, &sb, s->ahead_behind_flags)) return; i = 0; @@ -1793,7 +1794,7 @@ static void wt_shortstatus_print_tracking(struct wt_status *s) const char *base; char *short_base; const char *branch_name; - int num_ours, num_theirs; + int num_ours, num_theirs, sti; int upstream_is_gone = 0; color_fprintf(s->fp, color(WT_STATUS_HEADER, s), "## "); @@ -1819,7 +1820,9 @@ static void wt_shortstatus_print_tracking(struct wt_status *s) color_fprintf(s->fp, branch_color_local, "%s", branch_name); - if (stat_tracking_info(branch, &num_ours, &num_theirs, &base) < 0) { + sti = stat_tracking_info(branch, &num_ours, &num_theirs, &base, + s->ahead_behind_flags); + if (sti < 0) { if (!base) goto conclude; @@ -1831,12 +1834,14 @@ static void wt_shortstatus_print_tracking(struct wt_status *s) color_fprintf(s->fp, branch_color_remote, "%s", short_base); free(short_base); - if (!upstream_is_gone && !num_ours && !num_theirs) + if (!upstream_is_gone && !sti) goto conclude; color_fprintf(s->fp, header_color, " ["); if (upstream_is_gone) { color_fprintf(s->fp, header_color, LABEL(N_("gone"))); + } else if (s->ahead_behind_flags == AHEAD_BEHIND_QUICK) { + color_fprintf(s->fp, header_color, LABEL(N_("different"))); } else if (!num_ours) { color_fprintf(s->fp, 
header_color, LABEL(N_("behind "))); color_fprintf(s->fp, branch_color_remote, "%d", num_theirs); @@ -1905,18 +1910,19 @@ static void wt_porcelain_print(struct wt_status *s) * * <upstream> ::= the upstream branch name, when set. * - * <ahead> ::= integer ahead value, when upstream set - * and the commit is present (not gone). - * - * <behind> ::= integer behind value, when upstream set - * and commit is present. + * <ahead> ::= integer ahead value or '?'. * + * <behind> ::= integer behind value or '?'. * * The end-of-line is defined by the -z flag. * * <eol> ::= NUL when -z, * LF when NOT -z. * + * When an upstream is set and present, the 'branch.ab' line will + * be printed with the ahead/behind counts for the branch and the + * upstream. When AHEAD_BEHIND_QUICK is requested and the branches + * are different, '?' will be substituted for the actual count. */ static void wt_porcelain_v2_print_tracking(struct wt_status *s) { @@ -1956,14 +1962,25 @@ static void wt_porcelain_v2_print_tracking(struct wt_status *s) /* Lookup stats on the upstream tracking branch, if set. */ branch = branch_get(branch_name); base = NULL; - ab_info = (stat_tracking_info(branch, &nr_ahead, &nr_behind, &base) == 0); + ab_info = stat_tracking_info(branch, &nr_ahead, &nr_behind, + &base, s->ahead_behind_flags); if (base) { base = shorten_unambiguous_ref(base, 0); fprintf(s->fp, "# branch.upstream %s%c", base, eol); free((char *)base); - if (ab_info) - fprintf(s->fp, "# branch.ab +%d -%d%c", nr_ahead, nr_behind, eol); + if (ab_info > 0) { + /* different */ + if (nr_ahead || nr_behind) + fprintf(s->fp, "# branch.ab +%d -%d%c", + nr_ahead, nr_behind, eol); + else + fprintf(s->fp, "# branch.ab +? -?%c", + eol); + } else if (!ab_info) { + /* same */ + fprintf(s->fp, "# branch.ab +0 -0%c", eol); + } } } diff --git a/wt-status.h b/wt-status.h index 3f84d5c29f..ea2456daf2 100644 --- a/wt-status.h +++ b/wt-status.h @@ -5,6 +5,7 @@ #include "string-list.h" #include "color.h" #include "pathspec.h" +#include "remote.h" struct worktree; @@ -87,6 +88,7 @@ struct wt_status { int show_branch; int show_stash; int hints; + enum ahead_behind_flags ahead_behind_flags; enum wt_status_format status_format; unsigned char sha1_commit[GIT_MAX_RAWSZ]; /* when not Initial */ |
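
The new ahead_behind_flags field lets status callers choose between the full (potentially expensive) ahead/behind computation and a quick "does the branch differ from its upstream at all?" check (AHEAD_BEHIND_QUICK); in the quick mode the unknown counts are printed as '?' in porcelain v2 and as "different" in the short format. An illustrative porcelain v2 session follows; the command-line knob that selects the quick mode is wired up elsewhere in this series, and the values below are placeholders:

    $ git status --branch --porcelain=v2 --no-ahead-behind
    # branch.oid <commit>
    # branch.head topic
    # branch.upstream origin/topic
    # branch.ab +? -?
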
