-rw-r--r--  .clang-format | 2
-rw-r--r--  .github/workflows/main.yml | 4
-rw-r--r--  .gitignore | 2
-rw-r--r--  .gitlab-ci.yml | 33
-rw-r--r--  .mailmap | 1
-rw-r--r--  Cargo.toml | 9
-rw-r--r--  Documentation/BreakingChanges.adoc | 47
-rw-r--r--  Documentation/Makefile | 8
-rw-r--r--  Documentation/RelNotes/2.51.1.adoc | 99
-rw-r--r--  Documentation/RelNotes/2.52.0.adoc | 81
-rw-r--r--  Documentation/config.adoc | 4
-rw-r--r--  Documentation/config/alias.adoc | 3
-rw-r--r--  Documentation/config/extensions.adoc | 26
-rw-r--r--  Documentation/config/stash.adoc | 29
-rw-r--r--  Documentation/config/tag.adoc | 22
-rw-r--r--  Documentation/config/worktree.adoc | 14
-rw-r--r--  Documentation/fsck-msgids.adoc | 6
-rw-r--r--  Documentation/git-fast-import.adoc | 5
-rw-r--r--  Documentation/git-merge-tree.adoc | 14
-rw-r--r--  Documentation/git-pack-refs.adoc | 53
-rw-r--r--  Documentation/git-patch-id.adoc | 43
-rw-r--r--  Documentation/git-push.adoc | 245
-rw-r--r--  Documentation/git-refs.adoc | 10
-rw-r--r--  Documentation/git-stash.adoc | 134
-rw-r--r--  Documentation/git-tag.adoc | 173
-rw-r--r--  Documentation/git-whatchanged.adoc | 8
-rw-r--r--  Documentation/git-worktree.adoc | 161
-rw-r--r--  Documentation/git.adoc | 3
-rw-r--r--  Documentation/gitcli.adoc | 14
-rw-r--r--  Documentation/gitcredentials.adoc | 15
-rw-r--r--  Documentation/howto/meson.build | 4
-rw-r--r--  Documentation/meson.build | 8
-rw-r--r--  Documentation/pack-refs-options.adoc | 52
-rw-r--r--  Documentation/pretty-formats.adoc | 6
-rw-r--r--  Documentation/pretty-options.adoc | 11
-rw-r--r--  Documentation/technical/meson.build | 4
-rw-r--r--  Documentation/urls-remotes.adoc | 42
-rw-r--r--  Makefile | 219
-rw-r--r--  add-interactive.c | 7
-rw-r--r--  add-patch.c | 15
-rw-r--r--  blame.c | 3
-rw-r--r--  bloom.c | 8
-rw-r--r--  builtin/add.c | 7
-rw-r--r--  builtin/backfill.c | 2
-rw-r--r--  builtin/cat-file.c | 3
-rw-r--r--  builtin/config.c | 20
-rw-r--r--  builtin/count-objects.c | 3
-rw-r--r--  builtin/fast-export.c | 19
-rw-r--r--  builtin/fast-import.c | 73
-rw-r--r--  builtin/fsck.c | 11
-rw-r--r--  builtin/gc.c | 14
-rw-r--r--  builtin/grep.c | 2
-rw-r--r--  builtin/index-pack.c | 10
-rw-r--r--  builtin/log.c | 15
-rw-r--r--  builtin/pack-objects.c | 38
-rw-r--r--  builtin/pack-redundant.c | 8
-rw-r--r--  builtin/pack-refs.c | 54
-rw-r--r--  builtin/range-diff.c | 16
-rw-r--r--  builtin/receive-pack.c | 2
-rw-r--r--  builtin/reflog.c | 2
-rw-r--r--  builtin/refs.c | 17
-rw-r--r--  builtin/repack.c | 11
-rw-r--r--  builtin/unpack-objects.c | 5
-rw-r--r--  builtin/update-index.c | 29
-rw-r--r--  bulk-checkin.c | 403
-rw-r--r--  bulk-checkin.h | 61
-rw-r--r--  cache-tree.c | 5
-rwxr-xr-x  ci/install-dependencies.sh | 17
-rwxr-xr-x  ci/run-build-and-tests.sh | 31
-rwxr-xr-x  ci/test-documentation.sh | 4
-rw-r--r--  commit-graph.c | 116
-rw-r--r--  commit-graph.h | 12
-rw-r--r--  config.c | 16
-rw-r--r--  connected.c | 5
-rw-r--r--  contrib/contacts/meson.build | 4
-rw-r--r--  contrib/subtree/meson.build | 4
-rw-r--r--  dir.c | 18
-rw-r--r--  fetch-pack.c | 4
-rw-r--r--  fsck.h | 39
-rw-r--r--  git-compat-util.h | 2
-rw-r--r--  git.c | 91
-rwxr-xr-x  gitk-git/gitk | 283
-rw-r--r--  gpg-interface.c | 17
-rw-r--r--  gpg-interface.h | 15
-rw-r--r--  help.c | 6
-rw-r--r--  http-backend.c | 5
-rw-r--r--  http-push.c | 3
-rw-r--r--  http.c | 5
-rw-r--r--  http.h | 12
-rw-r--r--  imap-send.c | 2
-rw-r--r--  log-tree.c | 3
-rw-r--r--  mailmap.c | 10
-rw-r--r--  meson.build | 28
-rw-r--r--  meson_options.txt | 2
-rw-r--r--  midx.c | 29
-rw-r--r--  midx.h | 1
-rw-r--r--  object-file.c | 404
-rw-r--r--  object-file.h | 16
-rw-r--r--  object-name.c | 6
-rw-r--r--  odb.c | 50
-rw-r--r--  odb.h | 82
-rw-r--r--  pack-bitmap.c | 4
-rw-r--r--  pack-objects.c | 4
-rw-r--r--  pack-refs.c | 56
-rw-r--r--  pack-refs.h | 23
-rw-r--r--  packfile.c | 283
-rw-r--r--  packfile.h | 125
-rw-r--r--  parse-options.c | 31
-rw-r--r--  range-diff.c | 10
-rw-r--r--  range-diff.h | 2
-rw-r--r--  read-cache.c | 11
-rw-r--r--  refs.c | 22
-rw-r--r--  refs.h | 6
-rw-r--r--  refs/debug.c | 1
-rw-r--r--  refs/files-backend.c | 13
-rw-r--r--  refs/ref-cache.c | 2
-rw-r--r--  refs/refs-internal.h | 3
-rw-r--r--  refs/reftable-backend.c | 65
-rw-r--r--  reftable/basics.c | 37
-rw-r--r--  reftable/basics.h | 7
-rw-r--r--  reftable/fsck.c | 100
-rw-r--r--  reftable/reftable-fsck.h | 40
-rw-r--r--  reftable/stack.c | 7
-rw-r--r--  remote-curl.c | 14
-rw-r--r--  revision.c | 3
-rw-r--r--  revision.h | 2
-rw-r--r--  server-info.c | 3
-rw-r--r--  shared.mak | 1
-rwxr-xr-x  src/cargo-meson.sh | 32
-rw-r--r--  src/lib.rs | 1
-rw-r--r--  src/meson.build | 41
-rw-r--r--  src/varint.rs | 92
-rw-r--r--  string-list.c | 29
-rw-r--r--  string-list.h | 12
-rw-r--r--  t/helper/test-find-pack.c | 2
-rw-r--r--  t/helper/test-pack-deltas.c | 10
-rw-r--r--  t/helper/test-pack-mtimes.c | 2
-rw-r--r--  t/meson.build | 3
-rw-r--r--  t/pack-refs-tests.sh | 431
-rwxr-xr-x  t/t0014-alias.sh | 57
-rwxr-xr-x  t/t0300-credentials.sh | 19
-rwxr-xr-x  t/t0601-reffiles-pack-refs.sh | 430
-rwxr-xr-x  t/t0614-reftable-fsck.sh | 58
-rwxr-xr-x  t/t1300-config.sh | 349
-rwxr-xr-x  t/t1421-reflog-write.sh | 36
-rwxr-xr-x  t/t1463-refs-optimize.sh | 17
-rwxr-xr-x  t/t3206-range-diff.sh | 16
-rwxr-xr-x  t/t3701-add-interactive.sh | 31
-rwxr-xr-x  t/t6302-for-each-ref-filter.sh | 65
-rwxr-xr-x  t/t7500-commit-template-squash-signoff.sh | 38
-rwxr-xr-x  t/t8020-last-modified.sh | 34
-rwxr-xr-x  t/t9305-fast-import-signatures.sh | 106
-rw-r--r--  t/unit-tests/u-reftable-basics.c | 24
-rw-r--r--  transport-helper.c | 2
-rw-r--r--  usage.c | 33
-rw-r--r--  varint.c | 6
-rw-r--r--  varint.h | 4
-rw-r--r--  wrapper.c | 13
-rw-r--r--  wrapper.h | 4
-rw-r--r--  xdiff/xdiffi.c | 102
-rw-r--r--  xdiff/xdiffi.h | 11
-rw-r--r--  xdiff/xemit.c | 38
-rw-r--r--  xdiff/xhistogram.c | 10
-rw-r--r--  xdiff/xmerge.c | 56
-rw-r--r--  xdiff/xpatience.c | 18
-rw-r--r--  xdiff/xprepare.c | 346
-rw-r--r--  xdiff/xtypes.h | 9
-rw-r--r--  xdiff/xutils.c | 16
168 files changed, 4417 insertions, 2880 deletions
diff --git a/.clang-format b/.clang-format
index dcfd0aad60..86b4fe33e5 100644
--- a/.clang-format
+++ b/.clang-format
@@ -149,7 +149,7 @@ SpaceBeforeCaseColon: false
# f();
# }
# }
-SpaceBeforeParens: ControlStatements
+SpaceBeforeParens: ControlStatementsExceptControlMacros
# Don't insert spaces inside empty '()'
SpaceInEmptyParentheses: false
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index d122e79415..393ea4d1cc 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -379,6 +379,8 @@ jobs:
- jobname: linux-breaking-changes
cc: gcc
image: ubuntu:rolling
+ - jobname: fedora-breaking-changes-meson
+ image: fedora:latest
- jobname: linux-leaks
image: ubuntu:rolling
cc: gcc
@@ -396,8 +398,6 @@ jobs:
# Supported until 2025-04-02.
- jobname: linux32
image: i386/ubuntu:focal
- - jobname: pedantic
- image: fedora:latest
# A RHEL 8 compatible distro. Supported until 2029-05-31.
- jobname: almalinux-8
image: almalinux:8
diff --git a/.gitignore b/.gitignore
index 802ce70e48..78a45cb5be 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,6 @@
/fuzz_corpora
+/target/
+/Cargo.lock
/GIT-BUILD-DIR
/GIT-BUILD-OPTIONS
/GIT-CFLAGS
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index cf122e706f..f7d57d1ee9 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -45,6 +45,8 @@ test:linux:
- jobname: linux-breaking-changes
image: ubuntu:20.04
CC: gcc
+ - jobname: fedora-breaking-changes-meson
+ image: fedora:latest
- jobname: linux-TEST-vars
image: ubuntu:20.04
CC: gcc
@@ -58,8 +60,6 @@ test:linux:
- jobname: linux-asan-ubsan
image: ubuntu:rolling
CC: clang
- - jobname: pedantic
- image: fedora:latest
- jobname: linux-musl-meson
image: alpine:latest
- jobname: linux32
@@ -70,6 +70,8 @@ test:linux:
artifacts:
paths:
- t/failed-test-artifacts
+ reports:
+ junit: build/meson-logs/testlog.junit.xml
when: on_failure
test:osx:
@@ -110,8 +112,16 @@ test:osx:
artifacts:
paths:
- t/failed-test-artifacts
+ reports:
+ junit: build/meson-logs/testlog.junit.xml
when: on_failure
+.windows_before_script: &windows_before_script
+ # Disabling realtime monitoring fails on some of the runners, but it
+ # significantly speeds up test execution in the case where it works. We thus
+ # try our luck, but ignore any failures.
+ - Set-MpPreference -DisableRealtimeMonitoring $true; $true
+
build:mingw64:
stage: build
tags:
@@ -119,7 +129,7 @@ build:mingw64:
variables:
NO_PERL: 1
before_script:
- - Set-MpPreference -DisableRealtimeMonitoring $true
+ - *windows_before_script
- ./ci/install-sdk.ps1 -directory "git-sdk"
script:
- git-sdk/usr/bin/bash.exe -l -c 'ci/make-test-artifacts.sh artifacts'
@@ -136,7 +146,7 @@ test:mingw64:
- job: "build:mingw64"
artifacts: true
before_script:
- - Set-MpPreference -DisableRealtimeMonitoring $true
+ - *windows_before_script
- git-sdk/usr/bin/bash.exe -l -c 'tar xf artifacts/artifacts.tar.gz'
- New-Item -Path .git/info -ItemType Directory
- New-Item .git/info/exclude -ItemType File -Value "/git-sdk"
@@ -150,18 +160,10 @@ test:mingw64:
tags:
- saas-windows-medium-amd64
before_script:
- - Set-MpPreference -DisableRealtimeMonitoring $true
- - choco install -y git meson ninja openssl
+ - *windows_before_script
+ - choco install -y git meson ninja
- Import-Module $env:ChocolateyInstall\helpers\chocolateyProfile.psm1
- refreshenv
- # The certificate store for Python on Windows is broken and fails to fetch
- # certificates, see https://bugs.python.org/issue36011. This seems to
- # mostly be an issue with how the GitLab image is set up as it is a
- # non-issue on GitHub Actions. Work around the issue by importing
- # cetrificates manually.
- - Invoke-WebRequest https://curl.haxx.se/ca/cacert.pem -OutFile cacert.pem
- - openssl pkcs12 -export -nokeys -in cacert.pem -out certs.pfx -passout "pass:"
- - Import-PfxCertificate -CertStoreLocation Cert:\LocalMachine\Root -FilePath certs.pfx
build:msvc-meson:
extends: .msvc-meson
@@ -183,6 +185,9 @@ test:msvc-meson:
script:
- meson test -C build --no-rebuild --print-errorlogs --slice $Env:CI_NODE_INDEX/$Env:CI_NODE_TOTAL
parallel: 10
+ artifacts:
+ reports:
+ junit: build/meson-logs/testlog.junit.xml
test:fuzz-smoke-tests:
image: ubuntu:latest
diff --git a/.mailmap b/.mailmap
index afa21abbaa..7b3198171f 100644
--- a/.mailmap
+++ b/.mailmap
@@ -126,6 +126,7 @@ Jon Loeliger <jdl@jdl.com> <jdl@freescale.org>
Jon Seymour <jon.seymour@gmail.com> <jon@blackcubes.dyndns.org>
Jonathan Nieder <jrnieder@gmail.com> <jrnieder@uchicago.edu>
Jonathan del Strother <jon.delStrother@bestbefore.tv> <maillist@steelskies.com>
+Jonathan Tan <jonathantanmy@fastmail.com> <jonathantanmy@google.com>
Josh Triplett <josh@joshtriplett.org> <josh@freedesktop.org>
Josh Triplett <josh@joshtriplett.org> <josht@us.ibm.com>
Julian Phillips <julian@quantumfyre.co.uk> <jp3@quantumfyre.co.uk>
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000000..45c9b34981
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "gitcore"
+version = "0.1.0"
+edition = "2018"
+
+[lib]
+crate-type = ["staticlib"]
+
+[dependencies]
diff --git a/Documentation/BreakingChanges.adoc b/Documentation/BreakingChanges.adoc
index 0cba20fadb..90b53abcea 100644
--- a/Documentation/BreakingChanges.adoc
+++ b/Documentation/BreakingChanges.adoc
@@ -171,6 +171,51 @@ JGit, libgit2 and Gitoxide need to support it.
matches the default branch name used in new repositories by many of the
big Git forges.
+* Git will require Rust as a mandatory part of the build process. While Git
+ already started to adopt Rust in Git 2.49, all parts written in Rust are
+ optional for the time being. This includes:
++
+ ** The Rust wrapper around libgit.a that is part of "contrib/" and which has
+ been introduced in Git 2.49.
+ ** Subsystems that have an alternative implementation in Rust to test
+ interoperability between our C and Rust codebase.
+ ** Newly written features that are not mission critical for a fully functional
+ Git client.
++
+These changes are meant as test balloons to allow distributors of Git to prepare
+for Rust becoming a mandatory part of the build process. There will be multiple
+milestones for the introduction of Rust:
++
+--
+1. Initially, with Git 2.52, support for Rust will be auto-detected by Meson and
+ disabled in our Makefile so that the project can sort out the initial
+ infrastructure.
+2. In Git 2.53, both build systems will default-enable support for Rust.
+ Consequently, builds will break by default if Rust is not available on the
+ build host. The use of Rust can still be explicitly disabled via build
+ flags.
+3. In Git 3.0, the build options will be removed and support for Rust will
+   be mandatory.
+--
++
+You can explicitly ask both Meson and our Makefile-based system to enable Rust
+by saying `meson configure -Drust=enabled` and `make WITH_RUST=YesPlease`,
+respectively.
++
+The Git project will declare the last version before Git 3.0 to be a long-term
+support release. This long-term release will receive important bug fixes for at
+least four release cycles and security fixes for six release cycles. The Git
+project will hand over maintainership of the long-term release to distributors
+in case they need to extend the life of that long-term release even further.
+Details of how this long-term release will be handed over to the community will
+be discussed once the Git project decides to stop officially supporting it.
++
+We will evaluate the impact on downstream distributions before making Rust
+mandatory in Git 3.0. If we see that the impact on downstream distributions
+would be significant, we may decide to defer this change to a subsequent minor
+release. This evaluation will also take into account our own experience with
+how painful it is to keep Rust an optional component.
+
=== Removals
* Support for grafting commits has long been superseded by git-replace(1).
@@ -241,7 +286,7 @@ These features will be removed.
equivalent `git log --raw`. We have nominated the command for
removal, have changed the command to refuse to work unless the
`--i-still-use-this` option is given, and asked the users to report
- when they do so. So far there hasn't been a single complaint.
+ when they do so.
+
The command will be removed.
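Editor's note on the BreakingChanges hunk above: a minimal sketch of opting into
the currently optional Rust parts with either build system. Only the
`-Drust=enabled` and `WITH_RUST=YesPlease` knobs come from the text; the build
directory name is an assumption.

    # Meson: enable the optional Rust bits in an existing build directory
    meson configure build -Drust=enabled
    ninja -C build

    # Makefile: opt in explicitly
    make WITH_RUST=YesPlease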
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 6fb83d0c6e..a3fbd29744 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -119,18 +119,26 @@ TECH_DOCS += ToolsForGit
TECH_DOCS += technical/bitmap-format
TECH_DOCS += technical/build-systems
TECH_DOCS += technical/bundle-uri
+TECH_DOCS += technical/commit-graph
+TECH_DOCS += technical/directory-rename-detection
TECH_DOCS += technical/hash-function-transition
TECH_DOCS += technical/long-running-process-protocol
TECH_DOCS += technical/multi-pack-index
+TECH_DOCS += technical/packfile-uri
TECH_DOCS += technical/pack-heuristics
TECH_DOCS += technical/parallel-checkout
TECH_DOCS += technical/partial-clone
TECH_DOCS += technical/platform-support
TECH_DOCS += technical/racy-git
TECH_DOCS += technical/reftable
+TECH_DOCS += technical/remembering-renames
+TECH_DOCS += technical/repository-version
+TECH_DOCS += technical/rerere
TECH_DOCS += technical/scalar
TECH_DOCS += technical/send-pack-pipeline
TECH_DOCS += technical/shallow
+TECH_DOCS += technical/sparse-checkout
+TECH_DOCS += technical/sparse-index
TECH_DOCS += technical/trivial-merge
TECH_DOCS += technical/unit-tests
SP_ARTICLES += $(TECH_DOCS)
diff --git a/Documentation/RelNotes/2.51.1.adoc b/Documentation/RelNotes/2.51.1.adoc
new file mode 100644
index 0000000000..b8bd49c876
--- /dev/null
+++ b/Documentation/RelNotes/2.51.1.adoc
@@ -0,0 +1,99 @@
+Git 2.51.1 Release Notes
+========================
+
+There shouldn't be anything exciting to see here. This is primarily
+to flush the "do you still use it?" improvements that has landed on
+the master front, together with a handful of low-hanging, low-impact
+fixes that should be safe.
+
+
+Fixes since Git 2.51.0
+----------------------
+
+ * The "do you still use it?" message given by a command that is
+   deeply deprecated and allows us to suggest alternatives has been
+ updated.
+
+ * The compatObjectFormat extension is used to hide an incomplete
+ feature that is not yet usable for any purpose other than
+ developing the feature further. Document it as such to discourage
+ its use by mere mortals.
+
+ * Manual page for "gitk" is updated with the current maintainer's
+ name.
+
+ * Update the instructions for using GGG in the MyFirstContribution
+ document to say that a GitHub PR could be made against `git/git`
+ instead of `gitgitgadget/git`.
+
+ * Clang-format update to let our control macros be formatted the way we
+ had them traditionally, e.g., "for_each_string_list_item()" without
+ space before the parentheses.
+
+ * A few places where a size_t value was cast to curl_off_t without
+   checking have been updated to use the existing helper function.
+
+ * The start_delayed_progress() function in the progress eye-candy API
+ did not clear its internal state, making an initial delay value
+ larger than 1 second ineffective, which has been corrected.
+
+ * Makefile tried to run multiple "cargo build" which would not work
+ very well; serialize their execution to work around this problem.
+
+ * Adjust to the way newer versions of cURL selectively enable tracing
+ options, so that our tests can continue to work.
+
+ * During interactive rebase, using 'drop' on a merge commit led to
+ an error, which has been corrected.
+
+ * "git refs migrate" to migrate the reflog entries from a refs
+ backend to another had a handful of bugs squashed.
+
+ * "git push" had a code path that led to BUG() but it should have
+ been a die(), as it is a response to a usual but invalid end-user
+ action to attempt pushing an object that does not exist.
+
+ * Various bugs about rename handling in "ort" merge strategy have
+ been fixed.
+
+ * "git diff --no-index" run inside a subdirectory under control of a
+ Git repository operated at the top of the working tree and stripped
+ the prefix from the output, and oddballs like "-" (stdin) did not
+ work correctly because of it. Correct the set-up by undoing what
+ the set-up sequence did to cwd and prefix.
+
+ * Various options to "git diff" that make comparison ignore certain
+ aspects of the differences (like "space changes are ignored",
+ "differences in lines that match these regular expressions are
+ ignored") did not work well with "--name-only" and friends.
+
+ * Under a race against another process that is repacking the
+ repository, especially a partially cloned one, "git fetch" may
+ mistakenly think some objects we do have are missing, which has
+ been corrected.
+
+ * "git repack --path-walk" lost objects in some corner cases, which
+ has been corrected.
+ cf. <CABPp-BHFxxGrqKc0m==TjQNjDGdO=H5Rf6EFsf2nfE1=TuraOQ@mail.gmail.com>
+
+ * Fixes multiple crashes around midx write-out codepaths.
+
+ * A broken or malicious "git fetch" can say that it has the same
+ object for many many times, and the upload-pack serving it can
+ exhaust memory storing them redundantly, which has been corrected.
+
+ * A corner case bug in "git log -L..." has been corrected.
+
+ * Some among "git add -p" and friends ignored color.diff and/or
+ color.ui configuration variables, which is an old regression, which
+ has been corrected.
+
+ * "git rebase -i" failed to clean-up the commit log message when the
+ command commits the final one in a chain of "fixup" commands, which
+ has been corrected.
+
+ * Deal more gracefully with directory / file conflicts when the files
+ backend is used for ref storage, by failing only the ones that are
+ involved in the conflict while allowing others.
+
+Also contains various documentation updates, code cleanups and minor fixups.
diff --git a/Documentation/RelNotes/2.52.0.adoc b/Documentation/RelNotes/2.52.0.adoc
index 1e5281188f..8c4ed4eef4 100644
--- a/Documentation/RelNotes/2.52.0.adoc
+++ b/Documentation/RelNotes/2.52.0.adoc
@@ -44,6 +44,17 @@ UI, Workflows & Features
* The stash.index configuration variable can be set to make "git stash
pop/apply" pretend that it was invoked with "--index".
+ * "git fast-import" learned that "--signed-commits=<how>" option that
+ corresponds to that of "git fast-export".
+
+ * Marking a hunk 'selected' in "git add -p" and then splitting made
+ all the split pieces 'selected'; this has been changed to make them
+ all 'undecided', which gives better end-user experience.
+
+ * Configuration variables that take a pathname as a value
+ (e.g. blame.ignorerevsfile) can be marked as optional by prefixing
+ ":(optoinal)" before its value.
+
Performance, Internal Implementation, Development Support etc.
--------------------------------------------------------------
@@ -83,9 +94,13 @@ Performance, Internal Implementation, Development Support etc.
singleton variable, which has been updated to pass an instance
throughout the callchain.
+ * The work to build on the bulk-checkin infrastructure to create many
+ objects at once in a transaction and to abstract it into the
+ generic object layer continues.
+
* CodingGuidelines now spells out how bitfields are to be written.
- * Adjust to the way newer versions of cURL selectivel enables tracing
+ * Adjust to the way newer versions of cURL selectively enable tracing
options, so that our tests can continue to work.
(merge 1b5a6bfff3 jk/curl-global-trace-components later to maint).
@@ -102,6 +117,16 @@ Performance, Internal Implementation, Development Support etc.
while the code has been cleaned up to prevent similar bugs in the
future.
+ * The build procedure based on meson learned a target to only build
+ documentation, similar to "make doc".
+ (merge ff4ec8ded0 ps/meson-build-docs later to maint).
+
+ * Dip our toes a bit to (optionally) use a Rust-implemented helper
+ called from our C code.
+
+ * Documentation for "git log --pretty" options has been updated
+ to make it easier to translate.
+
Fixes since v2.51
-----------------
@@ -187,13 +212,13 @@ including security updates, are included in this release.
name.
(merge bcb20dda83 js/doc-gitk-history later to maint).
- * Update the instruction to use of GGG in the MyFirstContribution
+ * Update the instructions for using GGG in the MyFirstContribution
document to say that a GitHub PR could be made against `git/git`
instead of `gitgitgadget/git`.
(merge 37001cdbc4 ds/doc-ggg-pr-fork-clarify later to maint).
* Makefile tried to run multiple "cargo build" which would not work
- very well; serialize their execution to work it around.
+ very well; serialize their execution to work around this problem.
(merge 0eeacde50e da/cargo-serialize later to maint).
* "git repack --path-walk" lost objects in some corner cases, which
@@ -259,6 +284,51 @@ including security updates, are included in this release.
* "git last-modified" operating in non-recursive mode used to trigger
a BUG(), which has been corrected.
+ * The use of "git config get" command to learn how ANSI color
+ sequence is for a particular type, e.g., "git config get
+ --type=color --default=reset no.such.thing", isn't very ergonomic.
+ (merge e4dabf4fd6 ps/config-get-color-fixes later to maint).
+
+ * The "do you still use it?" message given by a command that is
+   deeply deprecated and allows us to suggest alternatives has been
+ updated.
+ (merge 54a60e5b38 kh/you-still-use-whatchanged-fix later to maint).
+
+ * Clang-format update to let our control macros be formatted the way we
+ had them traditionally, e.g., "for_each_string_list_item()" without
+ space before the parentheses.
+ (merge 3721541d35 jt/clang-format-foreach-wo-space-before-parenthesis later to maint).
+
+ * A few places where a size_t value was cast to curl_off_t without
+   checking have been updated to use the existing helper function.
+ (merge ecc5749578 js/curl-off-t-fixes later to maint).
+
+ * "git reflog write" did not honor the configured user.name/email
+ which has been corrected.
+
+ * Handling of an empty subdirectory of .git/refs/ in the ref-files
+ backend has been corrected.
+
+ * Our CI script requires "sudo" that can be told to preserve
+    environment, but Ubuntu replaced "sudo" with an implementation
+ that lacks the feature. Work this around by reinstalling the
+ original version.
+ (merge fddb484255 ps/ci-avoid-broken-sudo-on-ubuntu later to maint).
+
+ * The reftable backend learned to sanity check its on-disk data more
+ carefully.
+ (merge 466a3a1afd kn/reftable-consistency-checks later to maint).
+
+ * A lot of code clean-up of xdiff.
+ Split out of a larger topic.
+ (merge 8b9c5d2e3a en/xdiff-cleanup later to maint).
+
+ * "git format-patch --range-diff=... --notes=..." did not drive the
+   underlying range-diff with the correct --notes parameter, ending up
+   comparing with a different set of notes from its main patch output
+ you would get from "git format-patch --notes=..." for a singleton
+ patch.
+
* Other code cleanup, docfix, build fix, etc.
(merge 823d537fa7 kh/doc-git-log-markup-fix later to maint).
(merge cf7efa4f33 rj/t6137-cygwin-fix later to maint).
@@ -284,3 +354,8 @@ including security updates, are included in this release.
(merge ac7096723b jc/doc-includeif-hasconfig-remote-url-fix later to maint).
(merge fafc9b08b8 ag/doc-sendmail-gmail-example-update later to maint).
(merge a66fc22bf9 rs/get-oid-with-flags-cleanup later to maint).
+ (merge e1d062e8ba ps/odb-clean-stale-wrappers later to maint).
+ (merge fdd21ba116 mh/doc-credential-url-prefix later to maint).
+ (merge 1c573a3451 en/doc-merge-tree-describe-merge-base later to maint).
+ (merge 84a6bf7965 ja/doc-markup-attached-paragraph-fix later to maint).
+ (merge 399694384b kh/doc-patch-id-markup-fix later to maint).
diff --git a/Documentation/config.adoc b/Documentation/config.adoc
index 05f1ca7293..62eebe7c54 100644
--- a/Documentation/config.adoc
+++ b/Documentation/config.adoc
@@ -357,7 +357,9 @@ compiled without runtime prefix support, the compiled-in prefix will be
substituted instead. In the unlikely event that a literal path needs to
be specified that should _not_ be expanded, it needs to be prefixed by
`./`, like so: `./%(prefix)/bin`.
-
++
+If prefixed with `:(optional)`, the configuration variable is treated
+as if it does not exist, if the named path does not exist.
Variables
~~~~~~~~~
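Editor's note on the config.adoc hunk above: a hedged sketch of the new
`:(optional)` path prefix. `blame.ignorerevsfile` is the example variable the
release notes use; the file name here is only illustrative.

    # Treat the variable as unset whenever the named file does not exist
    git config blame.ignorerevsfile ":(optional).git-blame-ignore-revs"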
diff --git a/Documentation/config/alias.adoc b/Documentation/config/alias.adoc
index 95825354bf..80ce17d2de 100644
--- a/Documentation/config/alias.adoc
+++ b/Documentation/config/alias.adoc
@@ -3,7 +3,8 @@ alias.*::
after defining `alias.last = cat-file commit HEAD`, the invocation
`git last` is equivalent to `git cat-file commit HEAD`. To avoid
confusion and troubles with script usage, aliases that
- hide existing Git commands are ignored. Arguments are split by
+ hide existing Git commands are ignored except for deprecated
+ commands. Arguments are split by
spaces, the usual shell quoting and escaping are supported.
A quote pair or a backslash can be used to quote them.
+
diff --git a/Documentation/config/extensions.adoc b/Documentation/config/extensions.adoc
index 829f2523fc..532456644b 100644
--- a/Documentation/config/extensions.adoc
+++ b/Documentation/config/extensions.adoc
@@ -3,8 +3,7 @@ extensions.*::
`core.repositoryFormatVersion` is not `1`. See
linkgit:gitrepository-layout[5].
+
---
-compatObjectFormat::
+compatObjectFormat:::
Specify a compatibility hash algorithm to use. The acceptable values
are `sha1` and `sha256`. The value specified must be different from the
value of `extensions.objectFormat`. This allows client level
@@ -19,18 +18,18 @@ Note that the functionality enabled by this extension is incomplete and subject
to change. It currently exists only to allow development and testing of
the underlying feature and is not designed to be enabled by end users.
-noop::
+noop:::
This extension does not change git's behavior at all. It is useful only
for testing format-1 compatibility.
+
For historical reasons, this extension is respected regardless of the
`core.repositoryFormatVersion` setting.
-noop-v1::
+noop-v1:::
This extension does not change git's behavior at all. It is useful only
for testing format-1 compatibility.
-objectFormat::
+objectFormat:::
Specify the hash algorithm to use. The acceptable values are `sha1` and
`sha256`. If not specified, `sha1` is assumed.
+
@@ -38,7 +37,7 @@ Note that this setting should only be set by linkgit:git-init[1] or
linkgit:git-clone[1]. Trying to change it after initialization will not
work and will produce hard-to-diagnose issues.
-partialClone::
+partialClone:::
When enabled, indicates that the repo was created with a partial clone
(or later performed a partial fetch) and that the remote may have
omitted sending certain unwanted objects. Such a remote is called a
@@ -50,30 +49,31 @@ The value of this key is the name of the promisor remote.
For historical reasons, this extension is respected regardless of the
`core.repositoryFormatVersion` setting.
-preciousObjects::
+preciousObjects:::
If enabled, indicates that objects in the repository MUST NOT be deleted
(e.g., by `git-prune` or `git repack -d`).
+
For historical reasons, this extension is respected regardless of the
`core.repositoryFormatVersion` setting.
-refStorage::
+refStorage:::
Specify the ref storage format to use. The acceptable values are:
+
+--
include::../ref-storage-format.adoc[]
-
+--
+
Note that this setting should only be set by linkgit:git-init[1] or
linkgit:git-clone[1]. Trying to change it after initialization will not
work and will produce hard-to-diagnose issues.
-relativeWorktrees::
+relativeWorktrees:::
If enabled, indicates at least one worktree has been linked with
relative paths. Automatically set if a worktree has been created or
repaired with either the `--relative-paths` option or with the
`worktree.useRelativePaths` config set to `true`.
-worktreeConfig::
+worktreeConfig:::
If enabled, then worktrees will load config settings from the
`$GIT_DIR/config.worktree` file in addition to the
`$GIT_COMMON_DIR/config` file. Note that `$GIT_COMMON_DIR` and
@@ -87,11 +87,12 @@ When enabling this extension, you must be careful to move
certain values from the common config file to the main working tree's
`config.worktree` file, if present:
+
+--
* `core.worktree` must be moved from `$GIT_COMMON_DIR/config` to
`$GIT_COMMON_DIR/config.worktree`.
* If `core.bare` is true, then it must be moved from `$GIT_COMMON_DIR/config`
to `$GIT_COMMON_DIR/config.worktree`.
-
+--
+
It may also be beneficial to adjust the locations of `core.sparseCheckout`
and `core.sparseCheckoutCone` depending on your desire for customizable
@@ -104,4 +105,3 @@ details.
+
For historical reasons, this extension is respected regardless of the
`core.repositoryFormatVersion` setting.
---
diff --git a/Documentation/config/stash.adoc b/Documentation/config/stash.adoc
index e556105a15..7fc32027f7 100644
--- a/Documentation/config/stash.adoc
+++ b/Documentation/config/stash.adoc
@@ -1,19 +1,28 @@
-stash.index::
+ifndef::git-stash[]
+:see-show: See the description of the 'show' command in linkgit:git-stash[1].
+endif::git-stash[]
+
+ifdef::git-stash[]
+:see-show:
+endif::git-stash[]
+
+`stash.index`::
If this is set to true, `git stash apply` and `git stash pop` will
- behave as if `--index` was supplied. Defaults to false. See the
- descriptions in linkgit:git-stash[1].
+ behave as if `--index` was supplied. Defaults to false.
+ifndef::git-stash[]
+See the descriptions in linkgit:git-stash[1].
+endif::git-stash[]
-stash.showIncludeUntracked::
+`stash.showIncludeUntracked`::
If this is set to true, the `git stash show` command will show
- the untracked files of a stash entry. Defaults to false. See
- the description of the 'show' command in linkgit:git-stash[1].
+ the untracked files of a stash entry. Defaults to false. {see-show}
-stash.showPatch::
+`stash.showPatch`::
If this is set to true, the `git stash show` command without an
option will show the stash entry in patch form. Defaults to false.
- See the description of the 'show' command in linkgit:git-stash[1].
+ {see-show}
-stash.showStat::
+`stash.showStat`::
If this is set to true, the `git stash show` command without an
option will show a diffstat of the stash entry. Defaults to true.
- See the description of the 'show' command in linkgit:git-stash[1].
+ {see-show}
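Editor's note on the stash configuration hunks above: a small usage sketch of
the `stash.index` knob as documented; the workflow itself is only an example.

    # Make "git stash pop"/"git stash apply" behave as if --index were given
    git config stash.index true
    git stash push -m "wip"
    git stash pop    # staged changes are restored to the index as well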
diff --git a/Documentation/config/tag.adoc b/Documentation/config/tag.adoc
index 5062a057ff..d878da98d4 100644
--- a/Documentation/config/tag.adoc
+++ b/Documentation/config/tag.adoc
@@ -1,17 +1,23 @@
-tag.forceSignAnnotated::
+`tag.forceSignAnnotated`::
A boolean to specify whether annotated tags created should be GPG signed.
If `--annotate` is specified on the command line, it takes
precedence over this option.
-tag.sort::
- This variable controls the sort ordering of tags when displayed by
- linkgit:git-tag[1]. Without the "--sort=<value>" option provided, the
- value of this variable will be used as the default.
+`tag.sort`::
+ifdef::git-tag[]
+This variable controls the sort ordering of tags when displayed by `git-tag`.
+endif::git-tag[]
+ifndef::git-tag[]
+This variable controls the sort ordering of tags when displayed by
+linkgit:git-tag[1].
+endif::git-tag[]
+Without the `--sort=<value>` option provided, the value of this variable will
+be used as the default.
-tag.gpgSign::
+`tag.gpgSign`::
A boolean to specify whether all tags should be GPG signed.
Use of this option when running in an automated script can
result in a large number of tags being signed. It is therefore
- convenient to use an agent to avoid typing your gpg passphrase
+ convenient to use an agent to avoid typing your GPG passphrase
several times. Note that this option doesn't affect tag signing
- behavior enabled by "-u <keyid>" or "--local-user=<keyid>" options.
+ behavior enabled by `-u <keyid>` or `--local-user=<keyid>` options.
diff --git a/Documentation/config/worktree.adoc b/Documentation/config/worktree.adoc
index 9e3f84f748..a248076ea5 100644
--- a/Documentation/config/worktree.adoc
+++ b/Documentation/config/worktree.adoc
@@ -1,4 +1,4 @@
-worktree.guessRemote::
+`worktree.guessRemote`::
If no branch is specified and neither `-b` nor `-B` nor
`--detach` is used, then `git worktree add` defaults to
creating a new branch from HEAD. If `worktree.guessRemote` is
@@ -6,14 +6,14 @@ worktree.guessRemote::
branch whose name uniquely matches the new branch name. If
such a branch exists, it is checked out and set as "upstream"
for the new branch. If no such match can be found, it falls
- back to creating a new branch from the current HEAD.
+ back to creating a new branch from the current `HEAD`.
-worktree.useRelativePaths::
- Link worktrees using relative paths (when "true") or absolute
- paths (when "false"). This is particularly useful for setups
+`worktree.useRelativePaths`::
+ Link worktrees using relative paths (when "`true`") or absolute
+ paths (when "`false`"). This is particularly useful for setups
where the repository and worktrees may be moved between
- different locations or environments. Defaults to "false".
+ different locations or environments. Defaults to "`false`".
+
-Note that setting `worktree.useRelativePaths` to "true" implies enabling the
+Note that setting `worktree.useRelativePaths` to "`true`" implies enabling the
`extensions.relativeWorktrees` config (see linkgit:git-config[1]),
thus making it incompatible with older versions of Git.
diff --git a/Documentation/fsck-msgids.adoc b/Documentation/fsck-msgids.adoc
index 0ba4f9a27e..81f11ba125 100644
--- a/Documentation/fsck-msgids.adoc
+++ b/Documentation/fsck-msgids.adoc
@@ -38,6 +38,9 @@
`badReferentName`::
(ERROR) The referent name of a symref is invalid.
+`badReftableTableName`::
+ (WARN) A reftable table has an invalid name.
+
`badTagName`::
(INFO) A tag has an invalid format.
@@ -104,9 +107,6 @@
`gitmodulesParse`::
(INFO) Could not parse `.gitmodules` blob.
-`gitmodulesLarge`;
- (ERROR) `.gitmodules` blob is too large to parse.
-
`gitmodulesPath`::
(ERROR) `.gitmodules` path is invalid.
diff --git a/Documentation/git-fast-import.adoc b/Documentation/git-fast-import.adoc
index 6e095b02a1..85ed7a7270 100644
--- a/Documentation/git-fast-import.adoc
+++ b/Documentation/git-fast-import.adoc
@@ -66,6 +66,11 @@ fast-import stream! This option is enabled automatically for
remote-helpers that use the `import` capability, as they are
already trusted to run their own code.
+--signed-commits=(verbatim|warn-verbatim|warn-strip|strip|abort)::
+ Specify how to handle signed commits. Behaves in the same way
+ as the same option in linkgit:git-fast-export[1], except that
+	the default is 'verbatim' (instead of 'abort').
+
Options for Frontends
~~~~~~~~~~~~~~~~~~~~~
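Editor's note on the git-fast-import hunk above: a hedged example of the new
`--signed-commits=<how>` option, shown together with the corresponding
`git fast-export` option; the mirror repository path is an assumption.

    # Preserve commit signatures verbatim across an export/import round trip
    git fast-export --signed-commits=verbatim --all >stream
    git -C ../mirror.git fast-import --signed-commits=verbatim <stream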
diff --git a/Documentation/git-merge-tree.adoc b/Documentation/git-merge-tree.adoc
index 271ab220e8..4391bbee47 100644
--- a/Documentation/git-merge-tree.adoc
+++ b/Documentation/git-merge-tree.adoc
@@ -79,11 +79,17 @@ OPTIONS
--merge-base=<tree-ish>::
Instead of finding the merge-bases for <branch1> and <branch2>,
- specify a merge-base for the merge, and specifying multiple bases is
- currently not supported. This option is incompatible with `--stdin`.
+ specify a merge-base for the merge. This option is incompatible with
+ `--stdin`.
+
-As the merge-base is provided directly, <branch1> and <branch2> do not need
-to specify commits; trees are enough.
+Specifying multiple bases is currently not supported, which means that when
+merging two branches with more than one merge-base, using this option may
+cause merge results to differ from what `git merge` would compute. This
+can include potentially losing some changes made on one side of the history
+in the resulting merge.
++
+With this option, since the merge-base is provided directly, <branch1> and
+<branch2> do not need to specify commits; trees are enough.
-X<option>::
--strategy-option=<option>::
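Editor's note on the git-merge-tree hunk above: a brief usage sketch of
`--merge-base`; the branch and base names are placeholders.

    # Merge two branches using an explicitly chosen base instead of the
    # computed merge-base; with --merge-base, trees (not commits) suffice.
    git merge-tree --write-tree --merge-base=base-commit side-a side-b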
diff --git a/Documentation/git-pack-refs.adoc b/Documentation/git-pack-refs.adoc
index 42b90051e6..fde9f2f294 100644
--- a/Documentation/git-pack-refs.adoc
+++ b/Documentation/git-pack-refs.adoc
@@ -45,58 +45,7 @@ unpacked.
OPTIONS
-------
---all::
-
-The command by default packs all tags and refs that are already
-packed, and leaves other refs
-alone. This is because branches are expected to be actively
-developed and packing their tips does not help performance.
-This option causes all refs to be packed as well, with the exception
-of hidden refs, broken refs, and symbolic refs. Useful for a repository
-with many branches of historical interests.
-
---no-prune::
-
-The command usually removes loose refs under `$GIT_DIR/refs`
-hierarchy after packing them. This option tells it not to.
-
---auto::
-
-Pack refs as needed depending on the current state of the ref database. The
-behavior depends on the ref format used by the repository and may change in the
-future.
-+
- - "files": Loose references are packed into the `packed-refs` file
- based on the ratio of loose references to the size of the
- `packed-refs` file. The bigger the `packed-refs` file, the more loose
- references need to exist before we repack.
-+
- - "reftable": Tables are compacted such that they form a geometric
- sequence. For two tables N and N+1, where N+1 is newer, this
- maintains the property that N is at least twice as big as N+1. Only
- tables that violate this property are compacted.
-
---include <pattern>::
-
-Pack refs based on a `glob(7)` pattern. Repetitions of this option
-accumulate inclusion patterns. If a ref is both included in `--include` and
-`--exclude`, `--exclude` takes precedence. Using `--include` will preclude all
-tags from being included by default. Symbolic refs and broken refs will never
-be packed. When used with `--all`, it will be a noop. Use `--no-include` to clear
-and reset the list of patterns.
-
---exclude <pattern>::
-
-Do not pack refs matching the given `glob(7)` pattern. Repetitions of this option
-accumulate exclusion patterns. Use `--no-exclude` to clear and reset the list of
-patterns. If a ref is already packed, including it with `--exclude` will not
-unpack it.
-+
-When used with `--all`, pack only loose refs which do not match any of
-the provided `--exclude` patterns.
-+
-When used with `--include`, refs provided to `--include`, minus refs that are
-provided to `--exclude` will be packed.
+include::pack-refs-options.adoc[]
BUGS
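Editor's note on the git-pack-refs hunk above: illustrative invocations of the
include/exclude behavior that this change moves into pack-refs-options.adoc;
the patterns are examples only.

    # Pack everything except refs under refs/heads/dev-*
    git pack-refs --all --exclude "refs/heads/dev-*"

    # Pack only tags matching a pattern
    git pack-refs --include "refs/tags/v1.*"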
diff --git a/Documentation/git-patch-id.adoc b/Documentation/git-patch-id.adoc
index 1d15fa45d5..45da0f27ac 100644
--- a/Documentation/git-patch-id.adoc
+++ b/Documentation/git-patch-id.adoc
@@ -33,27 +33,30 @@ OPTIONS
--verbatim::
Calculate the patch-id of the input as it is given, do not strip
any whitespace.
-
- This is the default if patchid.verbatim is true.
++
+This is the default if patchid.verbatim is true.
--stable::
Use a "stable" sum of hashes as the patch ID. With this option:
- - Reordering file diffs that make up a patch does not affect the ID.
- In particular, two patches produced by comparing the same two trees
- with two different settings for "-O<orderfile>" result in the same
- patch ID signature, thereby allowing the computed result to be used
- as a key to index some meta-information about the change between
- the two trees;
-
- - Result is different from the value produced by git 1.9 and older
- or produced when an "unstable" hash (see --unstable below) is
- configured - even when used on a diff output taken without any use
- of "-O<orderfile>", thereby making existing databases storing such
- "unstable" or historical patch-ids unusable.
-
- - All whitespace within the patch is ignored and does not affect the id.
-
- This is the default if patchid.stable is set to true.
++
+--
+- Reordering file diffs that make up a patch does not affect the ID.
+ In particular, two patches produced by comparing the same two trees
+ with two different settings for "-O<orderfile>" result in the same
+ patch ID signature, thereby allowing the computed result to be used
+ as a key to index some meta-information about the change between
+ the two trees;
+
+- Result is different from the value produced by git 1.9 and older
+ or produced when an "unstable" hash (see --unstable below) is
+ configured - even when used on a diff output taken without any use
+ of "-O<orderfile>", thereby making existing databases storing such
+ "unstable" or historical patch-ids unusable.
+
+- All whitespace within the patch is ignored and does not affect the id.
+--
++
+This is the default if patchid.stable is set to true.
--unstable::
Use an "unstable" hash as the patch ID. With this option,
@@ -61,8 +64,8 @@ OPTIONS
by git 1.9 and older and whitespace is ignored. Users with pre-existing
databases storing patch-ids produced by git 1.9 and older (who do not deal
with reordered patches) may want to use this option.
-
- This is the default.
++
+This is the default.
GIT
---
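Editor's note on the git-patch-id hunk above: a brief sketch of computing a
patch ID with the stable algorithm described there; the commit range is
arbitrary.

    # The stable sum is insensitive to file-diff reordering (e.g. -O<orderfile>)
    git diff HEAD~1 HEAD | git patch-id --stable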
diff --git a/Documentation/git-push.adoc b/Documentation/git-push.adoc
index 5f5408e2c0..864b0d0467 100644
--- a/Documentation/git-push.adoc
+++ b/Documentation/git-push.adoc
@@ -19,30 +19,35 @@ SYNOPSIS
DESCRIPTION
-----------
-Updates remote refs using local refs, while sending objects
-necessary to complete the given refs.
+Updates one or more branches, tags, or other references in a remote
+repository from your local repository, and sends all necessary data
+that isn't already on the remote.
-You can make interesting things happen to a repository
-every time you push into it, by setting up 'hooks' there. See
-documentation for linkgit:git-receive-pack[1].
+The simplest way to push is `git push <remote> <branch>`.
+`git push origin main` will push the local `main` branch to the `main`
+branch on the remote named `origin`.
-When the command line does not specify where to push with the
-`<repository>` argument, `branch.*.remote` configuration for the
-current branch is consulted to determine where to push. If the
-configuration is missing, it defaults to 'origin'.
+The `<repository>` argument defaults to the upstream for the current branch,
+or `origin` if there's no configured upstream.
-When the command line does not specify what to push with `<refspec>...`
-arguments or `--all`, `--mirror`, `--tags` options, the command finds
-the default `<refspec>` by consulting `remote.*.push` configuration,
-and if it is not found, honors `push.default` configuration to decide
-what to push (See linkgit:git-config[1] for the meaning of `push.default`).
+To decide which branches, tags, or other refs to push, Git uses
+(in order of precedence):
-When neither the command-line nor the configuration specifies what to
-push, the default behavior is used, which corresponds to the `simple`
-value for `push.default`: the current branch is pushed to the
-corresponding upstream branch, but as a safety measure, the push is
-aborted if the upstream branch does not have the same name as the
-local one.
+1. The `<refspec>` argument(s) (for example `main` in `git push origin main`)
+ or the `--all`, `--mirror`, or `--tags` options
+2. The `remote.*.push` configuration for the repository being pushed to
+3. The `push.default` configuration. The default is `push.default=simple`,
+ which will push to a branch with the same name as the current branch.
+ See the <<CONFIGURATION,CONFIGURATION>> section below for more on `push.default`.
+
+`git push` may fail if you haven't set an upstream for the current branch,
+depending on what `push.default` is set to.
+See the <<UPSTREAM-BRANCHES,UPSTREAM BRANCHES>> section below for more
+on how to set and use upstreams.
+
+You can make interesting things happen to a repository
+every time you push into it, by setting up 'hooks' there. See
+documentation for linkgit:git-receive-pack[1].
OPTIONS[[OPTIONS]]
@@ -55,96 +60,66 @@ OPTIONS[[OPTIONS]]
<refspec>...::
Specify what destination ref to update with what source object.
- The format of a <refspec> parameter is an optional plus
- `+`, followed by the source object <src>, followed
- by a colon `:`, followed by the destination ref <dst>.
-+
-The <src> is often the name of the branch you would want to push, but
-it can be any arbitrary "SHA-1 expression", such as `master~4` or
-`HEAD` (see linkgit:gitrevisions[7]).
-+
-The <dst> tells which ref on the remote side is updated with this
-push. Arbitrary expressions cannot be used here, an actual ref must
-be named.
-If `git push [<repository>]` without any `<refspec>` argument is set to
-update some ref at the destination with `<src>` with
-`remote.<repository>.push` configuration variable, `:<dst>` part can
-be omitted--such a push will update a ref that `<src>` normally updates
-without any `<refspec>` on the command line. Otherwise, missing
-`:<dst>` means to update the same ref as the `<src>`.
-+
-If <dst> doesn't start with `refs/` (e.g. `refs/heads/master`) we will
-try to infer where in `refs/*` on the destination <repository> it
-belongs based on the type of <src> being pushed and whether <dst>
-is ambiguous.
+
---
-* If <dst> unambiguously refers to a ref on the <repository> remote,
- then push to that ref.
-
-* If <src> resolves to a ref starting with refs/heads/ or refs/tags/,
- then prepend that to <dst>.
-
-* Other ambiguity resolutions might be added in the future, but for
- now any other cases will error out with an error indicating what we
- tried, and depending on the `advice.pushUnqualifiedRefname`
- configuration (see linkgit:git-config[1]) suggest what refs/
- namespace you may have wanted to push to.
-
---
-+
-The object referenced by <src> is used to update the <dst> reference
-on the remote side. Whether this is allowed depends on where in
-`refs/*` the <dst> reference lives as described in detail below, in
-those sections "update" means any modifications except deletes, which
-as noted after the next few sections are treated differently.
-+
-The `refs/heads/*` namespace will only accept commit objects, and
-updates only if they can be fast-forwarded.
-+
-The `refs/tags/*` namespace will accept any kind of object (as
-commits, trees and blobs can be tagged), and any updates to them will
-be rejected.
-+
-It's possible to push any type of object to any namespace outside of
-`refs/{tags,heads}/*`. In the case of tags and commits, these will be
-treated as if they were the commits inside `refs/heads/*` for the
-purposes of whether the update is allowed.
-+
-I.e. a fast-forward of commits and tags outside `refs/{tags,heads}/*`
-is allowed, even in cases where what's being fast-forwarded is not a
-commit, but a tag object which happens to point to a new commit which
-is a fast-forward of the commit the last tag (or commit) it's
-replacing. Replacing a tag with an entirely different tag is also
-allowed, if it points to the same commit, as well as pushing a peeled
-tag, i.e. pushing the commit that existing tag object points to, or a
-new tag object which an existing commit points to.
-+
-Tree and blob objects outside of `refs/{tags,heads}/*` will be treated
-the same way as if they were inside `refs/tags/*`, any update of them
-will be rejected.
-+
-All of the rules described above about what's not allowed as an update
-can be overridden by adding an the optional leading `+` to a refspec
-(or using `--force` command line option). The only exception to this
-is that no amount of forcing will make the `refs/heads/*` namespace
-accept a non-commit object. Hooks and configuration can also override
-or amend these rules, see e.g. `receive.denyNonFastForwards` in
-linkgit:git-config[1] and `pre-receive` and `update` in
-linkgit:githooks[5].
-+
-Pushing an empty <src> allows you to delete the <dst> ref from the
-remote repository. Deletions are always accepted without a leading `+`
-in the refspec (or `--force`), except when forbidden by configuration
-or hooks. See `receive.denyDeletes` in linkgit:git-config[1] and
-`pre-receive` and `update` in linkgit:githooks[5].
-+
-The special refspec `:` (or `+:` to allow non-fast-forward updates)
-directs Git to push "matching" branches: for every branch that exists on
-the local side, the remote side is updated if a branch of the same name
-already exists on the remote side.
-+
-`tag <tag>` means the same as `refs/tags/<tag>:refs/tags/<tag>`.
+The format for a refspec is [+]<src>[:<dst>], for example `main`,
+`main:other`, or `HEAD^:refs/heads/main`.
++
+The `<src>` is often the name of the local branch to push, but it can be
+any arbitrary "SHA-1 expression" (see linkgit:gitrevisions[7]).
++
+The `<dst>` determines what ref to update on the remote side. It must be the
+name of a branch, tag, or other ref, not an arbitrary expression.
++
+The `+` is optional and does the same thing as `--force`.
++
+You can write a refspec using the fully expanded form (for
+example `refs/heads/main:refs/heads/main`) which specifies the exact source
+and destination, or with a shorter form (for example `main` or
+`main:other`). Here are the rules for how refspecs are expanded,
+as well as various other special refspec forms:
++
+ * `<src>` without a `:<dst>` means to update the same ref as the
+ `<src>`, unless the `remote.<repository>.push` configuration specifies a
+ different <dst>. For example, if `main` is a branch, then the refspec
+ `main` expands to `main:refs/heads/main`.
+ * If `<dst>` unambiguously refers to a ref on the <repository> remote,
+ then expand it to that ref. For example, if `v1.0` is a tag on the
+ remote, then `HEAD:v1.0` expands to `HEAD:refs/tags/v1.0`.
+ * If `<src>` resolves to a ref starting with `refs/heads/` or `refs/tags/`,
+ then prepend that to <dst>. For example, if `main` is a branch, then
+ `main:other` expands to `main:refs/heads/other`
+ * The special refspec `:` (or `+:` to allow non-fast-forward updates)
+ directs Git to push "matching" branches: for every branch that exists on
+ the local side, the remote side is updated if a branch of the same name
+ already exists on the remote side.
+ * <src> may contain a * to indicate a simple pattern match.
+ This works like a glob that matches any ref matching the pattern.
+ There must be only one * in both the `<src>` and `<dst>`.
+ It will map refs to the destination by replacing the * with the
+ contents matched from the source. For example, `refs/heads/*:refs/heads/*`
+ will push all branches.
+ * A refspec starting with `^` is a negative refspec.
+ This specifies refs to exclude. A ref will be considered to
+ match if it matches at least one positive refspec, and does not
+ match any negative refspec. Negative refspecs can be pattern refspecs.
+ They must only contain a `<src>`.
+ Fully spelled out hex object names are also not supported.
+ For example, `git push origin 'refs/heads/*' '^refs/heads/dev-*'`
+ will push all branches except for those starting with `dev-`
+ * If `<src>` is empty, it deletes the `<dst>` ref from the remote
+ repository. For example, `git push origin :dev` will
+ delete the `dev` branch.
+ * `tag <tag>` expands to `refs/tags/<tag>:refs/tags/<tag>`.
+ This is technically a special syntax for `git push` and not a refspec,
+ since in `git push origin tag v1.0` the arguments `tag` and `v1.0`
+ are separate.
+ * If the refspec can't be expanded unambiguously, error out
+ with an error indicating what was tried, and depending
+ on the `advice.pushUnqualifiedRefname` configuration (see
+ linkgit:git-config[1]) suggest what refs/ namespace you may have
+ wanted to push to.
+
+Not all updates are allowed: see PUSH RULES below for the details.
--all::
--branches::
@@ -335,14 +310,12 @@ allowing a forced update.
-f::
--force::
- Usually, the command refuses to update a remote ref that is
- not an ancestor of the local ref used to overwrite it.
- Also, when `--force-with-lease` option is used, the command refuses
- to update a remote ref whose current value does not match
- what is expected.
+ Usually, `git push` will refuse to update a branch that is not an
+ ancestor of the commit being pushed.
+
-This flag disables these checks, and can cause the remote repository
-to lose commits; use it with care.
+This flag disables that check, the other safety checks in PUSH RULES
+below, and the checks in --force-with-lease. It can cause the remote
+repository to lose commits; use it with care.
+
Note that `--force` applies to all the refs that are pushed, hence
using it with `push.default` set to `matching` or with multiple push
@@ -514,6 +487,45 @@ reason::
refs, no explanation is needed. For a failed ref, the reason for
failure is described.
+PUSH RULES
+----------
+
+As a safety feature, the `git push` command only allows certain kinds of
+updates to prevent you from accidentally losing data on the remote.
+
+Because branches and tags are intended to be used differently, the
+safety rules for pushing to a branch are different from the rules
+for pushing to a tag. In the following rules "update" means any
+modifications except deletions and creations. Deletions and creations
+are always allowed, except when forbidden by configuration or hooks.
+
+1. If the push destination is a **branch** (`refs/heads/*`): only
+ fast-forward updates are allowed, which means the destination must be
+ an ancestor of the source commit. The source must be a commit.
+2. If the push destination is a **tag** (`refs/tags/*`): all updates will
+ be rejected. The source can be any object.
+3. If the push destination is not a branch or tag:
+ * If the source is a tree or blob object, any updates will be rejected
+ * If the source is a tag or commit object, any fast-forward update
+ is allowed, even in cases where what's being fast-forwarded is not a
+ commit, but a tag object which happens to point to a new commit which
+ is a fast-forward of the commit the last tag (or commit) it's
+ replacing. Replacing a tag with an entirely different tag is also
+ allowed, if it points to the same commit, as well as pushing a peeled
+ tag, i.e. pushing the commit that existing tag object points to, or a
+ new tag object which an existing commit points to.
+
+You can override these rules by passing `--force` or by adding the
+optional leading `+` to a refspec. The only exceptions are that no
+amount of forcing will make a branch accept a non-commit object,
+and forcing won't make the remote repository accept a push that it's
+configured to deny.
+
+Hooks and configuration can also override or amend these rules,
+see e.g. `receive.denyNonFastForwards` and `receive.denyDeletes`
+in linkgit:git-config[1] and `pre-receive` and `update` in
+linkgit:githooks[5].
+
NOTE ABOUT FAST-FORWARDS
------------------------
@@ -703,6 +715,7 @@ a `git gc` command on the origin repository.
include::transfer-data-leaks.adoc[]
+[[CONFIGURATION]]
CONFIGURATION
-------------
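Editor's note on the git-push hunks above: a few concrete invocations matching
the refspec expansion rules in the rewritten DESCRIPTION and OPTIONS text;
branch and tag names are placeholders.

    git push origin main                  # expands to main:refs/heads/main
    git push origin main:other            # push local main to refs/heads/other
    git push origin :dev                  # delete the remote dev branch
    git push origin tag v1.0              # same as refs/tags/v1.0:refs/tags/v1.0
    git push origin 'refs/heads/*' '^refs/heads/dev-*'   # all branches except dev-*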
diff --git a/Documentation/git-refs.adoc b/Documentation/git-refs.adoc
index bfa9b3ea2d..fa33680cc7 100644
--- a/Documentation/git-refs.adoc
+++ b/Documentation/git-refs.adoc
@@ -19,6 +19,7 @@ git refs list [--count=<count>] [--shell|--perl|--python|--tcl]
[(--exclude=<pattern>)...] [--start-after=<marker>]
[ --stdin | (<pattern>...)]
git refs exists <ref>
+git refs optimize [--all] [--no-prune] [--auto] [--include <pattern>] [--exclude <pattern>]
DESCRIPTION
-----------
@@ -45,6 +46,11 @@ exists::
failed with an error other than the reference being missing. This does
not verify whether the reference resolves to an actual object.
+optimize::
+ Optimizes references to improve repository performance and reduce disk
+ usage. This subcommand is an alias for linkgit:git-pack-refs[1] and
+ offers identical functionality.
+
OPTIONS
-------
@@ -80,6 +86,10 @@ The following options are specific to 'git refs list':
include::for-each-ref-options.adoc[]
+The following options are specific to 'git refs optimize':
+
+include::pack-refs-options.adoc[]
+
KNOWN LIMITATIONS
-----------------
diff --git a/Documentation/git-stash.adoc b/Documentation/git-stash.adoc
index e2300a19a2..235d57ddd8 100644
--- a/Documentation/git-stash.adoc
+++ b/Documentation/git-stash.adoc
@@ -7,24 +7,24 @@ git-stash - Stash the changes in a dirty working directory away
SYNOPSIS
--------
-[verse]
-'git stash' list [<log-options>]
-'git stash' show [-u | --include-untracked | --only-untracked] [<diff-options>] [<stash>]
-'git stash' drop [-q | --quiet] [<stash>]
-'git stash' pop [--index] [-q | --quiet] [<stash>]
-'git stash' apply [--index] [-q | --quiet] [<stash>]
-'git stash' branch <branchname> [<stash>]
-'git stash' [push [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]
+[synopsis]
+git stash list [<log-options>]
+git stash show [-u | --include-untracked | --only-untracked] [<diff-options>] [<stash>]
+git stash drop [-q | --quiet] [<stash>]
+git stash pop [--index] [-q | --quiet] [<stash>]
+git stash apply [--index] [-q | --quiet] [<stash>]
+git stash branch <branchname> [<stash>]
+git stash [push [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]
[-u | --include-untracked] [-a | --all] [(-m | --message) <message>]
[--pathspec-from-file=<file> [--pathspec-file-nul]]
[--] [<pathspec>...]]
-'git stash' save [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]
- [-u | --include-untracked] [-a | --all] [<message>]
-'git stash' clear
-'git stash' create [<message>]
-'git stash' store [(-m | --message) <message>] [-q | --quiet] <commit>
-'git stash' export (--print | --to-ref <ref>) [<stash>...]
-'git stash' import <commit>
+git stash save [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-q | --quiet]
+ [-u | --include-untracked] [-a | --all] [<message>]
+git stash clear
+git stash create [<message>]
+git stash store [(-m | --message) <message>] [-q | --quiet] <commit>
+git stash export (--print | --to-ref <ref>) [<stash>...]
+git stash import <commit>
DESCRIPTION
-----------
@@ -38,7 +38,7 @@ The modifications stashed away by this command can be listed with
`git stash list`, inspected with `git stash show`, and restored
(potentially on top of a different commit) with `git stash apply`.
Calling `git stash` without any arguments is equivalent to `git stash push`.
-A stash is by default listed as "WIP on 'branchname' ...", but
+A stash is by default listed as "WIP on '<branchname>' ...", but
you can give a more descriptive message on the command line when
you create one.
@@ -47,16 +47,16 @@ stashes are found in the reflog of this reference and can be named using
the usual reflog syntax (e.g. `stash@{0}` is the most recently
created stash, `stash@{1}` is the one before it, `stash@{2.hours.ago}`
is also possible). Stashes may also be referenced by specifying just the
-stash index (e.g. the integer `n` is equivalent to `stash@{n}`).
+stash index (e.g. the integer `<n>` is equivalent to `stash@{<n>}`).
COMMANDS
--------
-push [-p|--patch] [-S|--staged] [-k|--[no-]keep-index] [-u|--include-untracked] [-a|--all] [-q|--quiet] [(-m|--message) <message>] [--pathspec-from-file=<file> [--pathspec-file-nul]] [--] [<pathspec>...]::
+`push [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-u | --include-untracked] [-a | --all] [-q | --quiet] [(-m | --message) <message>] [--pathspec-from-file=<file> [--pathspec-file-nul]] [--] [<pathspec>...]`::
Save your local modifications to a new 'stash entry' and roll them
- back to HEAD (in the working tree and in the index).
- The <message> part is optional and gives
+ back to `HEAD` (in the working tree and in the index).
+ The _<message>_ part is optional and gives
the description along with the stashed state.
+
For quickly making a snapshot, you can omit "push". In this mode,
@@ -65,14 +65,14 @@ subcommand from making an unwanted stash entry. The two exceptions to this
are `stash -p` which acts as alias for `stash push -p` and pathspec elements,
which are allowed after a double hyphen `--` for disambiguation.
-save [-p|--patch] [-S|--staged] [-k|--[no-]keep-index] [-u|--include-untracked] [-a|--all] [-q|--quiet] [<message>]::
+`save [-p | --patch] [-S | --staged] [-k | --[no-]keep-index] [-u | --include-untracked] [-a | --all] [-q | --quiet] [<message>]`::
This option is deprecated in favour of 'git stash push'. It
differs from "stash push" in that it cannot take pathspec.
Instead, all non-option arguments are concatenated to form the stash
message.
-list [<log-options>]::
+`list [<log-options>]`::
List the stash entries that you currently have. Each 'stash entry' is
listed with its name (e.g. `stash@{0}` is the latest entry, `stash@{1}` is
@@ -88,7 +88,7 @@ stash@{1}: On master: 9cc0589... Add git-stash
The command takes options applicable to the 'git log'
command to control what is shown and how. See linkgit:git-log[1].
-show [-u|--include-untracked|--only-untracked] [<diff-options>] [<stash>]::
+`show [-u | --include-untracked | --only-untracked] [<diff-options>] [<stash>]`::
Show the changes recorded in the stash entry as a diff between the
stashed contents and the commit back when the stash entry was first
@@ -96,12 +96,12 @@ show [-u|--include-untracked|--only-untracked] [<diff-options>] [<stash>]::
By default, the command shows the diffstat, but it will accept any
format known to 'git diff' (e.g., `git stash show -p stash@{1}`
to view the second most recent entry in patch form).
- If no `<diff-option>` is provided, the default behavior will be given
+ If no _<diff-option>_ is provided, the default behavior will be given
by the `stash.showStat`, and `stash.showPatch` config variables. You
can also use `stash.showIncludeUntracked` to set whether
`--include-untracked` is enabled by default.
-pop [--index] [-q|--quiet] [<stash>]::
+`pop [--index] [-q | --quiet] [<stash>]`::
Remove a single stashed state from the stash list and apply it
on top of the current working tree state, i.e., do the inverse
@@ -112,19 +112,19 @@ Applying the state can fail with conflicts; in this case, it is not
removed from the stash list. You need to resolve the conflicts by hand
and call `git stash drop` manually afterwards.
-apply [--index] [-q|--quiet] [<stash>]::
+`apply [--index] [-q | --quiet] [<stash>]`::
Like `pop`, but do not remove the state from the stash list. Unlike `pop`,
`<stash>` may be any commit that looks like a commit created by
`stash push` or `stash create`.
-branch <branchname> [<stash>]::
+`branch <branchname> [<stash>]`::
- Creates and checks out a new branch named `<branchname>` starting from
- the commit at which the `<stash>` was originally created, applies the
- changes recorded in `<stash>` to the new working tree and index.
- If that succeeds, and `<stash>` is a reference of the form
- `stash@{<revision>}`, it then drops the `<stash>`.
+ Creates and checks out a new branch named _<branchname>_ starting from
+ the commit at which the _<stash>_ was originally created, applies the
+ changes recorded in _<stash>_ to the new working tree and index.
+ If that succeeds, and _<stash>_ is a reference of the form
+ `stash@{<revision>}`, it then drops the _<stash>_.
+
This is useful if the branch on which you ran `git stash push` has
changed enough that `git stash apply` fails due to conflicts. Since
@@ -132,54 +132,51 @@ the stash entry is applied on top of the commit that was HEAD at the
time `git stash` was run, it restores the originally stashed state
with no conflicts.
-clear::
+`clear`::
Remove all the stash entries. Note that those entries will then
be subject to pruning, and may be impossible to recover (see
- 'Examples' below for a possible strategy).
-
-drop [-q|--quiet] [<stash>]::
+ 'EXAMPLES' below for a possible strategy).
+`drop [-q | --quiet] [<stash>]`::
Remove a single stash entry from the list of stash entries.
-create::
-
+`create`::
Create a stash entry (which is a regular commit object) and
return its object name, without storing it anywhere in the ref
namespace.
This is intended to be useful for scripts. It is probably not
the command you want to use; see "push" above.
-store::
+`store`::
Store a given stash created via 'git stash create' (which is a
dangling merge commit) in the stash ref, updating the stash
reflog. This is intended to be useful for scripts. It is
probably not the command you want to use; see "push" above.
-export ( --print | --to-ref <ref> ) [<stash>...]::
+`export ( --print | --to-ref <ref> ) [<stash>...]`::
Export the specified stashes, or all of them if none are specified, to
a chain of commits which can be transferred using the normal fetch and
push mechanisms, then imported using the `import` subcommand.
-import <commit>::
-
+`import <commit>`::
Import the specified stashes from the specified commit, which must have been
created by `export`, and add them to the list of stashes. To replace the
existing stashes, use `clear` first.
OPTIONS
-------
--a::
---all::
+`-a`::
+`--all`::
This option is only valid for `push` and `save` commands.
+
All ignored and untracked files are also stashed and then cleaned
up with `git clean`.
--u::
---include-untracked::
---no-include-untracked::
+`-u`::
+`--include-untracked`::
+`--no-include-untracked`::
When used with the `push` and `save` commands,
all untracked files are also stashed and then cleaned up with
`git clean`.
@@ -187,12 +184,12 @@ up with `git clean`.
When used with the `show` command, show the untracked files in the stash
entry as part of the diff.
---only-untracked::
+`--only-untracked`::
This option is only valid for the `show` command.
+
Show only the untracked files in the stash entry as part of the diff.
---index::
+`--index`::
This option is only valid for `pop` and `apply` commands.
+
Tries to reinstate not only the working tree's changes, but also
@@ -200,15 +197,15 @@ the index's ones. However, this can fail, when you have conflicts
(which are stored in the index, where you therefore can no longer
apply the changes as they were originally).
--k::
---keep-index::
---no-keep-index::
+`-k`::
+`--keep-index`::
+`--no-keep-index`::
This option is only valid for `push` and `save` commands.
+
All changes already added to the index are left intact.
--p::
---patch::
+`-p`::
+`--patch`::
This option is only valid for `push` and `save` commands.
+
Interactively select hunks from the diff between HEAD and the
@@ -224,8 +221,8 @@ The `--patch` option implies `--keep-index`. You can use
include::diff-context-options.adoc[]
--S::
---staged::
+`-S`::
+`--staged`::
This option is only valid for `push` and `save` commands.
+
Stash only the changes that are currently staged. This is similar to
@@ -234,49 +231,49 @@ of current branch.
+
The `--patch` option has priority over this one.
---pathspec-from-file=<file>::
+`--pathspec-from-file=<file>`::
This option is only valid for `push` command.
+
-Pathspec is passed in `<file>` instead of commandline args. If
-`<file>` is exactly `-` then standard input is used. Pathspec
+Pathspec is passed in _<file>_ instead of commandline args. If
+_<file>_ is exactly `-` then standard input is used. Pathspec
elements are separated by LF or CR/LF. Pathspec elements can be
quoted as explained for the configuration variable `core.quotePath`
(see linkgit:git-config[1]). See also `--pathspec-file-nul` and
global `--literal-pathspecs`.
---pathspec-file-nul::
+`--pathspec-file-nul`::
This option is only valid for `push` command.
+
Only meaningful with `--pathspec-from-file`. Pathspec elements are
separated with NUL character and all other characters are taken
literally (including newlines and quotes).
--q::
---quiet::
+`-q`::
+`--quiet`::
This option is only valid for `apply`, `drop`, `pop`, `push`,
`save`, `store` commands.
+
Quiet, suppress feedback messages.
---print::
+`--print`::
This option is only valid for the `export` command.
+
Create the chain of commits representing the exported stashes without
storing it anywhere in the ref namespace and print the object ID to
standard output. This is designed for scripts.
---to-ref::
+`--to-ref`::
This option is only valid for the `export` command.
+
Create the chain of commits representing the exported stashes and store
it to the specified ref.
-\--::
+`--`::
This option is only valid for `push` command.
+
Separates pathspec from options for disambiguation purposes.
-<pathspec>...::
+`<pathspec>...`::
This option is only valid for `push` command.
+
The new stash entry records the modified states only for the files
@@ -286,11 +283,11 @@ too, leaving files that do not match the pathspec intact.
+
For more details, see the 'pathspec' entry in linkgit:gitglossary[7].
-<stash>::
+_<stash>_::
This option is only valid for `apply`, `branch`, `drop`, `pop`,
`show`, and `export` commands.
+
-A reference of the form `stash@{<revision>}`. When no `<stash>` is
+A reference of the form `stash@{<revision>}`. When no _<stash>_ is
given, the latest stash is assumed (that is, `stash@{0}`).
DISCUSSION
@@ -419,6 +416,7 @@ CONFIGURATION
include::includes/cmd-config-section-all.adoc[]
+:git-stash: 1
include::config/stash.adoc[]
diff --git a/Documentation/git-tag.adoc b/Documentation/git-tag.adoc
index a4b1c0ec05..0f7badc116 100644
--- a/Documentation/git-tag.adoc
+++ b/Documentation/git-tag.adoc
@@ -8,21 +8,21 @@ git-tag - Create, list, delete or verify a tag object signed with GPG
SYNOPSIS
--------
-[verse]
-'git tag' [-a | -s | -u <key-id>] [-f] [-m <msg> | -F <file>] [-e]
+[synopsis]
+git tag [-a | -s | -u <key-id>] [-f] [-m <msg> | -F <file>] [-e]
[(--trailer <token>[(=|:)<value>])...]
<tagname> [<commit> | <object>]
-'git tag' -d <tagname>...
-'git tag' [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>]
+git tag -d <tagname>...
+git tag [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>]
[--points-at <object>] [--column[=<options>] | --no-column]
[--create-reflog] [--sort=<key>] [--format=<format>]
[--merged <commit>] [--no-merged <commit>] [<pattern>...]
-'git tag' -v [--format=<format>] <tagname>...
+git tag -v [--format=<format>] <tagname>...
DESCRIPTION
-----------
-Add a tag reference in `refs/tags/`, unless `-d/-l/-v` is given
+Add a tag reference in `refs/tags/`, unless `-d`/`-l`/`-v` is given
to delete, list or verify tags.
Unless `-f` is given, the named tag must not yet exist.
@@ -58,129 +58,129 @@ lightweight tags by default.
OPTIONS
-------
--a::
---annotate::
+`-a`::
+`--annotate`::
Make an unsigned, annotated tag object
--s::
---sign::
+`-s`::
+`--sign`::
Make a GPG-signed tag, using the default e-mail address's key.
The default behavior of tag GPG-signing is controlled by `tag.gpgSign`
configuration variable if it exists, or disabled otherwise.
See linkgit:git-config[1].
---no-sign::
+`--no-sign`::
Override `tag.gpgSign` configuration variable that is
set to force each and every tag to be signed.
--u <key-id>::
---local-user=<key-id>::
+`-u <key-id>`::
+`--local-user=<key-id>`::
Make a GPG-signed tag, using the given key.
--f::
---force::
+`-f`::
+`--force`::
Replace an existing tag with the given name (instead of failing)
--d::
---delete::
+`-d`::
+`--delete`::
Delete existing tags with the given names.
--v::
---verify::
+`-v`::
+`--verify`::
Verify the GPG signature of the given tag names.
--n<num>::
- <num> specifies how many lines from the annotation, if any,
- are printed when using -l. Implies `--list`.
+`-n<num>`::
+ _<num>_ specifies how many lines from the annotation, if any,
+ are printed when using `-l`. Implies `--list`.
+
The default is not to print any annotation lines.
If no number is given to `-n`, only the first line is printed.
If the tag is not annotated, the commit message is displayed instead.
--l::
---list::
+`-l`::
+`--list`::
List tags. With optional `<pattern>...`, e.g. `git tag --list
'v-*'`, list only the tags that match the pattern(s).
+
-Running "git tag" without arguments also lists all tags. The pattern
-is a shell wildcard (i.e., matched using fnmatch(3)). Multiple
+Running `git tag` without arguments also lists all tags. The pattern
+is a shell wildcard (i.e., matched using `fnmatch`(3)). Multiple
patterns may be given; if any of them matches, the tag is shown.
+
This option is implicitly supplied if any other list-like option such
as `--contains` is provided. See the documentation for each of those
options for details.
---sort=<key>::
+`--sort=<key>`::
Sort based on the key given. Prefix `-` to sort in
- descending order of the value. You may use the --sort=<key> option
- multiple times, in which case the last key becomes the primary
- key. Also supports "version:refname" or "v:refname" (tag
- names are treated as versions). The "version:refname" sort
- order can also be affected by the "versionsort.suffix"
+ descending order of the value. You may use the `--sort=<key>` option
+ multiple times, in which case the last _<key>_ becomes the primary
+ key. Also supports "`version:refname`" or "`v:refname`" (tag
+ names are treated as versions). The "`version:refname`" sort
+ order can also be affected by the "`versionsort.suffix`"
configuration variable.
The keys supported are the same as those in `git for-each-ref`.
Sort order defaults to the value configured for the `tag.sort`
variable if it exists, or lexicographic order otherwise. See
linkgit:git-config[1].
---color[=<when>]::
+`--color[=<when>]`::
Respect any colors specified in the `--format` option. The
- `<when>` field must be one of `always`, `never`, or `auto` (if
- `<when>` is absent, behave as if `always` was given).
+ _<when>_ field must be one of `always`, `never`, or `auto` (if
+ _<when>_ is absent, behave as if `always` was given).
--i::
---ignore-case::
+`-i`::
+`--ignore-case`::
Sorting and filtering tags are case insensitive.
---omit-empty::
+`--omit-empty`::
Do not print a newline after formatted refs where the format expands
to the empty string.
---column[=<options>]::
---no-column::
+`--column[=<options>]`::
+`--no-column`::
Display tag listing in columns. See configuration variable
`column.tag` for option syntax. `--column` and `--no-column`
- without options are equivalent to 'always' and 'never' respectively.
+ without options are equivalent to `always` and `never` respectively.
+
This option is only applicable when listing tags without annotation lines.
---contains [<commit>]::
- Only list tags which contain the specified commit (HEAD if not
+`--contains [<commit>]`::
+ Only list tags which contain _<commit>_ (`HEAD` if not
specified). Implies `--list`.
---no-contains [<commit>]::
- Only list tags which don't contain the specified commit (HEAD if
+`--no-contains [<commit>]`::
+ Only list tags which don't contain _<commit>_ (`HEAD` if
not specified). Implies `--list`.
---merged [<commit>]::
- Only list tags whose commits are reachable from the specified
- commit (`HEAD` if not specified).
+`--merged [<commit>]`::
+ Only list tags whose commits are reachable from
+ _<commit>_ (`HEAD` if not specified).
---no-merged [<commit>]::
- Only list tags whose commits are not reachable from the specified
- commit (`HEAD` if not specified).
+`--no-merged [<commit>]`::
+ Only list tags whose commits are not reachable from
+ _<commit>_ (`HEAD` if not specified).
---points-at <object>::
- Only list tags of the given object (HEAD if not
+`--points-at [<object>]`::
+ Only list tags of _<object>_ (`HEAD` if not
specified). Implies `--list`.
--m <msg>::
---message=<msg>::
- Use the given tag message (instead of prompting).
+`-m <msg>`::
+`--message=<msg>`::
+	Use _<msg>_ as the tag message (instead of prompting).
If multiple `-m` options are given, their values are
concatenated as separate paragraphs.
Implies `-a` if none of `-a`, `-s`, or `-u <key-id>`
is given.
--F <file>::
---file=<file>::
- Take the tag message from the given file. Use '-' to
+`-F <file>`::
+`--file=<file>`::
+ Take the tag message from _<file>_. Use `-` to
read the message from the standard input.
Implies `-a` if none of `-a`, `-s`, or `-u <key-id>`
is given.
---trailer <token>[(=|:)<value>]::
- Specify a (<token>, <value>) pair that should be applied as a
+`--trailer <token>[(=|:)<value>]`::
+ Specify a (_<token>_, _<value>_) pair that should be applied as a
trailer. (e.g. `git tag --trailer "Custom-Key: value"`
will add a "Custom-Key" trailer to the tag message.)
The `trailer.*` configuration variables
@@ -190,46 +190,45 @@ This option is only applicable when listing tags without annotation lines.
The trailers can be extracted in `git tag --list`, using
`--format="%(trailers)"` placeholder.
--e::
---edit::
- The message taken from file with `-F` and command line with
- `-m` are usually used as the tag message unmodified.
- This option lets you further edit the message taken from these sources.
+`-e`::
+`--edit`::
+	Lets you further edit the message taken from the file given with `-F`
+	or from the command line with `-m`.
---cleanup=<mode>::
- This option sets how the tag message is cleaned up.
- The '<mode>' can be one of 'verbatim', 'whitespace' and 'strip'. The
- 'strip' mode is default. The 'verbatim' mode does not change message at
- all, 'whitespace' removes just leading/trailing whitespace lines and
- 'strip' removes both whitespace and commentary.
+`--cleanup=<mode>`::
+ Set how the tag message is cleaned up.
+ The _<mode>_ can be one of `verbatim`, `whitespace` and `strip`. The
+	`strip` mode is the default. The `verbatim` mode does not change the
+	message at all, `whitespace` removes just leading/trailing whitespace
+	lines, and `strip` removes both whitespace and commentary.
---create-reflog::
+`--create-reflog`::
Create a reflog for the tag. To globally enable reflogs for tags, see
`core.logAllRefUpdates` in linkgit:git-config[1].
The negated form `--no-create-reflog` only overrides an earlier
`--create-reflog`, but currently does not negate the setting of
`core.logAllRefUpdates`.
---format=<format>::
+`--format=<format>`::
A string that interpolates `%(fieldname)` from a tag ref being shown
and the object it points at. The format is the same as
that of linkgit:git-for-each-ref[1]. When unspecified,
defaults to `%(refname:strip=2)`.
-<tagname>::
+_<tagname>_::
The name of the tag to create, delete, or describe.
The new tag name must pass all checks defined by
linkgit:git-check-ref-format[1]. Some of these checks
may restrict the characters allowed in a tag name.
-<commit>::
-<object>::
+_<commit>_::
+_<object>_::
The object that the new tag will refer to, usually a commit.
- Defaults to HEAD.
+ Defaults to `HEAD`.
CONFIGURATION
-------------
-By default, 'git tag' in sign-with-default mode (-s) will use your
+By default, `git tag` in sign-with-default mode (`-s`) will use your
committer identity (of the form `Your Name <your@email.address>`) to
find a key. If you want to use a different default key, you can specify
it in the repository configuration as follows:
@@ -252,7 +251,7 @@ On Re-tagging
What should you do when you tag a wrong commit and you would
want to re-tag?
-If you never pushed anything out, just re-tag it. Use "-f" to
+If you never pushed anything out, just re-tag it. Use `-f` to
replace the old one. And you're done.
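+
+For example (with an illustrative tag name):
+
+------------
+git tag -f v1.0 <commit>
+------------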
But if you have pushed things out (or others could just read
@@ -268,12 +267,12 @@ the old tag. In that case you can do one of two things:
. The insane thing.
You really want to call the new version "X" too, 'even though'
- others have already seen the old one. So just use 'git tag -f'
+ others have already seen the old one. So just use `git tag -f`
again, as if you hadn't already published the old one.
However, Git does *not* (and it should not) change tags behind
users back. So if somebody already got the old tag, doing a
-'git pull' on your tree shouldn't just make them overwrite the old
+`git pull` on your tree shouldn't just make them overwrite the old
one.
If somebody got a release tag from you, you cannot just change
@@ -325,7 +324,7 @@ private anchor point tags from the other person.
Often, "please pull" messages on the mailing list just provide
two pieces of information: a repo URL and a branch name; this
-is designed to be easily cut&pasted at the end of a 'git fetch'
+is designed to be easily cut&pasted at the end of a `git fetch`
command line:
------------
@@ -403,6 +402,14 @@ FILES
user in an editor session will be available in this file, but
may be overwritten by the next invocation of `git tag`.
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.adoc[]
+
+:git-tag: 1
+include::config/tag.adoc[]
+
NOTES
-----
diff --git a/Documentation/git-whatchanged.adoc b/Documentation/git-whatchanged.adoc
index d21484026f..436e219b7d 100644
--- a/Documentation/git-whatchanged.adoc
+++ b/Documentation/git-whatchanged.adoc
@@ -15,7 +15,7 @@ WARNING
-------
`git whatchanged` has been deprecated and is scheduled for removal in
a future version of Git, as it is merely `git log` with different
-default; `whatchanged` is not even shorter to type than `log --raw`.
+defaults.
DESCRIPTION
-----------
@@ -24,7 +24,11 @@ Shows commit logs and diff output each commit introduces.
New users are encouraged to use linkgit:git-log[1] instead. The
`whatchanged` command is essentially the same as linkgit:git-log[1]
-but defaults to showing the raw format diff output and skipping merges.
+but defaults to showing the raw format diff output and skipping merges:
+
+----
+git log --raw --no-merges
+----
The command is primarily kept for historical reasons; fingers of
many people who learned Git long before `git log` was invented by
diff --git a/Documentation/git-worktree.adoc b/Documentation/git-worktree.adoc
index 389e669ac0..f272f79783 100644
--- a/Documentation/git-worktree.adoc
+++ b/Documentation/git-worktree.adoc
@@ -8,16 +8,16 @@ git-worktree - Manage multiple working trees
SYNOPSIS
--------
-[verse]
-'git worktree add' [-f] [--detach] [--checkout] [--lock [--reason <string>]]
- [--orphan] [(-b | -B) <new-branch>] <path> [<commit-ish>]
-'git worktree list' [-v | --porcelain [-z]]
-'git worktree lock' [--reason <string>] <worktree>
-'git worktree move' <worktree> <new-path>
-'git worktree prune' [-n] [-v] [--expire <expire>]
-'git worktree remove' [-f] <worktree>
-'git worktree repair' [<path>...]
-'git worktree unlock' <worktree>
+[synopsis]
+git worktree add [-f] [--detach] [--checkout] [--lock [--reason <string>]]
+ [--orphan] [(-b | -B) <new-branch>] <path> [<commit-ish>]
+git worktree list [-v | --porcelain [-z]]
+git worktree lock [--reason <string>] <worktree>
+git worktree move <worktree> <new-path>
+git worktree prune [-n] [-v] [--expire <expire>]
+git worktree remove [-f] <worktree>
+git worktree repair [<path>...]
+git worktree unlock <worktree>
DESCRIPTION
-----------
@@ -37,7 +37,7 @@ zero or more linked worktrees. When you are done with a linked worktree,
remove it with `git worktree remove`.
In its simplest form, `git worktree add <path>` automatically creates a
-new branch whose name is the final component of `<path>`, which is
+new branch whose name is the final component of _<path>_, which is
convenient if you plan to work on a new topic. For instance, `git
worktree add ../hotfix` creates new branch `hotfix` and checks it out at
path `../hotfix`. To instead work on an existing branch in a new worktree,
@@ -63,16 +63,16 @@ locked.
COMMANDS
--------
-add <path> [<commit-ish>]::
+`add <path> [<commit-ish>]`::
-Create a worktree at `<path>` and checkout `<commit-ish>` into it. The new worktree
+Create a worktree at _<path>_ and checkout _<commit-ish>_ into it. The new worktree
is linked to the current repository, sharing everything except per-worktree
-files such as `HEAD`, `index`, etc. As a convenience, `<commit-ish>` may
+files such as `HEAD`, `index`, etc. As a convenience, _<commit-ish>_ may
be a bare "`-`", which is synonymous with `@{-1}`.
+
-If `<commit-ish>` is a branch name (call it `<branch>`) and is not found,
+If _<commit-ish>_ is a branch name (call it _<branch>_) and is not found,
and neither `-b` nor `-B` nor `--detach` are used, but there does
-exist a tracking branch in exactly one remote (call it `<remote>`)
+exist a tracking branch in exactly one remote (call it _<remote>_)
with a matching name, treat as equivalent to:
+
------------
@@ -81,32 +81,32 @@ $ git worktree add --track -b <branch> <path> <remote>/<branch>
+
If the branch exists in multiple remotes and one of them is named by
the `checkout.defaultRemote` configuration variable, we'll use that
-one for the purposes of disambiguation, even if the `<branch>` isn't
+one for the purposes of disambiguation, even if the _<branch>_ isn't
unique across all remotes. Set it to
e.g. `checkout.defaultRemote=origin` to always checkout remote
-branches from there if `<branch>` is ambiguous but exists on the
+branches from there if _<branch>_ is ambiguous but exists on the
`origin` remote. See also `checkout.defaultRemote` in
linkgit:git-config[1].
+
-If `<commit-ish>` is omitted and neither `-b` nor `-B` nor `--detach` used,
+If _<commit-ish>_ is omitted and neither `-b` nor `-B` nor `--detach` used,
then, as a convenience, the new worktree is associated with a branch (call
-it `<branch>`) named after `$(basename <path>)`. If `<branch>` doesn't
+it _<branch>_) named after `$(basename <path>)`. If _<branch>_ doesn't
exist, a new branch based on `HEAD` is automatically created as if
-`-b <branch>` was given. If `<branch>` does exist, it will be checked out
+`-b <branch>` was given. If _<branch>_ does exist, it will be checked out
in the new worktree, if it's not checked out anywhere else, otherwise the
command will refuse to create the worktree (unless `--force` is used).
+
-If `<commit-ish>` is omitted, neither `--detach`, or `--orphan` is
+If _<commit-ish>_ is omitted, neither `--detach`, or `--orphan` is
used, and there are no valid local branches (or remote branches if
`--guess-remote` is specified) then, as a convenience, the new worktree is
-associated with a new unborn branch named `<branch>` (after
+associated with a new unborn branch named _<branch>_ (after
`$(basename <path>)` if neither `-b` or `-B` is used) as if `--orphan` was
passed to the command. In the event the repository has a remote and
`--guess-remote` is used, but no remote or local branches exist, then the
command fails with a warning reminding the user to fetch from their remote
first (or override by using `-f/--force`).
-list::
+`list`::
List details of each worktree. The main worktree is listed first,
followed by each of the linked worktrees. The output details include
@@ -115,32 +115,32 @@ branch currently checked out (or "detached HEAD" if none), "locked" if
the worktree is locked, "prunable" if the worktree can be pruned by the
`prune` command.
-lock::
+`lock`::
If a worktree is on a portable device or network share which is not always
mounted, lock it to prevent its administrative files from being pruned
automatically. This also prevents it from being moved or deleted.
Optionally, specify a reason for the lock with `--reason`.
-move::
+`move`::
Move a worktree to a new location. Note that the main worktree or linked
worktrees containing submodules cannot be moved with this command. (The
`git worktree repair` command, however, can reestablish the connection
with linked worktrees if you move the main worktree manually.)
-prune::
+`prune`::
Prune worktree information in `$GIT_DIR/worktrees`.
-remove::
+`remove`::
Remove a worktree. Only clean worktrees (no untracked files and no
modification in tracked files) can be removed. Unclean worktrees or ones
with submodules can be removed with `--force`. The main worktree cannot be
removed.
-repair [<path>...]::
+`repair [<path>...]`::
Repair worktree administrative files, if possible, if they have become
corrupted or outdated due to external factors.
@@ -154,72 +154,72 @@ Similarly, if the working tree for a linked worktree is moved without
using `git worktree move`, the main worktree (or bare repository) will be
unable to locate it. Running `repair` within the recently-moved worktree
will reestablish the connection. If multiple linked worktrees are moved,
-running `repair` from any worktree with each tree's new `<path>` as an
+running `repair` from any worktree with each tree's new _<path>_ as an
argument, will reestablish the connection to all the specified paths.
+
If both the main worktree and linked worktrees have been moved or copied manually,
-then running `repair` in the main worktree and specifying the new `<path>`
+then running `repair` in the main worktree and specifying the new _<path>_
of each linked worktree will reestablish all connections in both
directions.
-unlock::
+`unlock`::
Unlock a worktree, allowing it to be pruned, moved or deleted.
OPTIONS
-------
--f::
---force::
+`-f`::
+`--force`::
By default, `add` refuses to create a new worktree when
- `<commit-ish>` is a branch name and is already checked out by
- another worktree, or if `<path>` is already assigned to some
- worktree but is missing (for instance, if `<path>` was deleted
+ _<commit-ish>_ is a branch name and is already checked out by
+ another worktree, or if _<path>_ is already assigned to some
+ worktree but is missing (for instance, if _<path>_ was deleted
manually). This option overrides these safeguards. To add a missing but
locked worktree path, specify `--force` twice.
+
`move` refuses to move a locked worktree unless `--force` is specified
twice. If the destination is already assigned to some other worktree but is
-missing (for instance, if `<new-path>` was deleted manually), then `--force`
+missing (for instance, if _<new-path>_ was deleted manually), then `--force`
allows the move to proceed; use `--force` twice if the destination is locked.
+
`remove` refuses to remove an unclean worktree unless `--force` is used.
To remove a locked worktree, specify `--force` twice.
--b <new-branch>::
--B <new-branch>::
- With `add`, create a new branch named `<new-branch>` starting at
- `<commit-ish>`, and check out `<new-branch>` into the new worktree.
- If `<commit-ish>` is omitted, it defaults to `HEAD`.
+`-b <new-branch>`::
+`-B <new-branch>`::
+ With `add`, create a new branch named _<new-branch>_ starting at
+ _<commit-ish>_, and check out _<new-branch>_ into the new worktree.
+ If _<commit-ish>_ is omitted, it defaults to `HEAD`.
By default, `-b` refuses to create a new branch if it already
- exists. `-B` overrides this safeguard, resetting `<new-branch>` to
- `<commit-ish>`.
+ exists. `-B` overrides this safeguard, resetting _<new-branch>_ to
+ _<commit-ish>_.
--d::
---detach::
+`-d`::
+`--detach`::
With `add`, detach `HEAD` in the new worktree. See "DETACHED HEAD"
in linkgit:git-checkout[1].
---checkout::
---no-checkout::
- By default, `add` checks out `<commit-ish>`, however, `--no-checkout` can
+`--checkout`::
+`--no-checkout`::
+ By default, `add` checks out _<commit-ish>_, however, `--no-checkout` can
be used to suppress checkout in order to make customizations,
such as configuring sparse-checkout. See "Sparse checkout"
in linkgit:git-read-tree[1].
---guess-remote::
---no-guess-remote::
- With `worktree add <path>`, without `<commit-ish>`, instead
+`--guess-remote`::
+`--no-guess-remote`::
+ With `worktree add <path>`, without _<commit-ish>_, instead
of creating a new branch from `HEAD`, if there exists a tracking
- branch in exactly one remote matching the basename of `<path>`,
+ branch in exactly one remote matching the basename of _<path>_,
base the new branch on the remote-tracking branch, and mark
the remote-tracking branch as "upstream" from the new branch.
+
This can also be set up as the default behaviour by using the
`worktree.guessRemote` config option.
---relative-paths::
---no-relative-paths::
+`--relative-paths`::
+`--no-relative-paths`::
Link worktrees using relative paths or absolute paths (default).
Overrides the `worktree.useRelativePaths` config option, see
linkgit:git-config[1].
@@ -227,60 +227,60 @@ This can also be set up as the default behaviour by using the
With `repair`, the linking files will be updated if there's an absolute/relative
mismatch, even if the links are correct.
---track::
---no-track::
- When creating a new branch, if `<commit-ish>` is a branch,
+`--track`::
+`--no-track`::
+ When creating a new branch, if _<commit-ish>_ is a branch,
mark it as "upstream" from the new branch. This is the
- default if `<commit-ish>` is a remote-tracking branch. See
+ default if _<commit-ish>_ is a remote-tracking branch. See
`--track` in linkgit:git-branch[1] for details.
---lock::
+`--lock`::
Keep the worktree locked after creation. This is the
equivalent of `git worktree lock` after `git worktree add`,
but without a race condition.
--n::
---dry-run::
+`-n`::
+`--dry-run`::
With `prune`, do not remove anything; just report what it would
remove.
---orphan::
+`--orphan`::
With `add`, make the new worktree and index empty, associating
- the worktree with a new unborn branch named `<new-branch>`.
+ the worktree with a new unborn branch named _<new-branch>_.
---porcelain::
+`--porcelain`::
With `list`, output in an easy-to-parse format for scripts.
This format will remain stable across Git versions and regardless of user
configuration. It is recommended to combine this with `-z`.
See below for details.
--z::
- Terminate each line with a NUL rather than a newline when
+`-z`::
+	Terminate each line with a NUL character rather than a newline when
`--porcelain` is specified with `list`. This makes it possible
to parse the output when a worktree path contains a newline
character.
--q::
---quiet::
+`-q`::
+`--quiet`::
With `add`, suppress feedback messages.
--v::
---verbose::
+`-v`::
+`--verbose`::
With `prune`, report all removals.
+
With `list`, output additional information about worktrees (see below).
---expire <time>::
- With `prune`, only expire unused worktrees older than `<time>`.
+`--expire <time>`::
+ With `prune`, only expire unused worktrees older than _<time>_.
+
With `list`, annotate missing worktrees as prunable if they are older than
-`<time>`.
+_<time>_.
---reason <string>::
+`--reason <string>`::
With `lock` or with `add --lock`, an explanation why the worktree
is locked.
-<worktree>::
+_<worktree>_::
Worktrees can be identified by path, either relative or absolute.
+
If the last path components in the worktree's path is unique among
@@ -522,6 +522,13 @@ $ popd
$ git worktree remove ../temp
------------
+CONFIGURATION
+-------------
+
+include::includes/cmd-config-section-all.adoc[]
+
+include::config/worktree.adoc[]
+
BUGS
----
Multiple checkout in general is still experimental, and the support
diff --git a/Documentation/git.adoc b/Documentation/git.adoc
index 03e9e69d25..ce099e78b8 100644
--- a/Documentation/git.adoc
+++ b/Documentation/git.adoc
@@ -219,7 +219,8 @@ If you just want to run git as if it was started in `<path>` then use
List commands by group. This is an internal/experimental
option and may change or be removed in the future. Supported
groups are: builtins, parseopt (builtin commands that use
- parse-options), main (all commands in libexec directory),
+ parse-options), deprecated (deprecated builtins),
+ main (all commands in libexec directory),
others (all other commands in `$PATH` that have git- prefix),
list-<category> (see categories in command-list.txt),
nohelpers (exclude helper commands), alias and config
diff --git a/Documentation/gitcli.adoc b/Documentation/gitcli.adoc
index 1ea681b59d..ef2a0a399d 100644
--- a/Documentation/gitcli.adoc
+++ b/Documentation/gitcli.adoc
@@ -216,6 +216,20 @@ $ git describe --abbrev=10 HEAD # correct
$ git describe --abbrev 10 HEAD # NOT WHAT YOU MEANT
----------------------------
+
+Magic filename options
+~~~~~~~~~~~~~~~~~~~~~~
+Options that take a filename allow a prefix `:(optional)`. For example:
+
+----------------------------
+git commit -F :(optional)COMMIT_EDITMSG
+# if COMMIT_EDITMSG does not exist, equivalent to
+git commit
+----------------------------
+
+As with configuration values, if the named file is missing, Git behaves as if
+the option had not been given at all. See "Values" in linkgit:git-config[1].
+
NOTES ON FREQUENTLY CONFUSED OPTIONS
------------------------------------
diff --git a/Documentation/gitcredentials.adoc b/Documentation/gitcredentials.adoc
index 3337bb475d..60c2cc4ade 100644
--- a/Documentation/gitcredentials.adoc
+++ b/Documentation/gitcredentials.adoc
@@ -150,9 +150,8 @@ pattern in the config file. For example, if you have this in your config file:
username = foo
--------------------------------------
-then we will match: both protocols are the same, both hosts are the same, and
-the "pattern" URL does not care about the path component at all. However, this
-context would not match:
+then we will match: both protocols are the same and both hosts are the same.
+However, this context would not match:
--------------------------------------
[credential "https://kernel.org"]
@@ -166,11 +165,11 @@ match: Git compares the protocols exactly. However, you may use wildcards in
the domain name and other pattern matching techniques as with the `http.<URL>.*`
options.
-If the "pattern" URL does include a path component, then this too must match
-exactly: the context `https://example.com/bar/baz.git` will match a config
-entry for `https://example.com/bar/baz.git` (in addition to matching the config
-entry for `https://example.com`) but will not match a config entry for
-`https://example.com/bar`.
+If the "pattern" URL does include a path component, then this must match
+as a path prefix: a config entry for `https://example.com/bar` will match
+the context `https://example.com/bar/baz.git`, but will not match the
+contexts `https://example.com/other/repo.git` or `https://example.com/barry/repo.git`
+(even though `bar` is a string prefix of `barry`).
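+
+For example (an illustrative entry), a single config entry like
+
+--------------------------------------
+[credential "https://example.com/bar"]
+	username = foo
+--------------------------------------
+
+would then apply to every repository under `https://example.com/bar/`.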
CONFIGURATION OPTIONS
diff --git a/Documentation/howto/meson.build b/Documentation/howto/meson.build
index 81000028c0..ece20244af 100644
--- a/Documentation/howto/meson.build
+++ b/Documentation/howto/meson.build
@@ -29,7 +29,7 @@ howto_index = custom_target(
output: 'howto-index.adoc',
)
-custom_target(
+doc_targets += custom_target(
command: asciidoc_html_options,
input: howto_index,
output: 'howto-index.html',
@@ -51,7 +51,7 @@ foreach howto : howto_sources
capture: true,
)
- custom_target(
+ doc_targets += custom_target(
command: asciidoc_html_options,
input: howto_stripped,
output: fs.stem(howto_stripped.full_path()) + '.html',
diff --git a/Documentation/meson.build b/Documentation/meson.build
index e34965c5b0..44f94cdb7b 100644
--- a/Documentation/meson.build
+++ b/Documentation/meson.build
@@ -377,7 +377,7 @@ foreach manpage, category : manpages
output: fs.stem(manpage) + '.xml',
)
- custom_target(
+ doc_targets += custom_target(
command: [
xmlto,
'-m', '@INPUT0@',
@@ -400,7 +400,7 @@ foreach manpage, category : manpages
endif
if get_option('docs').contains('html')
- custom_target(
+ doc_targets += custom_target(
command: asciidoc_common_options + [
'--backend=' + asciidoc_html,
'--doctype=manpage',
@@ -452,7 +452,7 @@ if get_option('docs').contains('html')
depends: documentation_deps,
)
- custom_target(
+ doc_targets += custom_target(
command: [
xsltproc,
'--xinclude',
@@ -481,7 +481,7 @@ if get_option('docs').contains('html')
]
foreach article : articles
- custom_target(
+ doc_targets += custom_target(
command: asciidoc_common_options + [
'--backend=' + asciidoc_html,
'--out-file=@OUTPUT@',
diff --git a/Documentation/pack-refs-options.adoc b/Documentation/pack-refs-options.adoc
new file mode 100644
index 0000000000..0b11282941
--- /dev/null
+++ b/Documentation/pack-refs-options.adoc
@@ -0,0 +1,52 @@
+--all::
+
+The command by default packs all tags and refs that are already
+packed, and leaves other refs
+alone. This is because branches are expected to be actively
+developed and packing their tips does not help performance.
+This option causes all refs to be packed as well, with the exception
+of hidden refs, broken refs, and symbolic refs. Useful for a repository
+with many branches of historical interest.
+
+--no-prune::
+
+The command usually removes loose refs under `$GIT_DIR/refs`
+hierarchy after packing them. This option tells it not to.
+
+--auto::
+
+Pack refs as needed depending on the current state of the ref database. The
+behavior depends on the ref format used by the repository and may change in the
+future.
++
+ - "files": Loose references are packed into the `packed-refs` file
+ based on the ratio of loose references to the size of the
+ `packed-refs` file. The bigger the `packed-refs` file, the more loose
+ references need to exist before we repack.
++
+ - "reftable": Tables are compacted such that they form a geometric
+ sequence. For two tables N and N+1, where N+1 is newer, this
+ maintains the property that N is at least twice as big as N+1. Only
+ tables that violate this property are compacted.
+
+--include <pattern>::
+
+Pack refs based on a `glob(7)` pattern. Repetitions of this option
+accumulate inclusion patterns. If a ref is both included in `--include` and
+`--exclude`, `--exclude` takes precedence. Using `--include` will preclude all
+tags from being included by default. Symbolic refs and broken refs will never
+be packed. When used with `--all`, it will be a noop. Use `--no-include` to clear
+and reset the list of patterns.
+
+--exclude <pattern>::
+
+Do not pack refs matching the given `glob(7)` pattern. Repetitions of this option
+accumulate exclusion patterns. Use `--no-exclude` to clear and reset the list of
+patterns. If a ref is already packed, including it with `--exclude` will not
+unpack it.
++
+When used with `--all`, pack only loose refs which do not match any of
+the provided `--exclude` patterns.
++
+When used with `--include`, refs provided to `--include`, minus refs that are
+provided to `--exclude`, will be packed.
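++
+For example, to pack all refs except those under an illustrative
+work-in-progress hierarchy:
++
+----
+git pack-refs --all --exclude "refs/heads/wip/*"
+----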
diff --git a/Documentation/pretty-formats.adoc b/Documentation/pretty-formats.adoc
index 618ddc4a0c..2121e8e1df 100644
--- a/Documentation/pretty-formats.adoc
+++ b/Documentation/pretty-formats.adoc
@@ -232,7 +232,7 @@ ref names with custom decorations. The `decorate` string may be followed by a
colon and zero or more comma-separated options. Option values may contain
literal formatting codes. These must be used for commas (`%x2C`) and closing
parentheses (`%x29`), due to their role in the option syntax.
-+
+
** `prefix=<value>`: Shown before the list of ref names. Defaults to "{nbsp}++(++".
** `suffix=<value>`: Shown after the list of ref names. Defaults to "+)+".
** `separator=<value>`: Shown between ref names. Defaults to "+,+{nbsp}".
@@ -241,10 +241,12 @@ parentheses (`%x29`), due to their role in the option syntax.
** `tag=<value>`: Shown before tag names. Defaults to "`tag:`{nbsp}".
+
+--
For example, to produce decorations with no wrapping
or tag annotations, and spaces as separators:
-+
+
++%(decorate:prefix=,suffix=,tag=,separator= )++
+--
++%(describe++`[:<option>,...]`++)++::
human-readable name, like linkgit:git-describe[1]; empty string for
diff --git a/Documentation/pretty-options.adoc b/Documentation/pretty-options.adoc
index 8aac51dbe7..658e462b25 100644
--- a/Documentation/pretty-options.adoc
+++ b/Documentation/pretty-options.adoc
@@ -61,15 +61,16 @@ and `fuller`).
ifndef::git-rev-list[]
`--notes[=<ref>]`::
Show the notes (see linkgit:git-notes[1]) that annotate the
- commit, when showing the commit log message. This is the default
+ commit, when showing the commit log message.
ifndef::with-breaking-changes[]
- for `git log`, `git show` and `git whatchanged` commands when
+This is the default for `git log`, `git show` and `git whatchanged`
+commands when there is no `--pretty`, `--format`, or `--oneline` option given
+on the command line.
endif::with-breaking-changes[]
ifdef::with-breaking-changes[]
- for `git log` and `git show` commands when
+This is the default for `git log` and `git show` commands when there is no
+`--pretty`, `--format`, or `--oneline` option given on the command line.
endif::with-breaking-changes[]
- there is no `--pretty`, `--format`, or `--oneline` option given
- on the command line.
+
By default, the notes shown are from the notes refs listed in the
`core.notesRef` and `notes.displayRef` variables (or corresponding
diff --git a/Documentation/technical/meson.build b/Documentation/technical/meson.build
index a13aafcfbb..858af811a7 100644
--- a/Documentation/technical/meson.build
+++ b/Documentation/technical/meson.build
@@ -46,7 +46,7 @@ api_index = custom_target(
output: 'api-index.adoc',
)
-custom_target(
+doc_targets += custom_target(
command: asciidoc_html_options,
input: api_index,
output: 'api-index.html',
@@ -56,7 +56,7 @@ custom_target(
)
foreach article : api_docs + articles
- custom_target(
+ doc_targets += custom_target(
command: asciidoc_html_options,
input: article,
output: fs.stem(article) + '.html',
diff --git a/Documentation/urls-remotes.adoc b/Documentation/urls-remotes.adoc
index 9b10151198..57b1646d3e 100644
--- a/Documentation/urls-remotes.adoc
+++ b/Documentation/urls-remotes.adoc
@@ -92,5 +92,47 @@ git push uses:
------------
+[[UPSTREAM-BRANCHES]]
+UPSTREAM BRANCHES
+-----------------
+
+Branches in Git can optionally have an upstream remote branch.
+Git defaults to using the upstream branch for remote operations, for example:
+
+* It's the default for `git pull` or `git fetch` with no arguments.
+* It's the default for `git push` with no arguments, with some exceptions.
+ For example, you can use the `branch.<name>.pushRemote` option to push
+ to a different remote than you pull from, and by default with
+ `push.default=simple` the upstream branch you configure must have
+ the same name.
+* Various commands, including `git checkout` and `git status`, will
+ show you how many commits have been added to your current branch and
+ the upstream since you forked from it, for example "Your branch and
+ 'origin/main' have diverged, and have 2 and 3 different commits each
+ respectively".
+
+The upstream is stored in `.git/config`, in the "remote" and "merge"
+fields. For example, if `main`'s upstream is `origin/main`:
+
+------------
+[branch "main"]
+	remote = origin
+	merge = refs/heads/main
+------------
+
+You can set an upstream branch explicitly with
+`git push --set-upstream <remote> <branch>`,
+but Git will often automatically set the upstream for you, for example:
+
+* When you clone a repository, Git will automatically set the upstream
+ for the default branch.
+* If you have the `push.autoSetupRemote` configuration option set,
+ `git push` will automatically set the upstream the first time you push
+ a branch.
+* Checking out a remote-tracking branch with `git checkout <branch>`
+ will automatically create a local branch with that name and set
+ the upstream to the remote branch.
+
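+To inspect or change the upstream of an existing branch directly, you can
+also use `git branch`; for example, reusing the branch names from the
+snippet above:
+
+------------
+git branch --set-upstream-to=origin/main main
+------------
+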
+[NOTE]
+Upstream branches are sometimes referred to as "tracking information",
+as in "set the branch's tracking information".
diff --git a/Makefile b/Makefile
index a6a14b038b..1919d35bf3 100644
--- a/Makefile
+++ b/Makefile
@@ -483,6 +483,14 @@ include shared.mak
# Define LIBPCREDIR=/foo/bar if your PCRE header and library files are
# in /foo/bar/include and /foo/bar/lib directories.
#
+# == Optional Rust support ==
+#
+# Define WITH_RUST if you want to include features and subsystems written in
+# Rust in Git. For now, Rust is still an optional part of the build
+# process. Starting with Git 3.0, however, Rust will always be enabled.
+#
+# Building Rust code requires Cargo.
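+#
+# For example (any non-empty value enables it):
+#
+#     make WITH_RUST=YesPlease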
+#
# == SHA-1 and SHA-256 defines ==
#
# === SHA-1 backend ===
@@ -683,6 +691,7 @@ OBJECTS =
OTHER_PROGRAMS =
PROGRAM_OBJS =
PROGRAMS =
+RUST_SOURCES =
EXCLUDED_PROGRAMS =
SCRIPT_PERL =
SCRIPT_PYTHON =
@@ -883,7 +892,9 @@ BUILT_INS += git-stage$X
BUILT_INS += git-status$X
BUILT_INS += git-switch$X
BUILT_INS += git-version$X
+ifndef WITH_BREAKING_CHANGES
BUILT_INS += git-whatchanged$X
+endif
# what 'all' will build but not install in gitexecdir
OTHER_PROGRAMS += git$X
@@ -916,6 +927,107 @@ export PYTHON_PATH
TEST_SHELL_PATH = $(SHELL_PATH)
LIB_FILE = libgit.a
+ifdef DEBUG
+RUST_LIB = target/debug/libgitcore.a
+else
+RUST_LIB = target/release/libgitcore.a
+endif
+
+GITLIBS = common-main.o $(LIB_FILE)
+EXTLIBS =
+
+GIT_USER_AGENT = git/$(GIT_VERSION)
+
+ifeq ($(wildcard sha1collisiondetection/lib/sha1.h),sha1collisiondetection/lib/sha1.h)
+DC_SHA1_SUBMODULE = auto
+endif
+
+# Set CFLAGS, LDFLAGS and other *FLAGS variables. These might be
+# tweaked by config.* below as well as the command-line, both of
+# which'll override these defaults.
+# Older versions of GCC may require adding "-std=gnu99" at the end.
+CFLAGS = -g -O2 -Wall
+LDFLAGS =
+CC_LD_DYNPATH = -Wl,-rpath,
+BASIC_CFLAGS = -I.
+BASIC_LDFLAGS =
+
+# library flags
+ARFLAGS = rcs
+PTHREAD_CFLAGS =
+
+# Rust flags
+CARGO_ARGS =
+ifndef V
+CARGO_ARGS += --quiet
+endif
+ifndef DEBUG
+CARGO_ARGS += --release
+endif
+
+# For the 'sparse' target
+SPARSE_FLAGS ?= -std=gnu99 -D__STDC_NO_VLA__
+SP_EXTRA_FLAGS =
+
+# For informing GIT-BUILD-OPTIONS of the SANITIZE=leak,address targets
+SANITIZE_LEAK =
+SANITIZE_ADDRESS =
+
+# For the 'coccicheck' target
+SPATCH_INCLUDE_FLAGS = --all-includes
+SPATCH_FLAGS =
+SPATCH_TEST_FLAGS =
+
+# If *.o files are present, have "coccicheck" depend on them, with
+# COMPUTE_HEADER_DEPENDENCIES this will speed up the common-case of
+# only needing to re-generate coccicheck results for the users of a
+# given API if it's changed, and not all files in the project. If
+# COMPUTE_HEADER_DEPENDENCIES=no this will be unset too.
+SPATCH_USE_O_DEPENDENCIES = YesPlease
+
+# Set SPATCH_CONCAT_COCCI to concatenate the contrib/cocci/*.cocci
+# files into a single contrib/cocci/ALL.cocci before running
+# "coccicheck".
+#
+# Pros:
+#
+# - Speeds up a one-shot run of "make coccicheck", as we won't have to
+# parse *.[ch] files N times for the N *.cocci rules
+#
+# Cons:
+#
+# - Will make incremental development of *.cocci slower, as
+# e.g. changing strbuf.cocci will re-run all *.cocci.
+#
+# - Makes error and performance analysis harder, as rules will be
+# applied from a monolithic ALL.cocci, rather than
+# e.g. strbuf.cocci. To work around this either undefine this, or
+# generate a specific patch, e.g. this will always use strbuf.cocci,
+# not ALL.cocci:
+#
+# make contrib/coccinelle/strbuf.cocci.patch
+SPATCH_CONCAT_COCCI = YesPlease
+
+# Rebuild 'coccicheck' if $(SPATCH), its flags etc. change
+TRACK_SPATCH_DEFINES =
+TRACK_SPATCH_DEFINES += $(SPATCH)
+TRACK_SPATCH_DEFINES += $(SPATCH_INCLUDE_FLAGS)
+TRACK_SPATCH_DEFINES += $(SPATCH_FLAGS)
+TRACK_SPATCH_DEFINES += $(SPATCH_TEST_FLAGS)
+GIT-SPATCH-DEFINES: FORCE
+ @FLAGS='$(TRACK_SPATCH_DEFINES)'; \
+ if test x"$$FLAGS" != x"`cat GIT-SPATCH-DEFINES 2>/dev/null`" ; then \
+ echo >&2 " * new spatch flags"; \
+ echo "$$FLAGS" >GIT-SPATCH-DEFINES; \
+ fi
+
+include config.mak.uname
+-include config.mak.autogen
+-include config.mak
+
+ifdef DEVELOPER
+include config.mak.dev
+endif
GENERATED_H += command-list.h
GENERATED_H += config-list.h
@@ -972,7 +1084,6 @@ LIB_OBJS += blame.o
LIB_OBJS += blob.o
LIB_OBJS += bloom.o
LIB_OBJS += branch.o
-LIB_OBJS += bulk-checkin.o
LIB_OBJS += bundle-uri.o
LIB_OBJS += bundle.o
LIB_OBJS += cache-tree.o
@@ -1092,6 +1203,7 @@ LIB_OBJS += pack-bitmap.o
LIB_OBJS += pack-check.o
LIB_OBJS += pack-mtimes.o
LIB_OBJS += pack-objects.o
+LIB_OBJS += pack-refs.o
LIB_OBJS += pack-revindex.o
LIB_OBJS += pack-write.o
LIB_OBJS += packfile.o
@@ -1134,9 +1246,10 @@ LIB_OBJS += refs/packed-backend.o
LIB_OBJS += refs/ref-cache.o
LIB_OBJS += refspec.o
LIB_OBJS += reftable/basics.o
-LIB_OBJS += reftable/error.o
LIB_OBJS += reftable/block.o
LIB_OBJS += reftable/blocksource.o
+LIB_OBJS += reftable/error.o
+LIB_OBJS += reftable/fsck.o
LIB_OBJS += reftable/iter.o
LIB_OBJS += reftable/merged.o
LIB_OBJS += reftable/pq.o
@@ -1207,7 +1320,9 @@ LIB_OBJS += urlmatch.o
LIB_OBJS += usage.o
LIB_OBJS += userdiff.o
LIB_OBJS += utf8.o
+ifndef WITH_RUST
LIB_OBJS += varint.o
+endif
LIB_OBJS += version.o
LIB_OBJS += versioncmp.o
LIB_OBJS += walker.o
@@ -1406,92 +1521,8 @@ CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/unit-test.o
UNIT_TEST_OBJS += $(UNIT_TEST_DIR)/test-lib.o
-GITLIBS = common-main.o $(LIB_FILE)
-EXTLIBS =
-
-GIT_USER_AGENT = git/$(GIT_VERSION)
-
-ifeq ($(wildcard sha1collisiondetection/lib/sha1.h),sha1collisiondetection/lib/sha1.h)
-DC_SHA1_SUBMODULE = auto
-endif
-
-# Set CFLAGS, LDFLAGS and other *FLAGS variables. These might be
-# tweaked by config.* below as well as the command-line, both of
-# which'll override these defaults.
-# Older versions of GCC may require adding "-std=gnu99" at the end.
-CFLAGS = -g -O2 -Wall
-LDFLAGS =
-CC_LD_DYNPATH = -Wl,-rpath,
-BASIC_CFLAGS = -I.
-BASIC_LDFLAGS =
-
-# library flags
-ARFLAGS = rcs
-PTHREAD_CFLAGS =
-
-# For the 'sparse' target
-SPARSE_FLAGS ?= -std=gnu99 -D__STDC_NO_VLA__
-SP_EXTRA_FLAGS =
-
-# For informing GIT-BUILD-OPTIONS of the SANITIZE=leak,address targets
-SANITIZE_LEAK =
-SANITIZE_ADDRESS =
-
-# For the 'coccicheck' target
-SPATCH_INCLUDE_FLAGS = --all-includes
-SPATCH_FLAGS =
-SPATCH_TEST_FLAGS =
-
-# If *.o files are present, have "coccicheck" depend on them, with
-# COMPUTE_HEADER_DEPENDENCIES this will speed up the common-case of
-# only needing to re-generate coccicheck results for the users of a
-# given API if it's changed, and not all files in the project. If
-# COMPUTE_HEADER_DEPENDENCIES=no this will be unset too.
-SPATCH_USE_O_DEPENDENCIES = YesPlease
-
-# Set SPATCH_CONCAT_COCCI to concatenate the contrib/cocci/*.cocci
-# files into a single contrib/cocci/ALL.cocci before running
-# "coccicheck".
-#
-# Pros:
-#
-# - Speeds up a one-shot run of "make coccicheck", as we won't have to
-# parse *.[ch] files N times for the N *.cocci rules
-#
-# Cons:
-#
-# - Will make incremental development of *.cocci slower, as
-# e.g. changing strbuf.cocci will re-run all *.cocci.
-#
-# - Makes error and performance analysis harder, as rules will be
-# applied from a monolithic ALL.cocci, rather than
-# e.g. strbuf.cocci. To work around this either undefine this, or
-# generate a specific patch, e.g. this will always use strbuf.cocci,
-# not ALL.cocci:
-#
-# make contrib/coccinelle/strbuf.cocci.patch
-SPATCH_CONCAT_COCCI = YesPlease
-
-# Rebuild 'coccicheck' if $(SPATCH), its flags etc. change
-TRACK_SPATCH_DEFINES =
-TRACK_SPATCH_DEFINES += $(SPATCH)
-TRACK_SPATCH_DEFINES += $(SPATCH_INCLUDE_FLAGS)
-TRACK_SPATCH_DEFINES += $(SPATCH_FLAGS)
-TRACK_SPATCH_DEFINES += $(SPATCH_TEST_FLAGS)
-GIT-SPATCH-DEFINES: FORCE
- @FLAGS='$(TRACK_SPATCH_DEFINES)'; \
- if test x"$$FLAGS" != x"`cat GIT-SPATCH-DEFINES 2>/dev/null`" ; then \
- echo >&2 " * new spatch flags"; \
- echo "$$FLAGS" >GIT-SPATCH-DEFINES; \
- fi
-
-include config.mak.uname
--include config.mak.autogen
--include config.mak
-
-ifdef DEVELOPER
-include config.mak.dev
-endif
+RUST_SOURCES += src/lib.rs
+RUST_SOURCES += src/varint.rs
GIT-VERSION-FILE: FORCE
@OLD=$$(cat $@ 2>/dev/null || :) && \
@@ -1522,6 +1553,11 @@ endif
ALL_CFLAGS = $(DEVELOPER_CFLAGS) $(CPPFLAGS) $(CFLAGS) $(CFLAGS_APPEND)
ALL_LDFLAGS = $(LDFLAGS) $(LDFLAGS_APPEND)
+ifdef WITH_RUST
+BASIC_CFLAGS += -DWITH_RUST
+GITLIBS += $(RUST_LIB)
+endif
+
ifdef SANITIZE
SANITIZERS := $(foreach flag,$(subst $(comma),$(space),$(SANITIZE)),$(flag))
BASIC_CFLAGS += -fsanitize=$(SANITIZE) -fno-sanitize-recover=$(SANITIZE)
@@ -2911,6 +2947,12 @@ scalar$X: scalar.o GIT-LDFLAGS $(GITLIBS)
$(LIB_FILE): $(LIB_OBJS)
$(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^
+$(RUST_LIB): Cargo.toml $(RUST_SOURCES)
+ $(QUIET_CARGO)cargo build $(CARGO_ARGS)
+
+.PHONY: rust
+rust: $(RUST_LIB)
+
export DEFAULT_EDITOR DEFAULT_PAGER
Documentation/GIT-EXCLUDED-PROGRAMS: FORCE
@@ -3755,6 +3797,7 @@ clean: profile-clean coverage-clean cocciclean
$(RM) $(FUZZ_PROGRAMS)
$(RM) $(SP_OBJ)
$(RM) $(HCC)
+ $(RM) -r Cargo.lock target/
$(RM) version-def.h
$(RM) -r $(dep_dirs) $(compdb_dir) compile_commands.json
$(RM) $(test_bindir_programs)
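The Makefile hunks above wire an optional Rust static library into the build: with WITH_RUST set, varint.o drops out of LIB_OBJS, -DWITH_RUST lands in BASIC_CFLAGS, and $(RUST_LIB) (built by cargo from src/lib.rs and src/varint.rs, cleaned up together with Cargo.lock and target/) is linked in via GITLIBS. A hedged sketch of how a C call site could dispatch on that flag follows; the Rust-side symbol name is an assumption made purely for illustration, nothing in this patch defines it.

    /*
     * Sketch only: dispatch between the C and Rust varint decoders
     * depending on the WITH_RUST build flag added above.
     * git_varint_decode() is a hypothetical Rust-provided symbol.
     */
    #include <stdint.h>

    uintmax_t decode_varint(const unsigned char **bufp);     /* varint.c */
    #ifdef WITH_RUST
    uintmax_t git_varint_decode(const unsigned char **bufp); /* Rust crate, assumed name */
    #endif

    static uintmax_t decode_varint_any(const unsigned char **bufp)
    {
    #ifdef WITH_RUST
    	return git_varint_decode(bufp);
    #else
    	return decode_varint(bufp);
    #endif
    }
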
diff --git a/add-interactive.c b/add-interactive.c
index 6ffe64c38d..68fc09547d 100644
--- a/add-interactive.c
+++ b/add-interactive.c
@@ -244,7 +244,8 @@ static void find_unique_prefixes(struct prefix_item_list *list)
static ssize_t find_unique(const char *string, struct prefix_item_list *list)
{
- int index = string_list_find_insert_index(&list->sorted, string, 1);
+ bool exact_match;
+ size_t index = string_list_find_insert_index(&list->sorted, string, &exact_match);
struct string_list_item *item;
if (list->items.nr != list->sorted.nr)
@@ -252,8 +253,8 @@ static ssize_t find_unique(const char *string, struct prefix_item_list *list)
" vs %"PRIuMAX")",
(uintmax_t)list->items.nr, (uintmax_t)list->sorted.nr);
- if (index < 0)
- item = list->sorted.items[-1 - index].util;
+ if (exact_match)
+ item = list->sorted.items[index].util;
else if (index > 0 &&
starts_with(list->sorted.items[index - 1].string, string))
return -1;
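The add-interactive.c hunk adapts to a changed string_list_find_insert_index(): rather than encoding an exact hit as a negative int, it now returns an unsigned position and reports the hit through a bool out-parameter. A minimal sketch of the new convention, with the signature inferred from this caller only:

    /* Sketch; assumes a sorted string_list, as in find_unique() above. */
    bool exact_match;
    void *item;
    size_t pos = string_list_find_insert_index(&sorted, "needle", &exact_match);

    if (exact_match)
    	item = sorted.items[pos].util;                      /* already present */
    else
    	item = string_list_insert(&sorted, "needle")->util; /* pos was the insertion point */
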
diff --git a/add-patch.c b/add-patch.c
index b0389c5d5b..e40e2235a1 100644
--- a/add-patch.c
+++ b/add-patch.c
@@ -956,6 +956,7 @@ static int split_hunk(struct add_p_state *s, struct file_diff *file_diff,
* sizeof(*hunk));
hunk = file_diff->hunk + hunk_index;
hunk->splittable_into = 1;
+ hunk->use = UNDECIDED_HUNK;
memset(hunk + 1, 0, (splittable_into - 1) * sizeof(*hunk));
header = &hunk->header;
@@ -1057,7 +1058,7 @@ next_hunk_line:
hunk++;
hunk->splittable_into = 1;
- hunk->use = hunk[-1].use;
+ hunk->use = UNDECIDED_HUNK;
header = &hunk->header;
header->old_count = header->new_count = context_line_count;
@@ -1184,19 +1185,29 @@ static ssize_t recount_edited_hunk(struct add_p_state *s, struct hunk *hunk,
{
struct hunk_header *header = &hunk->header;
size_t i;
+ char ch, marker = ' ';
+ hunk->splittable_into = 0;
header->old_count = header->new_count = 0;
for (i = hunk->start; i < hunk->end; ) {
- switch(normalize_marker(&s->plain.buf[i])) {
+ ch = normalize_marker(&s->plain.buf[i]);
+ switch (ch) {
case '-':
header->old_count++;
+ if (marker == ' ')
+ hunk->splittable_into++;
+ marker = ch;
break;
case '+':
header->new_count++;
+ if (marker == ' ')
+ hunk->splittable_into++;
+ marker = ch;
break;
case ' ':
header->old_count++;
header->new_count++;
+ marker = ch;
break;
}
diff --git a/blame.c b/blame.c
index f1c0670144..cb0b083423 100644
--- a/blame.c
+++ b/blame.c
@@ -2909,9 +2909,6 @@ void setup_blame_bloom_data(struct blame_scoreboard *sb)
struct blame_bloom_data *bd;
struct bloom_filter_settings *bs;
- if (!sb->repo->objects->commit_graph)
- return;
-
bs = get_bloom_filter_settings(sb->repo);
if (!bs)
return;
diff --git a/bloom.c b/bloom.c
index b86015f6d1..2d7b951e5b 100644
--- a/bloom.c
+++ b/bloom.c
@@ -452,10 +452,12 @@ struct bloom_filter *get_or_compute_bloom_filter(struct repository *r,
filter = bloom_filter_slab_at(&bloom_filters, c);
if (!filter->data) {
+ struct commit_graph *g;
uint32_t graph_pos;
- if (repo_find_commit_pos_in_graph(r, c, &graph_pos))
- load_bloom_filter_from_graph(r->objects->commit_graph,
- filter, graph_pos);
+
+ g = repo_find_commit_pos_in_graph(r, c, &graph_pos);
+ if (g)
+ load_bloom_filter_from_graph(g, filter, graph_pos);
}
if (filter->data && filter->len) {
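The blame.c and bloom.c hunks track a repo_find_commit_pos_in_graph() that now hands back the commit-graph containing the commit (or NULL) rather than a plain hit/miss flag, so callers no longer peek at r->objects->commit_graph themselves. A minimal sketch of the resulting pattern, under that inferred signature:

    /*
     * Sketch only; the return type is inferred from the bloom.c caller
     * above. A NULL result covers both "no commit-graph loaded" and
     * "commit not in the graph", so no separate guard is needed.
     */
    uint32_t graph_pos;
    struct commit_graph *g = repo_find_commit_pos_in_graph(r, commit, &graph_pos);

    if (g)
    	load_bloom_filter_from_graph(g, filter, graph_pos);
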
diff --git a/builtin/add.c b/builtin/add.c
index 4cd3d183f9..32709794b3 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -14,13 +14,14 @@
#include "gettext.h"
#include "pathspec.h"
#include "run-command.h"
+#include "object-file.h"
+#include "odb.h"
#include "parse-options.h"
#include "path.h"
#include "preload-index.h"
#include "diff.h"
#include "read-cache.h"
#include "revision.h"
-#include "bulk-checkin.h"
#include "strvec.h"
#include "submodule.h"
#include "add-interactive.h"
@@ -575,7 +576,7 @@ int cmd_add(int argc,
string_list_clear(&only_match_skip_worktree, 0);
}
- transaction = begin_odb_transaction(repo->objects);
+ transaction = odb_transaction_begin(repo->objects);
ps_matched = xcalloc(pathspec.nr, 1);
if (add_renormalize)
@@ -594,7 +595,7 @@ int cmd_add(int argc,
if (chmod_arg && pathspec.nr)
exit_status |= chmod_pathspec(repo, &pathspec, chmod_arg[0], show_only);
- end_odb_transaction(transaction);
+ odb_transaction_commit(transaction);
finish:
if (write_locked_index(repo->index, &lock_file,
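builtin/add.c is one of several call sites in this patch (unpack-objects, update-index and cache-tree follow below) that move from begin_odb_transaction()/end_odb_transaction() to odb_transaction_begin()/odb_transaction_commit(). The batched-write pattern itself is unchanged; a short sketch with an illustrative loop body:

    /* Sketch: batch object writes and make them visible in one commit. */
    struct odb_transaction *tx = odb_transaction_begin(repo->objects);

    for (size_t i = 0; i < pathspec.nr; i++)
    	add_file_to_index(repo->index, pathspec.items[i].match, 0);

    odb_transaction_commit(tx);
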
diff --git a/builtin/backfill.c b/builtin/backfill.c
index 80056abe47..e80fc1b694 100644
--- a/builtin/backfill.c
+++ b/builtin/backfill.c
@@ -53,7 +53,7 @@ static void download_batch(struct backfill_context *ctx)
* We likely have a new packfile. Add it to the packed list to
* avoid possible duplicate downloads of the same objects.
*/
- reprepare_packed_git(ctx->repo);
+ odb_reprepare(ctx->repo->objects);
}
static int fill_missing_blobs(const char *path UNUSED,
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index fce0b06451..ee6715fa52 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -852,9 +852,10 @@ static void batch_each_object(struct batch_options *opt,
if (bitmap && !for_each_bitmapped_object(bitmap, &opt->objects_filter,
batch_one_object_bitmapped, &payload)) {
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *pack;
- for (pack = get_all_packs(the_repository); pack; pack = pack->next) {
+ for (pack = packfile_store_get_all_packs(packs); pack; pack = pack->next) {
if (bitmap_index_contains_pack(bitmap, pack) ||
open_pack_index(pack))
continue;
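This cat-file hunk is the first of many below where get_all_packs(the_repository) gives way to packfile_store_get_all_packs() on repo->objects->packfiles, with get_packed_git() and get_packed_git_mru() gaining matching packfile_store_get_packs() and packfile_store_get_packs_mru() counterparts. A minimal sketch of walking packs through the new handle:

    /* Sketch: enumerate packfiles via the packfile_store handle. */
    struct packfile_store *packs = repo->objects->packfiles;
    struct packed_git *p;

    for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
    	if (!p->pack_local || open_pack_index(p))
    		continue;
    	/* p->num_objects and p->pack_size are valid from here on */
    }
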
diff --git a/builtin/config.c b/builtin/config.c
index 2348a99dd4..75852bd79d 100644
--- a/builtin/config.c
+++ b/builtin/config.c
@@ -547,24 +547,31 @@ static int git_get_color_config(const char *var, const char *value,
return 0;
}
-static void get_color(const struct config_location_options *opts,
+static int get_color(const struct config_location_options *opts,
const char *var, const char *def_color)
{
struct get_color_config_data data = {
.get_color_slot = var,
.parsed_color[0] = '\0',
};
+ int ret;
config_with_options(git_get_color_config, &data,
&opts->source, the_repository,
&opts->options);
if (!data.get_color_found && def_color) {
- if (color_parse(def_color, data.parsed_color) < 0)
- die(_("unable to parse default color value"));
+ if (color_parse(def_color, data.parsed_color) < 0) {
+ ret = error(_("unable to parse default color value"));
+ goto out;
+ }
}
+ ret = 0;
+
+out:
fputs(data.parsed_color, stdout);
+ return ret;
}
struct get_colorbool_config_data {
@@ -913,10 +920,13 @@ static int cmd_config_get(int argc, const char **argv, const char *prefix,
location_options_init(&location_opts, prefix);
display_options_init(&display_opts);
- setup_auto_pager("config", 1);
+ if (display_opts.type != TYPE_COLOR)
+ setup_auto_pager("config", 1);
if (url)
ret = get_urlmatch(&location_opts, &display_opts, argv[0], url);
+ else if (display_opts.type == TYPE_COLOR && !strlen(argv[0]) && display_opts.default_value)
+ ret = get_color(&location_opts, "", display_opts.default_value);
else
ret = get_value(&location_opts, &display_opts, argv[0], value_pattern,
get_value_flags, flags);
@@ -1391,7 +1401,7 @@ static int cmd_config_actions(int argc, const char **argv, const char *prefix)
}
else if (actions == ACTION_GET_COLOR) {
check_argc(argc, 1, 2);
- get_color(&location_opts, argv[0], argv[1]);
+ ret = get_color(&location_opts, argv[0], argv[1]);
}
else if (actions == ACTION_GET_COLORBOOL) {
check_argc(argc, 1, 2);
diff --git a/builtin/count-objects.c b/builtin/count-objects.c
index a61d3b46aa..f2f407c2a7 100644
--- a/builtin/count-objects.c
+++ b/builtin/count-objects.c
@@ -122,6 +122,7 @@ int cmd_count_objects(int argc,
count_loose, count_cruft, NULL, NULL);
if (verbose) {
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
unsigned long num_pack = 0;
off_t size_pack = 0;
@@ -129,7 +130,7 @@ int cmd_count_objects(int argc,
struct strbuf pack_buf = STRBUF_INIT;
struct strbuf garbage_buf = STRBUF_INIT;
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
if (!p->pack_local)
continue;
if (open_pack_index(p))
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index c06ee0b213..dc2486f9a8 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -37,8 +37,6 @@ static const char *const fast_export_usage[] = {
NULL
};
-enum sign_mode { SIGN_ABORT, SIGN_VERBATIM, SIGN_STRIP, SIGN_WARN_VERBATIM, SIGN_WARN_STRIP };
-
static int progress;
static enum sign_mode signed_tag_mode = SIGN_ABORT;
static enum sign_mode signed_commit_mode = SIGN_STRIP;
@@ -59,23 +57,16 @@ static struct hashmap anonymized_seeds;
static struct revision_sources revision_sources;
static int parse_opt_sign_mode(const struct option *opt,
- const char *arg, int unset)
+ const char *arg, int unset)
{
enum sign_mode *val = opt->value;
+
if (unset)
return 0;
- else if (!strcmp(arg, "abort"))
- *val = SIGN_ABORT;
- else if (!strcmp(arg, "verbatim") || !strcmp(arg, "ignore"))
- *val = SIGN_VERBATIM;
- else if (!strcmp(arg, "warn-verbatim") || !strcmp(arg, "warn"))
- *val = SIGN_WARN_VERBATIM;
- else if (!strcmp(arg, "warn-strip"))
- *val = SIGN_WARN_STRIP;
- else if (!strcmp(arg, "strip"))
- *val = SIGN_STRIP;
- else
+
+ if (parse_sign_mode(arg, val))
return error("Unknown %s mode: %s", opt->long_name, arg);
+
return 0;
}
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
index 2c35f9345d..606c6aea82 100644
--- a/builtin/fast-import.c
+++ b/builtin/fast-import.c
@@ -188,6 +188,8 @@ static int global_argc;
static const char **global_argv;
static const char *global_prefix;
+static enum sign_mode signed_commit_mode = SIGN_VERBATIM;
+
/* Memory pools */
static struct mem_pool fi_mem_pool = {
.block_alloc = 2*1024*1024 - sizeof(struct mp_block),
@@ -897,11 +899,11 @@ static void end_packfile(void)
idx_name = keep_pack(create_index());
/* Register the packfile with core git's machinery. */
- new_p = add_packed_git(pack_data->repo, idx_name, strlen(idx_name), 1);
+ new_p = packfile_store_load_pack(pack_data->repo->objects->packfiles,
+ idx_name, 1);
if (!new_p)
die("core git rejected index %s", idx_name);
all_packs[pack_id] = new_p;
- install_packed_git(the_repository, new_p);
free(idx_name);
/* Print the boundary */
@@ -952,6 +954,7 @@ static int store_object(
struct object_id *oidout,
uintmax_t mark)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
void *out, *delta;
struct object_entry *e;
unsigned char hdr[96];
@@ -975,7 +978,7 @@ static int store_object(
if (e->idx.offset) {
duplicate_count_by_type[type]++;
return 1;
- } else if (find_oid_pack(&oid, get_all_packs(the_repository))) {
+ } else if (find_oid_pack(&oid, packfile_store_get_all_packs(packs))) {
e->type = type;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
@@ -1092,6 +1095,7 @@ static void truncate_pack(struct hashfile_checkpoint *checkpoint)
static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
size_t in_sz = 64 * 1024, out_sz = 64 * 1024;
unsigned char *in_buf = xmalloc(in_sz);
unsigned char *out_buf = xmalloc(out_sz);
@@ -1175,7 +1179,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
duplicate_count_by_type[OBJ_BLOB]++;
truncate_pack(&checkpoint);
- } else if (find_oid_pack(&oid, get_all_packs(the_repository))) {
+ } else if (find_oid_pack(&oid, packfile_store_get_all_packs(packs))) {
e->type = OBJ_BLOB;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
@@ -2752,6 +2756,15 @@ static void parse_one_signature(struct signature_data *sig, const char *v)
parse_data(&sig->data, 0, NULL);
}
+static void discard_one_signature(void)
+{
+ struct strbuf data = STRBUF_INIT;
+
+ read_next_command();
+ parse_data(&data, 0, NULL);
+ strbuf_release(&data);
+}
+
static void add_gpgsig_to_commit(struct strbuf *commit_data,
const char *header,
struct signature_data *sig)
@@ -2785,6 +2798,22 @@ static void store_signature(struct signature_data *stored_sig,
}
}
+static void import_one_signature(struct signature_data *sig_sha1,
+ struct signature_data *sig_sha256,
+ const char *v)
+{
+ struct signature_data sig = { NULL, NULL, STRBUF_INIT };
+
+ parse_one_signature(&sig, v);
+
+ if (!strcmp(sig.hash_algo, "sha1"))
+ store_signature(sig_sha1, &sig, "SHA-1");
+ else if (!strcmp(sig.hash_algo, "sha256"))
+ store_signature(sig_sha256, &sig, "SHA-256");
+ else
+ die(_("parse_one_signature() returned unknown hash algo"));
+}
+
static void parse_new_commit(const char *arg)
{
static struct strbuf msg = STRBUF_INIT;
@@ -2817,19 +2846,32 @@ static void parse_new_commit(const char *arg)
if (!committer)
die("Expected committer but didn't get one");
- /* Process signatures (up to 2: one "sha1" and one "sha256") */
while (skip_prefix(command_buf.buf, "gpgsig ", &v)) {
- struct signature_data sig = { NULL, NULL, STRBUF_INIT };
-
- parse_one_signature(&sig, v);
+ switch (signed_commit_mode) {
+
+ /* First, modes that don't need the signature to be parsed */
+ case SIGN_ABORT:
+ die("encountered signed commit; use "
+ "--signed-commits=<mode> to handle it");
+ case SIGN_WARN_STRIP:
+ warning(_("stripping a commit signature"));
+ /* fallthru */
+ case SIGN_STRIP:
+ discard_one_signature();
+ break;
- if (!strcmp(sig.hash_algo, "sha1"))
- store_signature(&sig_sha1, &sig, "SHA-1");
- else if (!strcmp(sig.hash_algo, "sha256"))
- store_signature(&sig_sha256, &sig, "SHA-256");
- else
- BUG("parse_one_signature() returned unknown hash algo");
+ /* Second, modes that parse the signature */
+ case SIGN_WARN_VERBATIM:
+ warning(_("importing a commit signature verbatim"));
+ /* fallthru */
+ case SIGN_VERBATIM:
+ import_one_signature(&sig_sha1, &sig_sha256, v);
+ break;
+ /* Third, BUG */
+ default:
+ BUG("invalid signed_commit_mode value %d", signed_commit_mode);
+ }
read_next_command();
}
@@ -3501,6 +3543,9 @@ static int parse_one_option(const char *option)
option_active_branches(option);
} else if (skip_prefix(option, "export-pack-edges=", &option)) {
option_export_pack_edges(option);
+ } else if (skip_prefix(option, "signed-commits=", &option)) {
+ if (parse_sign_mode(option, &signed_commit_mode))
+ usagef(_("unknown --signed-commits mode '%s'"), option);
} else if (!strcmp(option, "quiet")) {
show_stats = 0;
quiet = 1;
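fast-export's private sign_mode enum becomes shared and fast-import grows a matching --signed-commits=<mode> option; both now push the mode string through parse_sign_mode(), whose contract (non-zero on an unknown mode) is inferred from these two callers. A sketch of reusing the helper in a parse-options callback:

    /* Sketch: reuse parse_sign_mode() from any option callback. */
    static int signed_mode_cb(const struct option *opt, const char *arg, int unset)
    {
    	enum sign_mode *mode = opt->value;

    	if (unset)
    		return 0;
    	if (parse_sign_mode(arg, mode))
    		return error("unknown %s mode: %s", opt->long_name, arg);
    	return 0;
    }
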
diff --git a/builtin/fsck.c b/builtin/fsck.c
index d2eb9d4fbe..8ee95e0d67 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -867,19 +867,20 @@ static int mark_packed_for_connectivity(const struct object_id *oid,
static int check_pack_rev_indexes(struct repository *r, int show_progress)
{
+ struct packfile_store *packs = r->objects->packfiles;
struct progress *progress = NULL;
uint32_t pack_count = 0;
int res = 0;
if (show_progress) {
- for (struct packed_git *p = get_all_packs(r); p; p = p->next)
+ for (struct packed_git *p = packfile_store_get_all_packs(packs); p; p = p->next)
pack_count++;
progress = start_delayed_progress(the_repository,
"Verifying reverse pack-indexes", pack_count);
pack_count = 0;
}
- for (struct packed_git *p = get_all_packs(r); p; p = p->next) {
+ for (struct packed_git *p = packfile_store_get_all_packs(packs); p; p = p->next) {
int load_error = load_pack_revindex_from_disk(p);
if (load_error < 0) {
@@ -999,6 +1000,8 @@ int cmd_fsck(int argc,
for_each_packed_object(the_repository,
mark_packed_for_connectivity, NULL, 0);
} else {
+ struct packfile_store *packs = the_repository->objects->packfiles;
+
odb_prepare_alternates(the_repository->objects);
for (source = the_repository->objects->sources; source; source = source->next)
fsck_source(source);
@@ -1009,7 +1012,7 @@ int cmd_fsck(int argc,
struct progress *progress = NULL;
if (show_progress) {
- for (p = get_all_packs(the_repository); p;
+ for (p = packfile_store_get_all_packs(packs); p;
p = p->next) {
if (open_pack_index(p))
continue;
@@ -1019,7 +1022,7 @@ int cmd_fsck(int argc,
progress = start_progress(the_repository,
_("Checking objects"), total);
}
- for (p = get_all_packs(the_repository); p;
+ for (p = packfile_store_get_all_packs(packs); p;
p = p->next) {
/* verify gives error messages itself */
if (verify_pack(the_repository,
diff --git a/builtin/gc.c b/builtin/gc.c
index 03ae4926b2..e19e13d978 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -487,9 +487,10 @@ static int too_many_loose_objects(struct gc_config *cfg)
static struct packed_git *find_base_packs(struct string_list *packs,
unsigned long limit)
{
+ struct packfile_store *packfiles = the_repository->objects->packfiles;
struct packed_git *p, *base = NULL;
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packfiles); p; p = p->next) {
if (!p->pack_local || p->is_cruft)
continue;
if (limit) {
@@ -508,13 +509,14 @@ static struct packed_git *find_base_packs(struct string_list *packs,
static int too_many_packs(struct gc_config *cfg)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
int cnt;
if (cfg->gc_auto_pack_limit <= 0)
return 0;
- for (cnt = 0, p = get_all_packs(the_repository); p; p = p->next) {
+ for (cnt = 0, p = packfile_store_get_all_packs(packs); p; p = p->next) {
if (!p->pack_local)
continue;
if (p->pack_keep)
@@ -1042,7 +1044,7 @@ int cmd_gc(int argc,
die(FAILED_RUN, "rerere");
report_garbage = report_pack_garbage;
- reprepare_packed_git(the_repository);
+ odb_reprepare(the_repository->objects);
if (pack_garbage.nr > 0) {
close_object_store(the_repository->objects);
clean_pack_garbage();
@@ -1423,7 +1425,7 @@ static int incremental_repack_auto_condition(struct gc_config *cfg UNUSED)
if (incremental_repack_auto_limit < 0)
return 1;
- for (p = get_packed_git(the_repository);
+ for (p = packfile_store_get_packs(the_repository->objects->packfiles);
count < incremental_repack_auto_limit && p;
p = p->next) {
if (!p->multi_pack_index)
@@ -1491,8 +1493,8 @@ static off_t get_auto_pack_size(void)
struct packed_git *p;
struct repository *r = the_repository;
- reprepare_packed_git(r);
- for (p = get_all_packs(r); p; p = p->next) {
+ odb_reprepare(r->objects);
+ for (p = packfile_store_get_all_packs(r->objects->packfiles); p; p = p->next) {
if (p->pack_size > max_size) {
second_largest_size = max_size;
max_size = p->pack_size;
diff --git a/builtin/grep.c b/builtin/grep.c
index 1d97eb2a2a..13841fbf00 100644
--- a/builtin/grep.c
+++ b/builtin/grep.c
@@ -1214,7 +1214,7 @@ int cmd_grep(int argc,
if (recurse_submodules)
repo_read_gitmodules(the_repository, 1);
if (startup_info->have_repository)
- (void)get_packed_git(the_repository);
+ (void)packfile_store_get_packs(the_repository->objects->packfiles);
start_threads(&opt);
} else {
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index f91c301bba..2b78ba7fe4 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -1640,13 +1640,9 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
rename_tmp_packfile(&final_index_name, curr_index_name, &index_name,
hash, "idx", 1);
- if (do_fsck_object) {
- struct packed_git *p;
- p = add_packed_git(the_repository, final_index_name,
- strlen(final_index_name), 0);
- if (p)
- install_packed_git(the_repository, p);
- }
+ if (do_fsck_object)
+ packfile_store_load_pack(the_repository->objects->packfiles,
+ final_index_name, 0);
if (!from_stdin) {
printf("%s\n", hash_to_hex(hash));
diff --git a/builtin/log.c b/builtin/log.c
index 5f552d14c0..c8319b8af3 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -543,7 +543,13 @@ int cmd_whatchanged(int argc,
cmd_log_init(argc, argv, prefix, &rev, &opt, &cfg);
if (!cfg.i_still_use_this)
- you_still_use_that("git whatchanged");
+ you_still_use_that("git whatchanged",
+ _("\n"
+ "hint: You can replace 'git whatchanged <opts>' with:\n"
+ "hint:\tgit log <opts> --raw --no-merges\n"
+ "hint: Or make an alias:\n"
+ "hint:\tgit config set --global alias.whatchanged 'log --raw --no-merges'\n"
+ "\n"));
if (!rev.diffopt.output_format)
rev.diffopt.output_format = DIFF_FORMAT_RAW;
@@ -1400,13 +1406,12 @@ static void make_cover_letter(struct rev_info *rev, int use_separate_file,
* can be added later if deemed desirable.
*/
struct diff_options opts;
- struct strvec other_arg = STRVEC_INIT;
struct range_diff_options range_diff_opts = {
.creation_factor = rev->creation_factor,
.dual_color = 1,
.max_memory = RANGE_DIFF_MAX_MEMORY_DEFAULT,
.diffopt = &opts,
- .other_arg = &other_arg
+ .log_arg = &rev->rdiff_log_arg
};
repo_diff_setup(the_repository, &opts);
@@ -1414,9 +1419,7 @@ static void make_cover_letter(struct rev_info *rev, int use_separate_file,
opts.use_color = rev->diffopt.use_color;
diff_setup_done(&opts);
fprintf_ln(rev->diffopt.file, "%s", rev->rdiff_title);
- get_notes_args(&other_arg, rev);
show_range_diff(rev->rdiff1, rev->rdiff2, &range_diff_opts);
- strvec_clear(&other_arg);
}
}
@@ -2328,6 +2331,7 @@ int cmd_format_patch(int argc,
rev.rdiff_title = diff_title(&rdiff_title, reroll_count,
_("Range-diff:"),
_("Range-diff against v%d:"));
+ get_notes_args(&(rev.rdiff_log_arg), &rev);
}
/*
@@ -2487,6 +2491,7 @@ done:
rev.diffopt.no_free = 0;
release_revisions(&rev);
format_config_release(&cfg);
+ strvec_clear(&rev.rdiff_log_arg);
return 0;
}
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 5856b5f6bf..5bdc44fb2d 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -1748,12 +1748,12 @@ static int want_object_in_pack_mtime(const struct object_id *oid,
}
}
- list_for_each(pos, get_packed_git_mru(the_repository)) {
+ list_for_each(pos, packfile_store_get_packs_mru(the_repository->objects->packfiles)) {
struct packed_git *p = list_entry(pos, struct packed_git, mru);
want = want_object_in_pack_one(p, oid, exclude, found_pack, found_offset, found_mtime);
if (!exclude && want > 0)
list_move(&p->mru,
- get_packed_git_mru(the_repository));
+ packfile_store_get_packs_mru(the_repository->objects->packfiles));
if (want != -1)
return want;
}
@@ -3774,7 +3774,7 @@ static void show_object_pack_hint(struct object *object, const char *name,
enum stdin_packs_mode mode = *(enum stdin_packs_mode *)data;
if (mode == STDIN_PACKS_MODE_FOLLOW) {
if (object->type == OBJ_BLOB &&
- !has_object(the_repository, &object->oid, 0))
+ !odb_has_object(the_repository->objects, &object->oid, 0))
return;
add_object_entry(&object->oid, object->type, name, 0);
} else {
@@ -3831,6 +3831,7 @@ static int pack_mtime_cmp(const void *_a, const void *_b)
static void read_packs_list_from_stdin(struct rev_info *revs)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct strbuf buf = STRBUF_INIT;
struct string_list include_packs = STRING_LIST_INIT_DUP;
struct string_list exclude_packs = STRING_LIST_INIT_DUP;
@@ -3855,7 +3856,7 @@ static void read_packs_list_from_stdin(struct rev_info *revs)
string_list_sort(&exclude_packs);
string_list_remove_duplicates(&exclude_packs, 0);
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
const char *pack_name = pack_basename(p);
if ((item = string_list_lookup(&include_packs, pack_name)))
@@ -4076,6 +4077,7 @@ static void enumerate_cruft_objects(void)
static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
struct rev_info revs;
int ret;
@@ -4105,7 +4107,7 @@ static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs
* Re-mark only the fresh packs as kept so that objects in
* unknown packs do not halt the reachability traversal early.
*/
- for (p = get_all_packs(the_repository); p; p = p->next)
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next)
p->pack_keep_in_core = 0;
mark_pack_kept_in_core(fresh_packs, 1);
@@ -4122,6 +4124,7 @@ static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs
static void read_cruft_objects(void)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct strbuf buf = STRBUF_INIT;
struct string_list discard_packs = STRING_LIST_INIT_DUP;
struct string_list fresh_packs = STRING_LIST_INIT_DUP;
@@ -4142,7 +4145,7 @@ static void read_cruft_objects(void)
string_list_sort(&discard_packs);
string_list_sort(&fresh_packs);
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
const char *pack_name = pack_basename(p);
struct string_list_item *item;
@@ -4390,11 +4393,12 @@ static void add_unreachable_loose_objects(struct rev_info *revs)
static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
static struct packed_git *last_found = (void *)1;
struct packed_git *p;
p = (last_found != (void *)1) ? last_found :
- get_all_packs(the_repository);
+ packfile_store_get_all_packs(packs);
while (p) {
if ((!p->pack_local || p->pack_keep ||
@@ -4404,7 +4408,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
return 1;
}
if (p == last_found)
- p = get_all_packs(the_repository);
+ p = packfile_store_get_all_packs(packs);
else
p = p->next;
if (p == last_found)
@@ -4436,12 +4440,13 @@ static int loosened_object_can_be_discarded(const struct object_id *oid,
static void loosen_unused_packed_objects(void)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
uint32_t i;
uint32_t loosened_objects_nr = 0;
struct object_id oid;
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
@@ -4591,8 +4596,8 @@ static int add_objects_by_path(const char *path,
/* Skip objects that do not exist locally. */
if ((exclude_promisor_objects || arg_missing_action != MA_ERROR) &&
- oid_object_info_extended(the_repository, oid, &oi,
- OBJECT_INFO_FOR_PREFETCH) < 0)
+ odb_read_object_info_extended(the_repository->objects, oid, &oi,
+ OBJECT_INFO_FOR_PREFETCH) < 0)
continue;
exclude = is_oid_uninteresting(the_repository, oid);
@@ -4742,12 +4747,13 @@ static void get_object_list(struct rev_info *revs, struct strvec *argv)
static void add_extra_kept_packs(const struct string_list *names)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
if (!names->nr)
return;
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
const char *name = basename(p->pack_name);
int i;
@@ -5185,8 +5191,10 @@ int cmd_pack_objects(int argc,
add_extra_kept_packs(&keep_pack_list);
if (ignore_packed_keep_on_disk) {
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
- for (p = get_all_packs(the_repository); p; p = p->next)
+
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next)
if (p->pack_local && p->pack_keep)
break;
if (!p) /* no keep-able packs found */
@@ -5198,8 +5206,10 @@ int cmd_pack_objects(int argc,
* want to unset "local" based on looking at packs, as
* it also covers non-local objects
*/
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
- for (p = get_all_packs(the_repository); p; p = p->next) {
+
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
if (!p->pack_local) {
have_non_local_packs = 1;
break;
diff --git a/builtin/pack-redundant.c b/builtin/pack-redundant.c
index fe81c293e3..80743d8806 100644
--- a/builtin/pack-redundant.c
+++ b/builtin/pack-redundant.c
@@ -566,7 +566,8 @@ static struct pack_list * add_pack(struct packed_git *p)
static struct pack_list * add_pack_file(const char *filename)
{
- struct packed_git *p = get_all_packs(the_repository);
+ struct packfile_store *packs = the_repository->objects->packfiles;
+ struct packed_git *p = packfile_store_get_all_packs(packs);
if (strlen(filename) < 40)
die("Bad pack filename: %s", filename);
@@ -581,7 +582,8 @@ static struct pack_list * add_pack_file(const char *filename)
static void load_all(void)
{
- struct packed_git *p = get_all_packs(the_repository);
+ struct packfile_store *packs = the_repository->objects->packfiles;
+ struct packed_git *p = packfile_store_get_all_packs(packs);
while (p) {
add_pack(p);
@@ -626,7 +628,7 @@ int cmd_pack_redundant(int argc, const char **argv, const char *prefix UNUSED, s
}
if (!i_still_use_this)
- you_still_use_that("git pack-redundant");
+ you_still_use_that("git pack-redundant", NULL);
if (load_all_packs)
load_all();
diff --git a/builtin/pack-refs.c b/builtin/pack-refs.c
index 5e28d0f9e8..3446b84cda 100644
--- a/builtin/pack-refs.c
+++ b/builtin/pack-refs.c
@@ -1,60 +1,16 @@
#include "builtin.h"
-#include "config.h"
-#include "environment.h"
#include "gettext.h"
-#include "parse-options.h"
-#include "refs.h"
-#include "revision.h"
-
-static char const * const pack_refs_usage[] = {
- N_("git pack-refs [--all] [--no-prune] [--auto] [--include <pattern>] [--exclude <pattern>]"),
- NULL
-};
+#include "pack-refs.h"
int cmd_pack_refs(int argc,
const char **argv,
const char *prefix,
struct repository *repo)
{
- struct ref_exclusions excludes = REF_EXCLUSIONS_INIT;
- struct string_list included_refs = STRING_LIST_INIT_NODUP;
- struct pack_refs_opts pack_refs_opts = {
- .exclusions = &excludes,
- .includes = &included_refs,
- .flags = PACK_REFS_PRUNE,
- };
- struct string_list option_excluded_refs = STRING_LIST_INIT_NODUP;
- struct string_list_item *item;
- int pack_all = 0;
- int ret;
-
- struct option opts[] = {
- OPT_BOOL(0, "all", &pack_all, N_("pack everything")),
- OPT_BIT(0, "prune", &pack_refs_opts.flags, N_("prune loose refs (default)"), PACK_REFS_PRUNE),
- OPT_BIT(0, "auto", &pack_refs_opts.flags, N_("auto-pack refs as needed"), PACK_REFS_AUTO),
- OPT_STRING_LIST(0, "include", pack_refs_opts.includes, N_("pattern"),
- N_("references to include")),
- OPT_STRING_LIST(0, "exclude", &option_excluded_refs, N_("pattern"),
- N_("references to exclude")),
- OPT_END(),
+ static char const * const pack_refs_usage[] = {
+ N_("git pack-refs " PACK_REFS_OPTS),
+ NULL
};
- repo_config(repo, git_default_config, NULL);
- if (parse_options(argc, argv, prefix, opts, pack_refs_usage, 0))
- usage_with_options(pack_refs_usage, opts);
-
- for_each_string_list_item(item, &option_excluded_refs)
- add_ref_exclusion(pack_refs_opts.exclusions, item->string);
-
- if (pack_all)
- string_list_append(pack_refs_opts.includes, "*");
-
- if (!pack_refs_opts.includes->nr)
- string_list_append(pack_refs_opts.includes, "refs/tags/*");
-
- ret = refs_pack_refs(get_main_ref_store(repo), &pack_refs_opts);
- clear_ref_exclusions(&excludes);
- string_list_clear(&included_refs, 0);
- string_list_clear(&option_excluded_refs, 0);
- return ret;
+ return pack_refs_core(argc, argv, prefix, repo, pack_refs_usage);
}
diff --git a/builtin/range-diff.c b/builtin/range-diff.c
index 1bc082a869..e54c0f7fe1 100644
--- a/builtin/range-diff.c
+++ b/builtin/range-diff.c
@@ -38,13 +38,13 @@ int cmd_range_diff(int argc,
struct repository *repo UNUSED)
{
struct diff_options diffopt = { NULL };
- struct strvec other_arg = STRVEC_INIT;
+ struct strvec log_arg = STRVEC_INIT;
struct strvec diff_merges_arg = STRVEC_INIT;
struct range_diff_options range_diff_opts = {
.creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT,
.max_memory = RANGE_DIFF_MAX_MEMORY_DEFAULT,
.diffopt = &diffopt,
- .other_arg = &other_arg
+ .log_arg = &log_arg
};
int simple_color = -1, left_only = 0, right_only = 0;
struct option range_diff_options[] = {
@@ -53,7 +53,7 @@ int cmd_range_diff(int argc,
N_("percentage by which creation is weighted")),
OPT_BOOL(0, "no-dual-color", &simple_color,
N_("use simple diff colors")),
- OPT_PASSTHRU_ARGV(0, "notes", &other_arg,
+ OPT_PASSTHRU_ARGV(0, "notes", &log_arg,
N_("notes"), N_("passed to 'git log'"),
PARSE_OPT_OPTARG),
OPT_PASSTHRU_ARGV(0, "diff-merges", &diff_merges_arg,
@@ -93,7 +93,7 @@ int cmd_range_diff(int argc,
/* If `--diff-merges` was specified, imply `--merges` */
if (diff_merges_arg.nr) {
range_diff_opts.include_merges = 1;
- strvec_pushv(&other_arg, diff_merges_arg.v);
+ strvec_pushv(&log_arg, diff_merges_arg.v);
}
for (i = 0; i < argc; i++)
@@ -125,7 +125,7 @@ int cmd_range_diff(int argc,
strbuf_addf(&range1, "%s..%s", argv[0], argv[1]);
strbuf_addf(&range2, "%s..%s", argv[0], argv[2]);
- strvec_pushv(&other_arg, argv +
+ strvec_pushv(&log_arg, argv +
(dash_dash < 0 ? 3 : dash_dash));
} else if (dash_dash == 2 ||
(dash_dash < 0 && argc > 1 &&
@@ -145,7 +145,7 @@ int cmd_range_diff(int argc,
strbuf_addstr(&range1, argv[0]);
strbuf_addstr(&range2, argv[1]);
- strvec_pushv(&other_arg, argv +
+ strvec_pushv(&log_arg, argv +
(dash_dash < 0 ? 2 : dash_dash));
} else if (dash_dash == 1 ||
(dash_dash < 0 && argc > 0 &&
@@ -176,7 +176,7 @@ int cmd_range_diff(int argc,
strbuf_addf(&range1, "%s..%.*s", b, a_len, a);
strbuf_addf(&range2, "%.*s..%s", a_len, a, b);
- strvec_pushv(&other_arg, argv +
+ strvec_pushv(&log_arg, argv +
(dash_dash < 0 ? 1 : dash_dash));
} else
usage_msg_opt(_("need two commit ranges"),
@@ -188,7 +188,7 @@ int cmd_range_diff(int argc,
range_diff_opts.right_only = right_only;
res = show_range_diff(range1.buf, range2.buf, &range_diff_opts);
- strvec_clear(&other_arg);
+ strvec_clear(&log_arg);
strvec_clear(&diff_merges_arg);
strbuf_release(&range1);
strbuf_release(&range2);
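range-diff (and the cover-letter code in builtin/log.c above) renames the other_arg strvec to log_arg, making explicit that these options are forwarded to the underlying 'git log' invocation. A hedged sketch of driving show_range_diff() with the renamed field; diffopt setup via repo_diff_setup()/diff_setup_done() is elided:

    /* Sketch: forward extra 'git log' options through log_arg. */
    struct diff_options diffopt = { NULL };
    struct strvec log_arg = STRVEC_INIT;
    struct range_diff_options opts = {
    	.creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT,
    	.max_memory = RANGE_DIFF_MAX_MEMORY_DEFAULT,
    	.diffopt = &diffopt,
    	.log_arg = &log_arg,
    };

    strvec_push(&log_arg, "--notes");
    show_range_diff("v1..v2", "v1..v3", &opts);
    strvec_clear(&log_arg);
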
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index 1113137a6f..c9288a9c7e 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -2389,7 +2389,7 @@ static const char *unpack(int err_fd, struct shallow_info *si)
status = finish_command(&child);
if (status)
return "index-pack abnormal exit";
- reprepare_packed_git(the_repository);
+ odb_reprepare(the_repository->objects);
}
return NULL;
}
diff --git a/builtin/reflog.c b/builtin/reflog.c
index c8f6b93d60..dcbfe89339 100644
--- a/builtin/reflog.c
+++ b/builtin/reflog.c
@@ -418,6 +418,8 @@ static int cmd_reflog_write(int argc, const char **argv, const char *prefix,
const char *ref, *message;
int ret;
+ repo_config(repo, git_ident_config, NULL);
+
argc = parse_options(argc, argv, prefix, options, reflog_write_usage, 0);
if (argc != 4)
usage_with_options(reflog_write_usage, options);
diff --git a/builtin/refs.c b/builtin/refs.c
index 91548783b7..3064f888b2 100644
--- a/builtin/refs.c
+++ b/builtin/refs.c
@@ -2,6 +2,7 @@
#include "builtin.h"
#include "config.h"
#include "fsck.h"
+#include "pack-refs.h"
#include "parse-options.h"
#include "refs.h"
#include "strbuf.h"
@@ -18,6 +19,9 @@
#define REFS_EXISTS_USAGE \
N_("git refs exists <ref>")
+#define REFS_OPTIMIZE_USAGE \
+ N_("git refs optimize " PACK_REFS_OPTS)
+
static int cmd_refs_migrate(int argc, const char **argv, const char *prefix,
struct repository *repo UNUSED)
{
@@ -159,6 +163,17 @@ out:
return ret;
}
+static int cmd_refs_optimize(int argc, const char **argv, const char *prefix,
+ struct repository *repo)
+{
+ static char const * const refs_optimize_usage[] = {
+ REFS_OPTIMIZE_USAGE,
+ NULL
+ };
+
+ return pack_refs_core(argc, argv, prefix, repo, refs_optimize_usage);
+}
+
int cmd_refs(int argc,
const char **argv,
const char *prefix,
@@ -169,6 +184,7 @@ int cmd_refs(int argc,
REFS_VERIFY_USAGE,
"git refs list " COMMON_USAGE_FOR_EACH_REF,
REFS_EXISTS_USAGE,
+ REFS_OPTIMIZE_USAGE,
NULL,
};
parse_opt_subcommand_fn *fn = NULL;
@@ -177,6 +193,7 @@ int cmd_refs(int argc,
OPT_SUBCOMMAND("verify", &fn, cmd_refs_verify),
OPT_SUBCOMMAND("list", &fn, cmd_refs_list),
OPT_SUBCOMMAND("exists", &fn, cmd_refs_exists),
+ OPT_SUBCOMMAND("optimize", &fn, cmd_refs_optimize),
OPT_END(),
};
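builtin/pack-refs.c shrinks to a thin wrapper and builtin/refs.c gains an 'optimize' subcommand; both delegate to pack_refs_core() from the new pack-refs library code and share the PACK_REFS_OPTS usage string. A sketch of a further, purely hypothetical caller delegating the same way, with the signature taken from the two call sites above:

    /* Sketch: a hypothetical third entry point reusing pack_refs_core(). */
    static int cmd_example_pack(int argc, const char **argv, const char *prefix,
    			    struct repository *repo)
    {
    	static const char * const usage_str[] = {
    		N_("git example-pack " PACK_REFS_OPTS),
    		NULL
    	};

    	return pack_refs_core(argc, argv, prefix, repo, usage_str);
    }
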
diff --git a/builtin/repack.c b/builtin/repack.c
index c490a51e91..e8730808c5 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -265,10 +265,11 @@ static void existing_packs_release(struct existing_packs *existing)
static void collect_pack_filenames(struct existing_packs *existing,
const struct string_list *extra_keep)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
struct strbuf buf = STRBUF_INIT;
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
int i;
const char *base;
@@ -497,10 +498,11 @@ static void init_pack_geometry(struct pack_geometry *geometry,
struct existing_packs *existing,
const struct pack_objects_args *args)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
struct strbuf buf = STRBUF_INIT;
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
if (args->local && !p->pack_local)
/*
* When asked to only repack local packfiles we skip
@@ -1137,11 +1139,12 @@ static int write_filtered_pack(const struct pack_objects_args *args,
static void combine_small_cruft_packs(FILE *in, size_t combine_cruft_below_size,
struct existing_packs *existing)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
struct strbuf buf = STRBUF_INIT;
size_t i;
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
if (!(p->is_cruft && p->pack_local))
continue;
@@ -1685,7 +1688,7 @@ int cmd_repack(int argc,
goto cleanup;
}
- reprepare_packed_git(the_repository);
+ odb_reprepare(the_repository->objects);
if (delete_redundant) {
int opts = 0;
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index 28124b324d..ef79e43715 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -2,7 +2,6 @@
#define DISABLE_SIGN_COMPARE_WARNINGS
#include "builtin.h"
-#include "bulk-checkin.h"
#include "config.h"
#include "environment.h"
#include "gettext.h"
@@ -600,12 +599,12 @@ static void unpack_all(void)
progress = start_progress(the_repository,
_("Unpacking objects"), nr_objects);
CALLOC_ARRAY(obj_list, nr_objects);
- transaction = begin_odb_transaction(the_repository->objects);
+ transaction = odb_transaction_begin(the_repository->objects);
for (i = 0; i < nr_objects; i++) {
unpack_one(i);
display_progress(progress, i + 1);
}
- end_odb_transaction(transaction);
+ odb_transaction_commit(transaction);
stop_progress(&progress);
if (delta_list)
diff --git a/builtin/update-index.c b/builtin/update-index.c
index 2ba2d29c95..8a5907767b 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -8,7 +8,6 @@
#define DISABLE_SIGN_COMPARE_WARNINGS
#include "builtin.h"
-#include "bulk-checkin.h"
#include "config.h"
#include "environment.h"
#include "gettext.h"
@@ -19,6 +18,7 @@
#include "cache-tree.h"
#include "tree-walk.h"
#include "object-file.h"
+#include "odb.h"
#include "refs.h"
#include "resolve-undo.h"
#include "parse-options.h"
@@ -70,14 +70,6 @@ static void report(const char *fmt, ...)
if (!verbose)
return;
- /*
- * It is possible, though unlikely, that a caller could use the verbose
- * output to synchronize with addition of objects to the object
- * database. The current implementation of ODB transactions leaves
- * objects invisible while a transaction is active, so flush the
- * transaction here before reporting a change made by update-index.
- */
- flush_odb_transaction(the_repository->objects->transaction);
va_start(vp, fmt);
vprintf(fmt, vp);
putchar('\n');
@@ -1131,7 +1123,7 @@ int cmd_update_index(int argc,
* Allow the object layer to optimize adding multiple objects in
* a batch.
*/
- transaction = begin_odb_transaction(the_repository->objects);
+ transaction = odb_transaction_begin(the_repository->objects);
while (ctx.argc) {
if (parseopt_state != PARSE_OPT_DONE)
parseopt_state = parse_options_step(&ctx, options,
@@ -1150,6 +1142,21 @@ int cmd_update_index(int argc,
const char *path = ctx.argv[0];
char *p;
+ /*
+ * It is possible, though unlikely, that a caller could
+ * use the verbose output to synchronize with addition
+ * of objects to the object database. The current
+ * implementation of ODB transactions leaves objects
+ * invisible while a transaction is active, so end the
+ * transaction here early before processing the next
+ * update. All further updates are performed outside of
+ * a transaction.
+ */
+ if (transaction && verbose) {
+ odb_transaction_commit(transaction);
+ transaction = NULL;
+ }
+
setup_work_tree();
p = prefix_path(prefix, prefix_length, path);
update_one(p);
@@ -1214,7 +1221,7 @@ int cmd_update_index(int argc,
/*
* By now we have added all of the new objects
*/
- end_odb_transaction(transaction);
+ odb_transaction_commit(transaction);
if (split_index > 0) {
if (repo_config_get_split_index(the_repository) == 0)
diff --git a/bulk-checkin.c b/bulk-checkin.c
deleted file mode 100644
index 124c493067..0000000000
--- a/bulk-checkin.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Copyright (c) 2011, Google Inc.
- */
-
-#define USE_THE_REPOSITORY_VARIABLE
-
-#include "git-compat-util.h"
-#include "bulk-checkin.h"
-#include "environment.h"
-#include "gettext.h"
-#include "hex.h"
-#include "lockfile.h"
-#include "repository.h"
-#include "csum-file.h"
-#include "pack.h"
-#include "strbuf.h"
-#include "tmp-objdir.h"
-#include "packfile.h"
-#include "object-file.h"
-#include "odb.h"
-
-struct bulk_checkin_packfile {
- char *pack_tmp_name;
- struct hashfile *f;
- off_t offset;
- struct pack_idx_option pack_idx_opts;
-
- struct pack_idx_entry **written;
- uint32_t alloc_written;
- uint32_t nr_written;
-};
-
-struct odb_transaction {
- struct object_database *odb;
-
- int nesting;
- struct tmp_objdir *objdir;
- struct bulk_checkin_packfile packfile;
-};
-
-static void finish_tmp_packfile(struct odb_transaction *transaction,
- struct strbuf *basename,
- unsigned char hash[])
-{
- struct bulk_checkin_packfile *state = &transaction->packfile;
- struct repository *repo = transaction->odb->repo;
- char *idx_tmp_name = NULL;
-
- stage_tmp_packfiles(repo, basename, state->pack_tmp_name,
- state->written, state->nr_written, NULL,
- &state->pack_idx_opts, hash, &idx_tmp_name);
- rename_tmp_packfile_idx(repo, basename, &idx_tmp_name);
-
- free(idx_tmp_name);
-}
-
-static void flush_bulk_checkin_packfile(struct odb_transaction *transaction)
-{
- struct bulk_checkin_packfile *state = &transaction->packfile;
- struct repository *repo = transaction->odb->repo;
- unsigned char hash[GIT_MAX_RAWSZ];
- struct strbuf packname = STRBUF_INIT;
-
- if (!state->f)
- return;
-
- if (state->nr_written == 0) {
- close(state->f->fd);
- free_hashfile(state->f);
- unlink(state->pack_tmp_name);
- goto clear_exit;
- } else if (state->nr_written == 1) {
- finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK,
- CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
- } else {
- int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0);
- fixup_pack_header_footer(repo->hash_algo, fd, hash, state->pack_tmp_name,
- state->nr_written, hash,
- state->offset);
- close(fd);
- }
-
- strbuf_addf(&packname, "%s/pack/pack-%s.",
- repo_get_object_directory(transaction->odb->repo),
- hash_to_hex_algop(hash, repo->hash_algo));
-
- finish_tmp_packfile(transaction, &packname, hash);
- for (uint32_t i = 0; i < state->nr_written; i++)
- free(state->written[i]);
-
-clear_exit:
- free(state->pack_tmp_name);
- free(state->written);
- memset(state, 0, sizeof(*state));
-
- strbuf_release(&packname);
- /* Make objects we just wrote available to ourselves */
- reprepare_packed_git(repo);
-}
-
-/*
- * Cleanup after batch-mode fsync_object_files.
- */
-static void flush_batch_fsync(struct odb_transaction *transaction)
-{
- struct strbuf temp_path = STRBUF_INIT;
- struct tempfile *temp;
-
- if (!transaction->objdir)
- return;
-
- /*
- * Issue a full hardware flush against a temporary file to ensure
- * that all objects are durable before any renames occur. The code in
- * fsync_loose_object_bulk_checkin has already issued a writeout
- * request, but it has not flushed any writeback cache in the storage
- * hardware or any filesystem logs. This fsync call acts as a barrier
- * to ensure that the data in each new object file is durable before
- * the final name is visible.
- */
- strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX",
- repo_get_object_directory(transaction->odb->repo));
- temp = xmks_tempfile(temp_path.buf);
- fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp));
- delete_tempfile(&temp);
- strbuf_release(&temp_path);
-
- /*
- * Make the object files visible in the primary ODB after their data is
- * fully durable.
- */
- tmp_objdir_migrate(transaction->objdir);
- transaction->objdir = NULL;
-}
-
-static int already_written(struct odb_transaction *transaction,
- struct object_id *oid)
-{
- /* The object may already exist in the repository */
- if (odb_has_object(transaction->odb, oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
- return 1;
-
- /* Might want to keep the list sorted */
- for (uint32_t i = 0; i < transaction->packfile.nr_written; i++)
- if (oideq(&transaction->packfile.written[i]->oid, oid))
- return 1;
-
- /* This is a new object we need to keep */
- return 0;
-}
-
-/*
- * Read the contents from fd for size bytes, streaming it to the
- * packfile in state while updating the hash in ctx. Signal a failure
- * by returning a negative value when the resulting pack would exceed
- * the pack size limit and this is not the first object in the pack,
- * so that the caller can discard what we wrote from the current pack
- * by truncating it and opening a new one. The caller will then call
- * us again after rewinding the input fd.
- *
- * The already_hashed_to pointer is kept untouched by the caller to
- * make sure we do not hash the same byte when we are called
- * again. This way, the caller does not have to checkpoint its hash
- * status before calling us just in case we ask it to call us again
- * with a new pack.
- */
-static int stream_blob_to_pack(struct bulk_checkin_packfile *state,
- struct git_hash_ctx *ctx, off_t *already_hashed_to,
- int fd, size_t size, const char *path,
- unsigned flags)
-{
- git_zstream s;
- unsigned char ibuf[16384];
- unsigned char obuf[16384];
- unsigned hdrlen;
- int status = Z_OK;
- int write_object = (flags & INDEX_WRITE_OBJECT);
- off_t offset = 0;
-
- git_deflate_init(&s, pack_compression_level);
-
- hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), OBJ_BLOB, size);
- s.next_out = obuf + hdrlen;
- s.avail_out = sizeof(obuf) - hdrlen;
-
- while (status != Z_STREAM_END) {
- if (size && !s.avail_in) {
- size_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
- ssize_t read_result = read_in_full(fd, ibuf, rsize);
- if (read_result < 0)
- die_errno("failed to read from '%s'", path);
- if ((size_t)read_result != rsize)
- die("failed to read %u bytes from '%s'",
- (unsigned)rsize, path);
- offset += rsize;
- if (*already_hashed_to < offset) {
- size_t hsize = offset - *already_hashed_to;
- if (rsize < hsize)
- hsize = rsize;
- if (hsize)
- git_hash_update(ctx, ibuf, hsize);
- *already_hashed_to = offset;
- }
- s.next_in = ibuf;
- s.avail_in = rsize;
- size -= rsize;
- }
-
- status = git_deflate(&s, size ? 0 : Z_FINISH);
-
- if (!s.avail_out || status == Z_STREAM_END) {
- if (write_object) {
- size_t written = s.next_out - obuf;
-
- /* would we bust the size limit? */
- if (state->nr_written &&
- pack_size_limit_cfg &&
- pack_size_limit_cfg < state->offset + written) {
- git_deflate_abort(&s);
- return -1;
- }
-
- hashwrite(state->f, obuf, written);
- state->offset += written;
- }
- s.next_out = obuf;
- s.avail_out = sizeof(obuf);
- }
-
- switch (status) {
- case Z_OK:
- case Z_BUF_ERROR:
- case Z_STREAM_END:
- continue;
- default:
- die("unexpected deflate failure: %d", status);
- }
- }
- git_deflate_end(&s);
- return 0;
-}
-
-/* Lazily create backing packfile for the state */
-static void prepare_to_stream(struct odb_transaction *transaction,
- unsigned flags)
-{
- struct bulk_checkin_packfile *state = &transaction->packfile;
- if (!(flags & INDEX_WRITE_OBJECT) || state->f)
- return;
-
- state->f = create_tmp_packfile(transaction->odb->repo,
- &state->pack_tmp_name);
- reset_pack_idx_option(&state->pack_idx_opts);
-
- /* Pretend we are going to write only one object */
- state->offset = write_pack_header(state->f, 1);
- if (!state->offset)
- die_errno("unable to write pack header");
-}
-
-int index_blob_bulk_checkin(struct odb_transaction *transaction,
- struct object_id *result_oid, int fd, size_t size,
- const char *path, unsigned flags)
-{
- struct bulk_checkin_packfile *state = &transaction->packfile;
- off_t seekback, already_hashed_to;
- struct git_hash_ctx ctx;
- unsigned char obuf[16384];
- unsigned header_len;
- struct hashfile_checkpoint checkpoint;
- struct pack_idx_entry *idx = NULL;
-
- seekback = lseek(fd, 0, SEEK_CUR);
- if (seekback == (off_t) -1)
- return error("cannot find the current offset");
-
- header_len = format_object_header((char *)obuf, sizeof(obuf),
- OBJ_BLOB, size);
- transaction->odb->repo->hash_algo->init_fn(&ctx);
- git_hash_update(&ctx, obuf, header_len);
-
- /* Note: idx is non-NULL when we are writing */
- if ((flags & INDEX_WRITE_OBJECT) != 0) {
- CALLOC_ARRAY(idx, 1);
-
- prepare_to_stream(transaction, flags);
- hashfile_checkpoint_init(state->f, &checkpoint);
- }
-
- already_hashed_to = 0;
-
- while (1) {
- prepare_to_stream(transaction, flags);
- if (idx) {
- hashfile_checkpoint(state->f, &checkpoint);
- idx->offset = state->offset;
- crc32_begin(state->f);
- }
- if (!stream_blob_to_pack(state, &ctx, &already_hashed_to,
- fd, size, path, flags))
- break;
- /*
- * Writing this object to the current pack will make
- * it too big; we need to truncate it, start a new
- * pack, and write into it.
- */
- if (!idx)
- BUG("should not happen");
- hashfile_truncate(state->f, &checkpoint);
- state->offset = checkpoint.offset;
- flush_bulk_checkin_packfile(transaction);
- if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
- return error("cannot seek back");
- }
- git_hash_final_oid(result_oid, &ctx);
- if (!idx)
- return 0;
-
- idx->crc32 = crc32_end(state->f);
- if (already_written(transaction, result_oid)) {
- hashfile_truncate(state->f, &checkpoint);
- state->offset = checkpoint.offset;
- free(idx);
- } else {
- oidcpy(&idx->oid, result_oid);
- ALLOC_GROW(state->written,
- state->nr_written + 1,
- state->alloc_written);
- state->written[state->nr_written++] = idx;
- }
- return 0;
-}
-
-void prepare_loose_object_bulk_checkin(struct odb_transaction *transaction)
-{
- /*
- * We lazily create the temporary object directory
- * the first time an object might be added, since
- * callers may not know whether any objects will be
- * added at the time they call begin_odb_transaction.
- */
- if (!transaction || transaction->objdir)
- return;
-
- transaction->objdir = tmp_objdir_create(transaction->odb->repo, "bulk-fsync");
- if (transaction->objdir)
- tmp_objdir_replace_primary_odb(transaction->objdir, 0);
-}
-
-void fsync_loose_object_bulk_checkin(struct odb_transaction *transaction,
- int fd, const char *filename)
-{
- /*
- * If we have an active ODB transaction, we issue a call that
- * cleans the filesystem page cache but avoids a hardware flush
- * command. Later on we will issue a single hardware flush
- * before renaming the objects to their final names as part of
- * flush_batch_fsync.
- */
- if (!transaction || !transaction->objdir ||
- git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) {
- if (errno == ENOSYS)
- warning(_("core.fsyncMethod = batch is unsupported on this platform"));
- fsync_or_die(fd, filename);
- }
-}
-
-struct odb_transaction *begin_odb_transaction(struct object_database *odb)
-{
- if (!odb->transaction) {
- CALLOC_ARRAY(odb->transaction, 1);
- odb->transaction->odb = odb;
- }
-
- odb->transaction->nesting += 1;
-
- return odb->transaction;
-}
-
-void flush_odb_transaction(struct odb_transaction *transaction)
-{
- if (!transaction)
- return;
-
- flush_batch_fsync(transaction);
- flush_bulk_checkin_packfile(transaction);
-}
-
-void end_odb_transaction(struct odb_transaction *transaction)
-{
- if (!transaction || transaction->nesting == 0)
- BUG("Unbalanced ODB transaction nesting");
-
- transaction->nesting -= 1;
-
- if (transaction->nesting)
- return;
-
- flush_odb_transaction(transaction);
- transaction->odb->transaction = NULL;
- free(transaction);
-}
diff --git a/bulk-checkin.h b/bulk-checkin.h
deleted file mode 100644
index ac8887f476..0000000000
--- a/bulk-checkin.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2011, Google Inc.
- */
-#ifndef BULK_CHECKIN_H
-#define BULK_CHECKIN_H
-
-#include "object.h"
-#include "odb.h"
-
-struct odb_transaction;
-
-void prepare_loose_object_bulk_checkin(struct odb_transaction *transaction);
-void fsync_loose_object_bulk_checkin(struct odb_transaction *transaction,
- int fd, const char *filename);
-
-/*
- * This writes the specified object to a packfile. Objects written here
- * during the same transaction are written to the same packfile. The
- * packfile is not flushed until the transaction is flushed. The caller
- * is expected to ensure a valid transaction is setup for objects to be
- * recorded to.
- *
- * This also bypasses the usual "convert-to-git" dance, and that is on
- * purpose. We could write a streaming version of the converting
- * functions and insert that before feeding the data to fast-import
- * (or equivalent in-core API described above). However, that is
- * somewhat complicated, as we do not know the size of the filter
- * result, which we need to know beforehand when writing a git object.
- * Since the primary motivation for trying to stream from the working
- * tree file and to avoid mmaping it in core is to deal with large
- * binary blobs, they generally do not want to get any conversion, and
- * callers should avoid this code path when filters are requested.
- */
-int index_blob_bulk_checkin(struct odb_transaction *transaction,
- struct object_id *oid, int fd, size_t size,
- const char *path, unsigned flags);
-
-/*
- * Tell the object database to optimize for adding
- * multiple objects. end_odb_transaction must be called
- * to make new objects visible. Transactions can be nested,
- * and objects are only visible after the outermost transaction
- * is complete or the transaction is flushed.
- */
-struct odb_transaction *begin_odb_transaction(struct object_database *odb);
-
-/*
- * Make any objects that are currently part of a pending object
- * database transaction visible. It is valid to call this function
- * even if no transaction is active.
- */
-void flush_odb_transaction(struct odb_transaction *transaction);
-
-/*
- * Tell the object database to make any objects from the
- * current transaction visible if this is the final nested
- * transaction.
- */
-void end_odb_transaction(struct odb_transaction *transaction);
-
-#endif
diff --git a/cache-tree.c b/cache-tree.c
index d225554eed..2aba47060e 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -8,7 +8,6 @@
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
-#include "bulk-checkin.h"
#include "object-file.h"
#include "odb.h"
#include "read-cache-ll.h"
@@ -490,10 +489,10 @@ int cache_tree_update(struct index_state *istate, int flags)
trace_performance_enter();
trace2_region_enter("cache_tree", "update", the_repository);
- transaction = begin_odb_transaction(the_repository->objects);
+ transaction = odb_transaction_begin(the_repository->objects);
i = update_one(istate->cache_tree, istate->cache, istate->cache_nr,
"", 0, &skip, flags);
- end_odb_transaction(transaction);
+ odb_transaction_commit(transaction);
trace2_region_leave("cache_tree", "update", the_repository);
trace_performance_leave("cache_tree_update");
if (i < 0)
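The cache-tree.c hunk above shows the renamed object-database transaction API in use (begin_odb_transaction/end_odb_transaction from the removed bulk-checkin.h become odb_transaction_begin/odb_transaction_commit). A minimal usage sketch, assuming the begin/commit pairing shown in that hunk; everything other than the two API calls is a placeholder:

    struct odb_transaction *tx;

    tx = odb_transaction_begin(the_repository->objects);
    /* queue up many object writes; they only become visible on commit */
    write_objects();                 /* placeholder for the batched work */
    odb_transaction_commit(tx);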
diff --git a/ci/install-dependencies.sh b/ci/install-dependencies.sh
index d061a47293..a8dcd9b9bc 100755
--- a/ci/install-dependencies.sh
+++ b/ci/install-dependencies.sh
@@ -30,8 +30,12 @@ alpine-*)
bash cvs gnupg perl-cgi perl-dbd-sqlite perl-io-tty >/dev/null
;;
fedora-*|almalinux-*)
+ case "$jobname" in
+ *-meson)
+ MESON_DEPS="meson ninja";;
+ esac
dnf -yq update >/dev/null &&
- dnf -yq install shadow-utils sudo make gcc findutils diffutils perl python3 gawk gettext zlib-devel expat-devel openssl-devel curl-devel pcre2-devel >/dev/null
+ dnf -yq install shadow-utils sudo make pkg-config gcc findutils diffutils perl python3 gawk gettext zlib-devel expat-devel openssl-devel curl-devel pcre2-devel $MESON_DEPS cargo >/dev/null
;;
ubuntu-*|i386/ubuntu-*|debian-*)
# Required so that apt doesn't wait for user input on certain packages.
@@ -58,9 +62,18 @@ ubuntu-*|i386/ubuntu-*|debian-*)
make libssl-dev libcurl4-openssl-dev libexpat-dev wget sudo default-jre \
tcl tk gettext zlib1g-dev perl-modules liberror-perl libauthen-sasl-perl \
libemail-valid-perl libio-pty-perl libio-socket-ssl-perl libnet-smtp-ssl-perl libdbd-sqlite3-perl libcgi-pm-perl \
- libsecret-1-dev libpcre2-dev meson ninja-build pkg-config \
+ libsecret-1-dev libpcre2-dev meson ninja-build pkg-config cargo \
${CC_PACKAGE:-${CC:-gcc}} $PYTHON_PACKAGE
+ # Starting with Ubuntu 25.10, sudo can be provided via either sudo(1)
+ # or sudo-rs(1), with the latter being the default. The problem is that
+ # sudo-rs does not support `--preserve-env`, which we rely on in our
+ # CI. We thus revert to the C implementation.
+ if test -f /etc/alternatives/sudo
+ then
+ sudo update-alternatives --set sudo /usr/bin/sudo.ws
+ fi
+
case "$distro" in
ubuntu-*)
mkdir --parents "$CUSTOM_PATH"
diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh
index a21834043f..8bda62b921 100755
--- a/ci/run-build-and-tests.sh
+++ b/ci/run-build-and-tests.sh
@@ -5,11 +5,12 @@
. ${0%/*}/lib.sh
-run_tests=t
-
case "$jobname" in
-linux-breaking-changes)
+fedora-breaking-changes-musl|linux-breaking-changes)
export WITH_BREAKING_CHANGES=YesPlease
+ export WITH_RUST=YesPlease
+ MESONFLAGS="$MESONFLAGS -Dbreaking_changes=true"
+ MESONFLAGS="$MESONFLAGS -Drust=enabled"
;;
linux-TEST-vars)
export OPENSSL_SHA1_UNSAFE=YesPlease
@@ -35,12 +36,6 @@ linux-sha256)
linux-reftable|linux-reftable-leaks|osx-reftable)
export GIT_TEST_DEFAULT_REF_FORMAT=reftable
;;
-pedantic)
- # Don't run the tests; we only care about whether Git can be
- # built.
- export DEVOPTS=pedantic
- run_tests=
- ;;
esac
case "$jobname" in
@@ -53,21 +48,15 @@ case "$jobname" in
-Dtest_output_directory="${TEST_OUTPUT_DIRECTORY:-$(pwd)/t}" \
$MESONFLAGS
group "Build" meson compile -C build --
- if test -n "$run_tests"
- then
- group "Run tests" meson test -C build --print-errorlogs --test-args="$GIT_TEST_OPTS" || (
- ./t/aggregate-results.sh "${TEST_OUTPUT_DIRECTORY:-t}/test-results"
- handle_failed_tests
- )
- fi
+ group "Run tests" meson test -C build --print-errorlogs --test-args="$GIT_TEST_OPTS" || (
+ ./t/aggregate-results.sh "${TEST_OUTPUT_DIRECTORY:-t}/test-results"
+ handle_failed_tests
+ )
;;
*)
group Build make
- if test -n "$run_tests"
- then
- group "Run tests" make test ||
- handle_failed_tests
- fi
+ group "Run tests" make test ||
+ handle_failed_tests
;;
esac
diff --git a/ci/test-documentation.sh b/ci/test-documentation.sh
index 49f87f50fd..5e4fd8fbd7 100755
--- a/ci/test-documentation.sh
+++ b/ci/test-documentation.sh
@@ -48,13 +48,13 @@ check_unignored_build_artifacts
# Build docs with Meson and AsciiDoc
meson setup build-asciidoc -Ddocs=html,man -Ddocs_backend=asciidoc
-meson compile -C build-asciidoc
+meson compile -C build-asciidoc docs
check_docs build-asciidoc AsciiDoc
rm -rf build-asciidoc
# Build docs with Meson and AsciiDoctor
meson setup build-asciidoctor -Ddocs=html,man -Ddocs_backend=asciidoctor
-meson compile -C build-asciidoctor
+meson compile -C build-asciidoctor docs
check_docs build-asciidoctor Asciidoctor
rm -rf build-asciidoctor
diff --git a/commit-graph.c b/commit-graph.c
index 2f20f66cfd..474454db73 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -735,7 +735,7 @@ struct commit_graph *read_commit_graph_one(struct odb_source *source)
* On the first invocation, this function attempts to load the commit
* graph if the repository is configured to have one.
*/
-static int prepare_commit_graph(struct repository *r)
+static struct commit_graph *prepare_commit_graph(struct repository *r)
{
struct odb_source *source;
@@ -747,10 +747,10 @@ static int prepare_commit_graph(struct repository *r)
* we want to disable even an already-loaded graph file.
*/
if (!r->gitdir || r->commit_graph_disabled)
- return 0;
+ return NULL;
if (r->objects->commit_graph_attempted)
- return !!r->objects->commit_graph;
+ return r->objects->commit_graph;
r->objects->commit_graph_attempted = 1;
prepare_repo_settings(r);
@@ -763,10 +763,10 @@ static int prepare_commit_graph(struct repository *r)
* so that commit graph loading is not attempted again for this
* repository.)
*/
- return 0;
+ return NULL;
if (!commit_graph_compatible(r))
- return 0;
+ return NULL;
odb_prepare_alternates(r->objects);
for (source = r->objects->sources; source; source = source->next) {
@@ -775,20 +775,17 @@ static int prepare_commit_graph(struct repository *r)
break;
}
- return !!r->objects->commit_graph;
+ return r->objects->commit_graph;
}
int generation_numbers_enabled(struct repository *r)
{
uint32_t first_generation;
struct commit_graph *g;
- if (!prepare_commit_graph(r))
- return 0;
-
- g = r->objects->commit_graph;
- if (!g->num_commits)
- return 0;
+ g = prepare_commit_graph(r);
+ if (!g || !g->num_commits)
+ return 0;
first_generation = get_be32(g->chunk_commit_data +
g->hash_algo->rawsz + 8) >> 2;
@@ -799,12 +796,9 @@ int generation_numbers_enabled(struct repository *r)
int corrected_commit_dates_enabled(struct repository *r)
{
struct commit_graph *g;
- if (!prepare_commit_graph(r))
- return 0;
-
- g = r->objects->commit_graph;
- if (!g->num_commits)
+ g = prepare_commit_graph(r);
+ if (!g || !g->num_commits)
return 0;
return g->read_generation_data;
@@ -1014,26 +1008,32 @@ static int find_commit_pos_in_graph(struct commit *item, struct commit_graph *g,
}
}
-int repo_find_commit_pos_in_graph(struct repository *r, struct commit *c,
- uint32_t *pos)
+struct commit_graph *repo_find_commit_pos_in_graph(struct repository *r,
+ struct commit *c,
+ uint32_t *pos)
{
- if (!prepare_commit_graph(r))
- return 0;
- return find_commit_pos_in_graph(c, r->objects->commit_graph, pos);
+ struct commit_graph *g = prepare_commit_graph(r);
+ if (!g)
+ return NULL;
+ if (!find_commit_pos_in_graph(c, g, pos))
+ return NULL;
+ return g;
}
struct commit *lookup_commit_in_graph(struct repository *repo, const struct object_id *id)
{
static int commit_graph_paranoia = -1;
+ struct commit_graph *g;
struct commit *commit;
uint32_t pos;
if (commit_graph_paranoia == -1)
commit_graph_paranoia = git_env_bool(GIT_COMMIT_GRAPH_PARANOIA, 0);
- if (!prepare_commit_graph(repo))
+ g = prepare_commit_graph(repo);
+ if (!g)
return NULL;
- if (!search_commit_pos_in_graph(id, repo->objects->commit_graph, &pos))
+ if (!search_commit_pos_in_graph(id, g, &pos))
return NULL;
if (commit_graph_paranoia && !odb_has_object(repo->objects, id, 0))
return NULL;
@@ -1044,7 +1044,7 @@ struct commit *lookup_commit_in_graph(struct repository *repo, const struct obje
if (commit->object.parsed)
return commit;
- if (!fill_commit_in_graph(commit, repo->objects->commit_graph, pos))
+ if (!fill_commit_in_graph(commit, g, pos))
return NULL;
return commit;
@@ -1067,6 +1067,7 @@ static int parse_commit_in_graph_one(struct commit_graph *g,
int parse_commit_in_graph(struct repository *r, struct commit *item)
{
static int checked_env = 0;
+ struct commit_graph *g;
if (!checked_env &&
git_env_bool(GIT_TEST_COMMIT_GRAPH_DIE_ON_PARSE, 0))
@@ -1074,16 +1075,20 @@ int parse_commit_in_graph(struct repository *r, struct commit *item)
GIT_TEST_COMMIT_GRAPH_DIE_ON_PARSE);
checked_env = 1;
- if (!prepare_commit_graph(r))
+ g = prepare_commit_graph(r);
+ if (!g)
return 0;
- return parse_commit_in_graph_one(r->objects->commit_graph, item);
+ return parse_commit_in_graph_one(g, item);
}
void load_commit_graph_info(struct repository *r, struct commit *item)
{
+ struct commit_graph *g;
uint32_t pos;
- if (repo_find_commit_pos_in_graph(r, item, &pos))
- fill_commit_graph_info(item, r->objects->commit_graph, pos);
+
+ g = repo_find_commit_pos_in_graph(r, item, &pos);
+ if (g)
+ fill_commit_graph_info(item, g, pos);
}
static struct tree *load_tree_for_commit(struct commit_graph *g,
@@ -2226,7 +2231,8 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
return 0;
}
-static void split_graph_merge_strategy(struct write_commit_graph_context *ctx)
+static void split_graph_merge_strategy(struct write_commit_graph_context *ctx,
+ struct commit_graph *graph_to_merge)
{
struct commit_graph *g;
uint32_t num_commits;
@@ -2245,7 +2251,7 @@ static void split_graph_merge_strategy(struct write_commit_graph_context *ctx)
flags = ctx->opts->split_flags;
}
- g = ctx->r->objects->commit_graph;
+ g = graph_to_merge;
num_commits = ctx->commits.nr;
if (flags == COMMIT_GRAPH_SPLIT_REPLACE)
ctx->num_commit_graphs_after = 1;
@@ -2297,7 +2303,7 @@ static void split_graph_merge_strategy(struct write_commit_graph_context *ctx)
ctx->commit_graph_filenames_after[i] = xstrdup(ctx->commit_graph_filenames_before[i]);
i = ctx->num_commit_graphs_before - 1;
- g = ctx->r->objects->commit_graph;
+ g = graph_to_merge;
while (g) {
if (i < ctx->num_commit_graphs_after)
@@ -2395,9 +2401,9 @@ static void sort_and_scan_merged_commits(struct write_commit_graph_context *ctx)
stop_progress(&ctx->progress);
}
-static void merge_commit_graphs(struct write_commit_graph_context *ctx)
+static void merge_commit_graphs(struct write_commit_graph_context *ctx,
+ struct commit_graph *g)
{
- struct commit_graph *g = ctx->r->objects->commit_graph;
uint32_t current_graph_number = ctx->num_commit_graphs_before;
while (g && current_graph_number >= ctx->num_commit_graphs_after) {
@@ -2524,6 +2530,7 @@ int write_commit_graph(struct odb_source *source,
int replace = 0;
struct bloom_filter_settings bloom_settings = DEFAULT_BLOOM_FILTER_SETTINGS;
struct topo_level_slab topo_levels;
+ struct commit_graph *g;
prepare_repo_settings(r);
if (!r->settings.core_commit_graph) {
@@ -2552,23 +2559,13 @@ int write_commit_graph(struct odb_source *source,
init_topo_level_slab(&topo_levels);
ctx.topo_levels = &topo_levels;
- prepare_commit_graph(ctx.r);
- if (ctx.r->objects->commit_graph) {
- struct commit_graph *g = ctx.r->objects->commit_graph;
-
- while (g) {
- g->topo_levels = &topo_levels;
- g = g->base_graph;
- }
- }
+ g = prepare_commit_graph(ctx.r);
+ for (struct commit_graph *chain = g; chain; chain = chain->base_graph)
+ chain->topo_levels = &topo_levels;
if (flags & COMMIT_GRAPH_WRITE_BLOOM_FILTERS)
ctx.changed_paths = 1;
if (!(flags & COMMIT_GRAPH_NO_WRITE_BLOOM_FILTERS)) {
- struct commit_graph *g;
-
- g = ctx.r->objects->commit_graph;
-
/* We have changed-paths already. Keep them in the next graph */
if (g && g->bloom_filter_settings) {
ctx.changed_paths = 1;
@@ -2585,22 +2582,15 @@ int write_commit_graph(struct odb_source *source,
bloom_settings.hash_version = bloom_settings.hash_version == 2 ? 2 : 1;
if (ctx.split) {
- struct commit_graph *g = ctx.r->objects->commit_graph;
-
- while (g) {
+ for (struct commit_graph *chain = g; chain; chain = chain->base_graph)
ctx.num_commit_graphs_before++;
- g = g->base_graph;
- }
if (ctx.num_commit_graphs_before) {
ALLOC_ARRAY(ctx.commit_graph_filenames_before, ctx.num_commit_graphs_before);
i = ctx.num_commit_graphs_before;
- g = ctx.r->objects->commit_graph;
- while (g) {
- ctx.commit_graph_filenames_before[--i] = xstrdup(g->filename);
- g = g->base_graph;
- }
+ for (struct commit_graph *chain = g; chain; chain = chain->base_graph)
+ ctx.commit_graph_filenames_before[--i] = xstrdup(chain->filename);
}
if (ctx.opts)
@@ -2609,8 +2599,7 @@ int write_commit_graph(struct odb_source *source,
ctx.approx_nr_objects = repo_approximate_object_count(r);
- if (ctx.append && ctx.r->objects->commit_graph) {
- struct commit_graph *g = ctx.r->objects->commit_graph;
+ if (ctx.append && g) {
for (i = 0; i < g->num_commits; i++) {
struct object_id oid;
oidread(&oid, g->chunk_oid_lookup + st_mult(g->hash_algo->rawsz, i),
@@ -2649,14 +2638,15 @@ int write_commit_graph(struct odb_source *source,
goto cleanup;
if (ctx.split) {
- split_graph_merge_strategy(&ctx);
+ split_graph_merge_strategy(&ctx, g);
if (!replace)
- merge_commit_graphs(&ctx);
- } else
+ merge_commit_graphs(&ctx, g);
+ } else {
ctx.num_commit_graphs_after = 1;
+ }
- ctx.trust_generation_numbers = validate_mixed_generation_chain(ctx.r->objects->commit_graph);
+ ctx.trust_generation_numbers = validate_mixed_generation_chain(g);
compute_topological_levels(&ctx);
if (ctx.write_generation_data)
diff --git a/commit-graph.h b/commit-graph.h
index 4899b54ef8..f6a5433641 100644
--- a/commit-graph.h
+++ b/commit-graph.h
@@ -48,10 +48,9 @@ int open_commit_graph_chain(const char *chain_file, int *fd, struct stat *st,
int parse_commit_in_graph(struct repository *r, struct commit *item);
/*
- * Fills `*pos` with the graph position of `c`, and returns 1 if `c` is
- * found in the commit-graph belonging to `r`, or 0 otherwise.
- * Initializes the commit-graph belonging to `r` if it hasn't been
- * already.
+ * Fills `*pos` with the graph position of `c`, and returns the graph `c` is
+ * found in, or NULL otherwise. Initializes the commit-graphs belonging to
+ * `r` if they haven't been already.
*
* Note: this is a low-level helper that does not alter any slab data
* associated with `c`. Useful in circumstances where the slab data is
@@ -59,8 +58,9 @@ int parse_commit_in_graph(struct repository *r, struct commit *item);
*
* In most cases, callers should use `parse_commit_in_graph()` instead.
*/
-int repo_find_commit_pos_in_graph(struct repository *r, struct commit *c,
- uint32_t *pos);
+struct commit_graph *repo_find_commit_pos_in_graph(struct repository *r,
+ struct commit *c,
+ uint32_t *pos);
/*
* Look up the given commit ID in the commit-graph. This will only return a
diff --git a/config.c b/config.c
index 74bf76a97e..71b136bf7f 100644
--- a/config.c
+++ b/config.c
@@ -1278,11 +1278,23 @@ int git_config_string(char **dest, const char *var, const char *value)
int git_config_pathname(char **dest, const char *var, const char *value)
{
+ int is_optional;
+ char *path;
+
if (!value)
return config_error_nonbool(var);
- *dest = interpolate_path(value, 0);
- if (!*dest)
+
+ is_optional = skip_prefix(value, ":(optional)", &value);
+ path = interpolate_path(value, 0);
+ if (!path)
die(_("failed to expand user dir in: '%s'"), value);
+
+ if (is_optional && is_missing_file(path)) {
+ free(path);
+ return 0;
+ }
+
+ *dest = path;
return 0;
}
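The git_config_pathname() change above adds an ":(optional)" prefix: when the prefix is present and the interpolated path points at a missing file, the value is silently skipped (the function still returns 0, but leaves *dest untouched) instead of reporting an error. A hedged caller sketch; the config key is only an example and the consumer helper is hypothetical:

    char *path = NULL;

    /* ":(optional)" turns a missing file into a no-op rather than an error */
    if (!git_config_pathname(&path, "blame.ignorerevsfile",
                             ":(optional)~/.git-blame-ignore-revs") && path)
        use_ignore_revs_file(path);  /* hypothetical consumer */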
diff --git a/connected.c b/connected.c
index 18c13245d8..b288a18b17 100644
--- a/connected.c
+++ b/connected.c
@@ -72,11 +72,12 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
* Before checking for promisor packs, be sure we have the
* latest pack-files loaded into memory.
*/
- reprepare_packed_git(the_repository);
+ odb_reprepare(the_repository->objects);
do {
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *p;
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
if (!p->pack_promisor)
continue;
if (find_pack_entry_one(oid, p))
diff --git a/contrib/contacts/meson.build b/contrib/contacts/meson.build
index 73d82dfe52..c8fdb35ed9 100644
--- a/contrib/contacts/meson.build
+++ b/contrib/contacts/meson.build
@@ -20,7 +20,7 @@ if get_option('docs').contains('man')
output: 'git-contacts.xml',
)
- custom_target(
+ doc_targets += custom_target(
command: [
xmlto,
'-m', '@INPUT@',
@@ -39,7 +39,7 @@ if get_option('docs').contains('man')
endif
if get_option('docs').contains('html')
- custom_target(
+ doc_targets += custom_target(
command: asciidoc_common_options + [
'--backend=' + asciidoc_html,
'--doctype=manpage',
diff --git a/contrib/subtree/meson.build b/contrib/subtree/meson.build
index 98dd8e0c8e..46cdbcc30c 100644
--- a/contrib/subtree/meson.build
+++ b/contrib/subtree/meson.build
@@ -38,7 +38,7 @@ if get_option('docs').contains('man')
output: 'git-subtree.xml',
)
- custom_target(
+ doc_targets += custom_target(
command: [
xmlto,
'-m', '@INPUT@',
@@ -57,7 +57,7 @@ if get_option('docs').contains('man')
endif
if get_option('docs').contains('html')
- custom_target(
+ doc_targets += custom_target(
command: asciidoc_common_options + [
'--backend=' + asciidoc_html,
'--doctype=manpage',
diff --git a/dir.c b/dir.c
index 71108ac79b..0a67a99cb3 100644
--- a/dir.c
+++ b/dir.c
@@ -3579,7 +3579,8 @@ static void write_one_dir(struct untracked_cache_dir *untracked,
struct stat_data stat_data;
struct strbuf *out = &wd->out;
unsigned char intbuf[16];
- unsigned int intlen, value;
+ unsigned int value;
+ uint8_t intlen;
int i = wd->index++;
/*
@@ -3632,7 +3633,7 @@ void write_untracked_extension(struct strbuf *out, struct untracked_cache *untra
struct ondisk_untracked_cache *ouc;
struct write_data wd;
unsigned char varbuf[16];
- int varint_len;
+ uint8_t varint_len;
const unsigned hashsz = the_hash_algo->rawsz;
CALLOC_ARRAY(ouc, 1);
@@ -3738,7 +3739,7 @@ static int read_one_dir(struct untracked_cache_dir **untracked_,
struct untracked_cache_dir ud, *untracked;
const unsigned char *data = rd->data, *end = rd->end;
const unsigned char *eos;
- unsigned int value;
+ uint64_t value;
int i;
memset(&ud, 0, sizeof(ud));
@@ -3830,7 +3831,8 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
struct read_data rd;
const unsigned char *next = data, *end = (const unsigned char *)data + sz;
const char *ident;
- int ident_len;
+ uint64_t ident_len;
+ uint64_t varint_len;
ssize_t len;
const char *exclude_per_dir;
const unsigned hashsz = the_hash_algo->rawsz;
@@ -3867,8 +3869,8 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
if (next >= end)
goto done2;
- len = decode_varint(&next);
- if (next > end || len == 0)
+ varint_len = decode_varint(&next);
+ if (next > end || varint_len == 0)
goto done2;
rd.valid = ewah_new();
@@ -3877,9 +3879,9 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
rd.data = next;
rd.end = end;
rd.index = 0;
- ALLOC_ARRAY(rd.ucd, len);
+ ALLOC_ARRAY(rd.ucd, varint_len);
- if (read_one_dir(&uc->root, &rd) || rd.index != len)
+ if (read_one_dir(&uc->root, &rd) || rd.index != varint_len)
goto done;
next = rd.data;
diff --git a/fetch-pack.c b/fetch-pack.c
index 6ed5662951..fe7a84bf2f 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -1983,7 +1983,7 @@ static void update_shallow(struct fetch_pack_args *args,
* remote is shallow, but this is a clone, there are
* no objects in repo to worry about. Accept any
* shallow points that exist in the pack (iow in repo
- * after get_pack() and reprepare_packed_git())
+ * after get_pack() and odb_reprepare())
*/
struct oid_array extra = OID_ARRAY_INIT;
struct object_id *oid = si->shallow->oid;
@@ -2108,7 +2108,7 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
&si, pack_lockfiles);
}
- reprepare_packed_git(the_repository);
+ odb_reprepare(the_repository->objects);
if (!args->cloning && args->deepen) {
struct check_connected_options opt = CHECK_CONNECTED_INIT;
diff --git a/fsck.h b/fsck.h
index dd7df3d5b3..759df97655 100644
--- a/fsck.h
+++ b/fsck.h
@@ -33,15 +33,27 @@ enum fsck_msg_type {
FUNC(BAD_PACKED_REF_ENTRY, ERROR) \
FUNC(BAD_PACKED_REF_HEADER, ERROR) \
FUNC(BAD_PARENT_SHA1, ERROR) \
+ FUNC(BAD_REFERENT_NAME, ERROR) \
FUNC(BAD_REF_CONTENT, ERROR) \
FUNC(BAD_REF_FILETYPE, ERROR) \
FUNC(BAD_REF_NAME, ERROR) \
- FUNC(BAD_REFERENT_NAME, ERROR) \
FUNC(BAD_TIMEZONE, ERROR) \
FUNC(BAD_TREE, ERROR) \
FUNC(BAD_TREE_SHA1, ERROR) \
FUNC(BAD_TYPE, ERROR) \
FUNC(DUPLICATE_ENTRIES, ERROR) \
+ FUNC(GITATTRIBUTES_BLOB, ERROR) \
+ FUNC(GITATTRIBUTES_LARGE, ERROR) \
+ FUNC(GITATTRIBUTES_LINE_LENGTH, ERROR) \
+ FUNC(GITATTRIBUTES_MISSING, ERROR) \
+ FUNC(GITMODULES_BLOB, ERROR) \
+ FUNC(GITMODULES_LARGE, ERROR) \
+ FUNC(GITMODULES_MISSING, ERROR) \
+ FUNC(GITMODULES_NAME, ERROR) \
+ FUNC(GITMODULES_PATH, ERROR) \
+ FUNC(GITMODULES_SYMLINK, ERROR) \
+ FUNC(GITMODULES_UPDATE, ERROR) \
+ FUNC(GITMODULES_URL, ERROR) \
FUNC(MISSING_AUTHOR, ERROR) \
FUNC(MISSING_COMMITTER, ERROR) \
FUNC(MISSING_EMAIL, ERROR) \
@@ -60,39 +72,28 @@ enum fsck_msg_type {
FUNC(TREE_NOT_SORTED, ERROR) \
FUNC(UNKNOWN_TYPE, ERROR) \
FUNC(ZERO_PADDED_DATE, ERROR) \
- FUNC(GITMODULES_MISSING, ERROR) \
- FUNC(GITMODULES_BLOB, ERROR) \
- FUNC(GITMODULES_LARGE, ERROR) \
- FUNC(GITMODULES_NAME, ERROR) \
- FUNC(GITMODULES_SYMLINK, ERROR) \
- FUNC(GITMODULES_URL, ERROR) \
- FUNC(GITMODULES_PATH, ERROR) \
- FUNC(GITMODULES_UPDATE, ERROR) \
- FUNC(GITATTRIBUTES_MISSING, ERROR) \
- FUNC(GITATTRIBUTES_LARGE, ERROR) \
- FUNC(GITATTRIBUTES_LINE_LENGTH, ERROR) \
- FUNC(GITATTRIBUTES_BLOB, ERROR) \
/* warnings */ \
+ FUNC(BAD_REFTABLE_TABLE_NAME, WARN) \
FUNC(EMPTY_NAME, WARN) \
FUNC(FULL_PATHNAME, WARN) \
FUNC(HAS_DOT, WARN) \
FUNC(HAS_DOTDOT, WARN) \
FUNC(HAS_DOTGIT, WARN) \
+ FUNC(LARGE_PATHNAME, WARN) \
FUNC(NULL_SHA1, WARN) \
- FUNC(ZERO_PADDED_FILEMODE, WARN) \
FUNC(NUL_IN_COMMIT, WARN) \
- FUNC(LARGE_PATHNAME, WARN) \
+ FUNC(ZERO_PADDED_FILEMODE, WARN) \
/* infos (reported as warnings, but ignored by default) */ \
FUNC(BAD_FILEMODE, INFO) \
+ FUNC(BAD_TAG_NAME, INFO) \
FUNC(EMPTY_PACKED_REFS_FILE, INFO) \
- FUNC(GITMODULES_PARSE, INFO) \
- FUNC(GITIGNORE_SYMLINK, INFO) \
FUNC(GITATTRIBUTES_SYMLINK, INFO) \
+ FUNC(GITIGNORE_SYMLINK, INFO) \
+ FUNC(GITMODULES_PARSE, INFO) \
FUNC(MAILMAP_SYMLINK, INFO) \
- FUNC(BAD_TAG_NAME, INFO) \
FUNC(MISSING_TAGGER_ENTRY, INFO) \
- FUNC(SYMLINK_REF, INFO) \
FUNC(REF_MISSING_NEWLINE, INFO) \
+ FUNC(SYMLINK_REF, INFO) \
FUNC(SYMREF_TARGET_IS_NOT_A_REF, INFO) \
FUNC(TRAILING_REF_CONTENT, INFO) \
/* ignored (elevated when requested) */ \
diff --git a/git-compat-util.h b/git-compat-util.h
index 9408f463e3..398e0fac4f 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -460,7 +460,7 @@ void warning_errno(const char *err, ...) __attribute__((format (printf, 1, 2)));
void show_usage_if_asked(int ac, const char **av, const char *err);
-NORETURN void you_still_use_that(const char *command_name);
+NORETURN void you_still_use_that(const char *command_name, const char *hint);
#ifndef NO_OPENSSL
#ifdef APPLE_COMMON_CRYPTO
diff --git a/git.c b/git.c
index d020eef021..c5fad56813 100644
--- a/git.c
+++ b/git.c
@@ -28,6 +28,7 @@
#define NEED_WORK_TREE (1<<3)
#define DELAY_PAGER_CONFIG (1<<4)
#define NO_PARSEOPT (1<<5) /* parse-options is not used */
+#define DEPRECATED (1<<6)
struct cmd_struct {
const char *cmd;
@@ -51,7 +52,9 @@ const char git_more_info_string[] =
static int use_pager = -1;
-static void list_builtins(struct string_list *list, unsigned int exclude_option);
+static void list_builtins(struct string_list *list,
+ unsigned int include_option,
+ unsigned int exclude_option);
static void exclude_helpers_from_list(struct string_list *list)
{
@@ -88,7 +91,7 @@ static int list_cmds(const char *spec)
int len = sep - spec;
if (match_token(spec, len, "builtins"))
- list_builtins(&list, 0);
+ list_builtins(&list, 0, 0);
else if (match_token(spec, len, "main"))
list_all_main_cmds(&list);
else if (match_token(spec, len, "others"))
@@ -99,6 +102,8 @@ static int list_cmds(const char *spec)
list_aliases(&list);
else if (match_token(spec, len, "config"))
list_cmds_by_config(&list);
+ else if (match_token(spec, len, "deprecated"))
+ list_builtins(&list, DEPRECATED, 0);
else if (len > 5 && !strncmp(spec, "list-", 5)) {
struct strbuf sb = STRBUF_INIT;
@@ -322,7 +327,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
if (!strcmp(cmd, "parseopt")) {
struct string_list list = STRING_LIST_INIT_DUP;
- list_builtins(&list, NO_PARSEOPT);
+ list_builtins(&list, 0, NO_PARSEOPT);
for (size_t i = 0; i < list.nr; i++)
printf("%s ", list.items[i].string);
string_list_clear(&list, 0);
@@ -360,7 +365,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
return (*argv) - orig_argv;
}
-static int handle_alias(struct strvec *args)
+static int handle_alias(struct strvec *args, struct string_list *expanded_aliases)
{
int envchanged = 0, ret = 0, saved_errno = errno;
int count, option_count;
@@ -371,6 +376,8 @@ static int handle_alias(struct strvec *args)
alias_command = args->v[0];
alias_string = alias_lookup(alias_command);
if (alias_string) {
+ struct string_list_item *seen;
+
if (args->nr == 2 && !strcmp(args->v[1], "-h"))
fprintf_ln(stderr, _("'%s' is aliased to '%s'"),
alias_command, alias_string);
@@ -418,6 +425,25 @@ static int handle_alias(struct strvec *args)
if (!strcmp(alias_command, new_argv[0]))
die(_("recursive alias: %s"), alias_command);
+ string_list_append(expanded_aliases, alias_command);
+ seen = unsorted_string_list_lookup(expanded_aliases,
+ new_argv[0]);
+
+ if (seen) {
+ struct strbuf sb = STRBUF_INIT;
+ for (size_t i = 0; i < expanded_aliases->nr; i++) {
+ struct string_list_item *item = &expanded_aliases->items[i];
+
+ strbuf_addf(&sb, "\n %s", item->string);
+ if (item == seen)
+ strbuf_addstr(&sb, " <==");
+ else if (i == expanded_aliases->nr - 1)
+ strbuf_addstr(&sb, " ==>");
+ }
+ die(_("alias loop detected: expansion of '%s' does"
+ " not terminate:%s"), expanded_aliases->items[0].string, sb.buf);
+ }
+
trace_argv_printf(new_argv,
"trace: alias expansion: %s =>",
alias_command);
@@ -591,7 +617,7 @@ static struct cmd_struct commands[] = {
{ "notes", cmd_notes, RUN_SETUP },
{ "pack-objects", cmd_pack_objects, RUN_SETUP },
#ifndef WITH_BREAKING_CHANGES
- { "pack-redundant", cmd_pack_redundant, RUN_SETUP | NO_PARSEOPT },
+ { "pack-redundant", cmd_pack_redundant, RUN_SETUP | NO_PARSEOPT | DEPRECATED },
#endif
{ "pack-refs", cmd_pack_refs, RUN_SETUP },
{ "patch-id", cmd_patch_id, RUN_SETUP_GENTLY | NO_PARSEOPT },
@@ -649,7 +675,7 @@ static struct cmd_struct commands[] = {
{ "verify-tag", cmd_verify_tag, RUN_SETUP },
{ "version", cmd_version },
#ifndef WITH_BREAKING_CHANGES
- { "whatchanged", cmd_whatchanged, RUN_SETUP },
+ { "whatchanged", cmd_whatchanged, RUN_SETUP | DEPRECATED },
#endif
{ "worktree", cmd_worktree, RUN_SETUP },
{ "write-tree", cmd_write_tree, RUN_SETUP },
@@ -670,11 +696,16 @@ int is_builtin(const char *s)
return !!get_builtin(s);
}
-static void list_builtins(struct string_list *out, unsigned int exclude_option)
+static void list_builtins(struct string_list *out,
+ unsigned int include_option,
+ unsigned int exclude_option)
{
+ if (include_option && exclude_option)
+ BUG("'include_option' and 'exclude_option' are mutually exclusive");
for (size_t i = 0; i < ARRAY_SIZE(commands); i++) {
- if (exclude_option &&
- (commands[i].option & exclude_option))
+ if (include_option && !(commands[i].option & include_option))
+ continue;
+ if (exclude_option && (commands[i].option & exclude_option))
continue;
string_list_append(out, commands[i].cmd);
}
@@ -795,14 +826,30 @@ static void execv_dashed_external(const char **argv)
exit(128);
}
+static int is_deprecated_command(const char *cmd)
+{
+ struct cmd_struct *builtin = get_builtin(cmd);
+ return builtin && (builtin->option & DEPRECATED);
+}
+
static int run_argv(struct strvec *args)
{
int done_alias = 0;
- struct string_list cmd_list = STRING_LIST_INIT_DUP;
- struct string_list_item *seen;
+ struct string_list expanded_aliases = STRING_LIST_INIT_DUP;
while (1) {
/*
+ * Allow deprecated commands to be overridden by aliases. This
+ * creates a seamless path forward for people who want to keep
+ * using the name after it is gone, but want to skip the
+ * deprecation complaint in the meantime.
+ */
+ if (is_deprecated_command(args->v[0]) &&
+ handle_alias(args, &expanded_aliases)) {
+ done_alias = 1;
+ continue;
+ }
+ /*
* If we tried alias and futzed with our environment,
* it no longer is safe to invoke builtins directly in
* general. We have to spawn them as dashed externals.
@@ -851,35 +898,17 @@ static int run_argv(struct strvec *args)
/* .. then try the external ones */
execv_dashed_external(args->v);
- seen = unsorted_string_list_lookup(&cmd_list, args->v[0]);
- if (seen) {
- struct strbuf sb = STRBUF_INIT;
- for (size_t i = 0; i < cmd_list.nr; i++) {
- struct string_list_item *item = &cmd_list.items[i];
-
- strbuf_addf(&sb, "\n %s", item->string);
- if (item == seen)
- strbuf_addstr(&sb, " <==");
- else if (i == cmd_list.nr - 1)
- strbuf_addstr(&sb, " ==>");
- }
- die(_("alias loop detected: expansion of '%s' does"
- " not terminate:%s"), cmd_list.items[0].string, sb.buf);
- }
-
- string_list_append(&cmd_list, args->v[0]);
-
/*
* It could be an alias -- this works around the insanity
* of overriding "git log" with "git show" by having
* alias.log = show
*/
- if (!handle_alias(args))
+ if (!handle_alias(args, &expanded_aliases))
break;
done_alias = 1;
}
- string_list_clear(&cmd_list, 0);
+ string_list_clear(&expanded_aliases, 0);
return done_alias;
}
diff --git a/gitk-git/gitk b/gitk-git/gitk
index 6e4d71d585..c02db0194d 100755
--- a/gitk-git/gitk
+++ b/gitk-git/gitk
@@ -2215,6 +2215,7 @@ proc setoptions {} {
}
proc setttkstyle {} {
+ global theme
eval font configure TkDefaultFont [fontflags mainfont]
eval font configure TkTextFont [fontflags textfont]
eval font configure TkHeadingFont [fontflags mainfont]
@@ -2224,6 +2225,10 @@ proc setttkstyle {} {
eval font configure TkIconFont [fontflags uifont]
eval font configure TkMenuFont [fontflags uifont]
eval font configure TkSmallCaptionFont [fontflags uifont]
+
+ if {[catch {ttk::style theme use $theme} err]} {
+ set theme [ttk::style theme use]
+ }
}
# Make a menu and submenus.
@@ -2376,7 +2381,6 @@ proc makewindow {} {
global highlight_files gdttype
global searchstring sstring
global bgcolor fgcolor bglist fglist diffcolors diffbgcolors selectbgcolor
- global uifgcolor uifgdisabledcolor
global filesepbgcolor filesepfgcolor
global mergecolors foundbgcolor currentsearchhitbgcolor
global headctxmenu progresscanv progressitem progresscoords statusw
@@ -2495,40 +2499,18 @@ proc makewindow {} {
set sha1entry .tf.bar.sha1
set entries $sha1entry
set sha1but .tf.bar.sha1label
- button $sha1but -text "[mc "Commit ID:"] " -state disabled -relief flat \
+ ttk::button $sha1but -text "[mc "Commit ID:"] " -state disabled \
-command gotocommit -width 8
- $sha1but conf -disabledforeground [$sha1but cget -foreground]
pack .tf.bar.sha1label -side left
ttk::entry $sha1entry -width $hashlength -font textfont -textvariable sha1string
trace add variable sha1string write sha1change
pack $sha1entry -side left -pady 2
- set bm_left_data {
- #define left_width 16
- #define left_height 16
- static unsigned char left_bits[] = {
- 0x00, 0x00, 0xc0, 0x01, 0xe0, 0x00, 0x70, 0x00, 0x38, 0x00, 0x1c, 0x00,
- 0x0e, 0x00, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0x0e, 0x00, 0x1c, 0x00,
- 0x38, 0x00, 0x70, 0x00, 0xe0, 0x00, 0xc0, 0x01};
- }
- set bm_right_data {
- #define right_width 16
- #define right_height 16
- static unsigned char right_bits[] = {
- 0x00, 0x00, 0xc0, 0x01, 0x80, 0x03, 0x00, 0x07, 0x00, 0x0e, 0x00, 0x1c,
- 0x00, 0x38, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0x00, 0x38, 0x00, 0x1c,
- 0x00, 0x0e, 0x00, 0x07, 0x80, 0x03, 0xc0, 0x01};
- }
- image create bitmap bm-left -data $bm_left_data -foreground $uifgcolor
- image create bitmap bm-left-gray -data $bm_left_data -foreground $uifgdisabledcolor
- image create bitmap bm-right -data $bm_right_data -foreground $uifgcolor
- image create bitmap bm-right-gray -data $bm_right_data -foreground $uifgdisabledcolor
-
- ttk::button .tf.bar.leftbut -command goback -state disabled -width 26
- .tf.bar.leftbut configure -image [list bm-left disabled bm-left-gray]
+ ttk::button .tf.bar.leftbut -command goback -state disabled
+ .tf.bar.leftbut configure -text \u2190 -width 3
pack .tf.bar.leftbut -side left -fill y
- ttk::button .tf.bar.rightbut -command goforw -state disabled -width 26
- .tf.bar.rightbut configure -image [list bm-right disabled bm-right-gray]
+ ttk::button .tf.bar.rightbut -command goforw -state disabled
+ .tf.bar.rightbut configure -text \u2192 -width 3
pack .tf.bar.rightbut -side left -fill y
ttk::label .tf.bar.rowlabel -text [mc "Row"]
@@ -2559,31 +2541,8 @@ proc makewindow {} {
# build up the bottom bar of upper window
ttk::label .tf.lbar.flabel -text "[mc "Find"] "
- set bm_down_data {
- #define down_width 16
- #define down_height 16
- static unsigned char down_bits[] = {
- 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01,
- 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01,
- 0x87, 0xe1, 0x8e, 0x71, 0x9c, 0x39, 0xb8, 0x1d,
- 0xf0, 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x01};
- }
- image create bitmap bm-down -data $bm_down_data -foreground $uifgcolor
- ttk::button .tf.lbar.fnext -width 26 -command {dofind 1 1}
- .tf.lbar.fnext configure -image bm-down
-
- set bm_up_data {
- #define up_width 16
- #define up_height 16
- static unsigned char up_bits[] = {
- 0x80, 0x01, 0xc0, 0x03, 0xe0, 0x07, 0xf0, 0x0f,
- 0xb8, 0x1d, 0x9c, 0x39, 0x8e, 0x71, 0x87, 0xe1,
- 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01,
- 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01};
- }
- image create bitmap bm-up -data $bm_up_data -foreground $uifgcolor
- ttk::button .tf.lbar.fprev -width 26 -command {dofind -1 1}
- .tf.lbar.fprev configure -image bm-up
+ ttk::button .tf.lbar.fnext -command {dofind 1 1} -text \u2193 -width 3
+ ttk::button .tf.lbar.fprev -command {dofind -1 1} -text \u2191 -width 3
ttk::label .tf.lbar.flab2 -text " [mc "commit"] "
@@ -2656,7 +2615,7 @@ proc makewindow {} {
ttk::label .bleft.mid.labeldiffcontext -text " [mc "Lines of context"]: "
pack .bleft.mid.diff .bleft.mid.old .bleft.mid.new -side left -ipadx $wgap
- spinbox .bleft.mid.diffcontext -width 5 \
+ ttk::spinbox .bleft.mid.diffcontext -width 5 \
-from 0 -increment 1 -to 10000000 \
-validate all -validatecommand "diffcontextvalidate %P" \
-textvariable diffcontextstring
@@ -8910,9 +8869,9 @@ proc sha1change {n1 n2 op} {
}
if {[$sha1but cget -state] == $state} return
if {$state == "normal"} {
- $sha1but conf -state normal -relief raised -text "[mc "Goto:"] "
+ $sha1but conf -state normal -text "[mc "Goto:"] "
} else {
- $sha1but conf -state disabled -relief flat -text "[mc "Commit ID:"] "
+ $sha1but conf -state disabled -text "[mc "Commit ID:"] "
}
}
@@ -10294,7 +10253,9 @@ proc refill_reflist {} {
if {![string match "remotes/*" $n] && [string match $reflistfilter $n]} {
if {[commitinview $headids($n) $curview]} {
lappend localrefs [list $n H]
- if {[info exists upstreamofref($n)] && [commitinview $headids($upstreamofref($n)) $curview]} {
+ if {[info exists upstreamofref($n)] && \
+ [info exists headids($upstreamofref($n))] && \
+ [commitinview $headids($upstreamofref($n)) $curview]} {
lappend trackedremoterefs [list $upstreamofref($n) R]
}
} else {
@@ -11608,9 +11569,10 @@ proc mkfontdisp {font top which} {
set fontpref($font) [set $font]
ttk::button $top.${font}but -text $which \
-command [list choosefont $font $which]
- ttk::label $top.$font -relief flat -font $font \
- -text $fontattr($font,family) -justify left
+ ttk::label $top.$font -font $font \
+ -text $fontattr($font,family)
grid x $top.${font}but $top.$font -sticky w
+ grid configure $top.$font -sticky ew
}
proc centertext {w} {
@@ -11690,48 +11652,52 @@ proc prefspage_general {notebook} {
ttk::label $page.ldisp -text [mc "Commit list display options"] -font mainfontbold
grid $page.ldisp - -sticky w -pady 10
+
ttk::label $page.spacer -text " "
ttk::label $page.maxwidthl -text [mc "Maximum graph width (lines)"]
- spinbox $page.maxwidth -from 0 -to 100 -width 4 -textvariable maxwidth
+ ttk::spinbox $page.maxwidth -from 0 -to 100 -width 4 -textvariable maxwidth
grid $page.spacer $page.maxwidthl $page.maxwidth -sticky w
#xgettext:no-tcl-format
ttk::label $page.maxpctl -text [mc "Maximum graph width (% of pane)"]
- spinbox $page.maxpct -from 1 -to 100 -width 4 -textvariable maxgraphpct
+ ttk::spinbox $page.maxpct -from 1 -to 100 -width 4 -textvariable maxgraphpct
grid x $page.maxpctl $page.maxpct -sticky w
+
ttk::checkbutton $page.showlocal -text [mc "Show local changes"] \
-variable showlocalchanges
grid x $page.showlocal -sticky w
+
ttk::checkbutton $page.hideremotes -text [mc "Hide remote refs"] \
-variable hideremotes
grid x $page.hideremotes -sticky w
ttk::entry $page.refstohide -textvariable refstohide
- ttk::frame $page.refstohidef
- ttk::label $page.refstohidef.l -text [mc "Refs to hide (space-separated globs)" ]
- pack $page.refstohidef.l -side left
- pack configure $page.refstohidef.l -padx 10
- grid x $page.refstohidef $page.refstohide -sticky ew
+ ttk::label $page.refstohidel -text [mc "Refs to hide (space-separated globs)"]
+ grid x $page.refstohidel $page.refstohide -sticky ew
+ grid configure $page.refstohide -padx {0 5}
ttk::checkbutton $page.autocopy -text [mc "Copy commit ID to clipboard"] \
-variable autocopy
grid x $page.autocopy -sticky w
+
if {[haveselectionclipboard]} {
ttk::checkbutton $page.autoselect -text [mc "Copy commit ID to X11 selection"] \
-variable autoselect
grid x $page.autoselect -sticky w
}
- spinbox $page.autosellen -from 1 -to $hashlength -width 4 -textvariable autosellen
+ ttk::spinbox $page.autosellen -from 1 -to $hashlength -width 4 -textvariable autosellen
ttk::label $page.autosellenl -text [mc "Length of commit ID to copy"]
grid x $page.autosellenl $page.autosellen -sticky w
+
ttk::label $page.kscroll1 -text [mc "Wheel scrolling multiplier"]
- spinbox $page.kscroll -from 1 -to 20 -width 4 -textvariable kscroll
+ ttk::spinbox $page.kscroll -from 1 -to 20 -width 4 -textvariable kscroll
grid x $page.kscroll1 $page.kscroll -sticky w
ttk::label $page.ddisp -text [mc "Diff display options"] -font mainfontbold
grid $page.ddisp - -sticky w -pady 10
+
ttk::label $page.tabstopl -text [mc "Tab spacing"]
- spinbox $page.tabstop -from 1 -to 20 -width 4 -textvariable tabstop
+ ttk::spinbox $page.tabstop -from 1 -to 20 -width 4 -textvariable tabstop
grid x $page.tabstopl $page.tabstop -sticky w
ttk::label $page.wrapcommentl -text [mc "Wrap comment text"]
@@ -11745,12 +11711,15 @@ proc prefspage_general {notebook} {
ttk::checkbutton $page.ntag -text [mc "Display nearby tags/heads"] \
-variable showneartags
grid x $page.ntag -sticky w
+
ttk::label $page.maxrefsl -text [mc "Maximum # tags/heads to show"]
- spinbox $page.maxrefs -from 1 -to 1000 -width 4 -textvariable maxrefs
+ ttk::spinbox $page.maxrefs -from 1 -to 1000 -width 4 -textvariable maxrefs
grid x $page.maxrefsl $page.maxrefs -sticky w
+
ttk::checkbutton $page.ldiff -text [mc "Limit diffs to listed paths"] \
-variable limitdiffs
grid x $page.ldiff -sticky w
+
ttk::checkbutton $page.lattr -text [mc "Support per-file encodings"] \
-variable perfile_attrs
grid x $page.lattr -sticky w
@@ -11759,76 +11728,109 @@ proc prefspage_general {notebook} {
ttk::frame $page.extdifff
ttk::label $page.extdifff.l -text [mc "External diff tool" ]
ttk::button $page.extdifff.b -text [mc "Choose..."] -command choose_extdiff
- pack $page.extdifff.l $page.extdifff.b -side left
- pack configure $page.extdifff.l -padx 10
+ pack $page.extdifff.l -side left
+ pack $page.extdifff.b -side right -padx {0 5}
grid x $page.extdifff $page.extdifft -sticky ew
+ grid configure $page.extdifft -padx {0 5}
ttk::entry $page.webbrowser -textvariable web_browser
- ttk::frame $page.webbrowserf
- ttk::label $page.webbrowserf.l -text [mc "Web browser" ]
- pack $page.webbrowserf.l -side left
- pack configure $page.webbrowserf.l -padx 10
- grid x $page.webbrowserf $page.webbrowser -sticky ew
+ ttk::label $page.webbrowserl -text [mc "Web browser" ]
+ grid x $page.webbrowserl $page.webbrowser -sticky ew
+ grid configure $page.webbrowser -padx {0 5}
+
+ grid columnconfigure $page 2 -weight 1
return $page
}
proc prefspage_colors {notebook} {
- global uicolor bgcolor fgcolor ctext diffcolors selectbgcolor markbgcolor
+ global bgcolor fgcolor ctext diffcolors selectbgcolor markbgcolor
global diffbgcolors
+ global themeloader
set page [create_prefs_page $notebook.colors]
+ ttk::label $page.themesel -font mainfontbold \
+ -text [mc "Themes - change requires restart"]
+ grid $page.themesel - -sticky w -pady 10
+
+ ttk::label $page.themelabel -text [mc "Theme to use after restart"]
+ makedroplist $page.theme theme {*}[lsort [ttk::style theme names]]
+ grid x $page.themelabel $page.theme -sticky w
+
+ ttk::entry $page.tloadvar -textvariable themeloader
+ ttk::frame $page.tloadframe
+ ttk::label $page.tloadframe.l -text [mc "Theme definition file"]
+ ttk::button $page.tloadframe.b -text [mc "Choose..."] \
+ -command [list choose_themeloader $page]
+ pack $page.tloadframe.l -side left
+ pack $page.tloadframe.b -side right -padx {0 5}
+ pack configure $page.tloadframe.l -padx 0
+ grid x $page.tloadframe $page.tloadvar -sticky ew
+ grid configure $page.tloadvar -padx {0 5}
+
+ ttk::label $page.themelabel2 -text \
+ [mc "The theme definition file may affect all themes."]
+ ttk::button $page.themebut2 -text [mc "Apply theme"] \
+ -command [list updatetheme $page]
+ grid x $page.themebut2 $page.themelabel2 -sticky w
+
ttk::label $page.cdisp -text [mc "Colors: press to choose"] -font mainfontbold
grid $page.cdisp - -sticky w -pady 10
- label $page.ui -padx 40 -relief sunk -background $uicolor
- ttk::button $page.uibut -text [mc "Interface"] \
- -command [list choosecolor uicolor {} $page [mc "interface"]]
- grid x $page.uibut $page.ui -sticky w
label $page.bg -padx 40 -relief sunk -background $bgcolor
ttk::button $page.bgbut -text [mc "Background"] \
-command [list choosecolor bgcolor {} $page [mc "background"]]
grid x $page.bgbut $page.bg -sticky w
+
label $page.fg -padx 40 -relief sunk -background $fgcolor
ttk::button $page.fgbut -text [mc "Foreground"] \
-command [list choosecolor fgcolor {} $page [mc "foreground"]]
grid x $page.fgbut $page.fg -sticky w
+
label $page.diffold -padx 40 -relief sunk -background [lindex $diffcolors 0]
ttk::button $page.diffoldbut -text [mc "Diff: old lines"] \
-command [list choosecolor diffcolors 0 $page [mc "diff old lines"]]
grid x $page.diffoldbut $page.diffold -sticky w
+
label $page.diffoldbg -padx 40 -relief sunk -background [lindex $diffbgcolors 0]
ttk::button $page.diffoldbgbut -text [mc "Diff: old lines bg"] \
-command [list choosecolor diffbgcolors 0 $page [mc "diff old lines bg"]]
grid x $page.diffoldbgbut $page.diffoldbg -sticky w
+
label $page.diffnew -padx 40 -relief sunk -background [lindex $diffcolors 1]
ttk::button $page.diffnewbut -text [mc "Diff: new lines"] \
-command [list choosecolor diffcolors 1 $page [mc "diff new lines"]]
grid x $page.diffnewbut $page.diffnew -sticky w
+
label $page.diffnewbg -padx 40 -relief sunk -background [lindex $diffbgcolors 1]
ttk::button $page.diffnewbgbut -text [mc "Diff: new lines bg"] \
-command [list choosecolor diffbgcolors 1 $page [mc "diff new lines bg"]]
grid x $page.diffnewbgbut $page.diffnewbg -sticky w
+
label $page.hunksep -padx 40 -relief sunk -background [lindex $diffcolors 2]
ttk::button $page.hunksepbut -text [mc "Diff: hunk header"] \
-command [list choosecolor diffcolors 2 $page [mc "diff hunk header"]]
grid x $page.hunksepbut $page.hunksep -sticky w
+
label $page.markbgsep -padx 40 -relief sunk -background $markbgcolor
ttk::button $page.markbgbut -text [mc "Marked line bg"] \
-command [list choosecolor markbgcolor {} $page [mc "marked line background"]]
grid x $page.markbgbut $page.markbgsep -sticky w
+
label $page.selbgsep -padx 40 -relief sunk -background $selectbgcolor
ttk::button $page.selbgbut -text [mc "Select bg"] \
-command [list choosecolor selectbgcolor {} $page [mc "background"]]
grid x $page.selbgbut $page.selbgsep -sticky w
+
+ grid columnconfigure $page 2 -weight 1
+
return $page
}
proc prefspage_set_colorswatches {page} {
- global uicolor bgcolor fgcolor ctext diffcolors selectbgcolor markbgcolor
+ global bgcolor fgcolor ctext diffcolors selectbgcolor markbgcolor
global diffbgcolors
- $page.ui configure -background $uicolor
$page.bg configure -background $bgcolor
$page.fg configure -background $fgcolor
$page.diffold configure -background [lindex $diffcolors 0]
@@ -11847,6 +11849,7 @@ proc prefspage_fonts {notebook} {
mkfontdisp mainfont $page [mc "Main font"]
mkfontdisp textfont $page [mc "Diff display font"]
mkfontdisp uifont $page [mc "User interface font"]
+ grid columnconfigure $page 2 -weight 1
return $page
}
@@ -11881,7 +11884,7 @@ proc doprefs {} {
grid rowconfigure $notebook 1 -weight 1
raise [lindex $pages 0]
- grid $notebook -sticky news -padx 2 -pady 2
+ grid $notebook -sticky news -padx 3 -pady 3
grid rowconfigure $top 0 -weight 1
grid columnconfigure $top 0 -weight 1
@@ -11890,12 +11893,13 @@ proc doprefs {} {
ttk::button $top.buts.can -text [mc "Cancel"] -command prefscan -default normal
bind $top <Key-Return> prefsok
bind $top <Key-Escape> prefscan
- grid $top.buts.ok $top.buts.can
- grid columnconfigure $top.buts 0 -weight 1 -uniform a
- grid columnconfigure $top.buts 1 -weight 1 -uniform a
- grid $top.buts - - -pady 10 -sticky ew
- grid columnconfigure $top 2 -weight 1
+ grid $top.buts.ok $top.buts.can -padx 20
+ grid $top.buts -sticky w -pady 10
bind $top <Visibility> [list focus $top.buts.ok]
+
+ # let the geometry manager determine the required size, then use it as the minimum
+ update idletasks
+ wm minsize $top [winfo reqwidth $top] [winfo reqheight $top]
}
proc choose_extdiff {} {
@@ -11907,6 +11911,51 @@ proc choose_extdiff {} {
}
}
+proc run_themeloader {f} {
+ if {![info exists ::_themefiles_seen]} {
+ set ::_themefiles_seen [dict create]
+ }
+
+ set fn [file normalize $f]
+ if {![dict exists $::_themefiles_seen $fn]} {
+ if {[catch {source $fn} err]} {
+ error_popup "could not interpret: $fn\n$err"
+ dict set ::_themefiles_seen $fn 0
+ } else {
+ dict set ::_themefiles_seen $fn 1
+ }
+ }
+ return [dict get $::_themefiles_seen $fn]
+}
+
+proc updatetheme {prefspage {dotheme 1}} {
+ global theme
+ global themeloader
+ if {$themeloader ne {}} {
+ if {![run_themeloader $themeloader]} {
+ set themeloader {}
+ return
+ } else {
+ $prefspage.theme configure -values \
+ [lsort [ttk::style theme names]]
+ }
+ }
+ if {$dotheme} {
+ ttk::style theme use $theme
+ set_gui_colors
+ prefspage_set_colorswatches $prefspage
+ }
+}
+
+proc choose_themeloader {prefspage} {
+ global themeloader
+ set tfile [tk_getOpenFile -title [mc "Gitk: select theme definition"] -multiple false]
+ if {$tfile ne {}} {
+ set themeloader $tfile
+ updatetheme $prefspage 0
+ }
+}
+
proc choosecolor {v vi prefspage x} {
global $v
@@ -11930,21 +11979,6 @@ proc setselbg {c} {
allcanvs itemconf secsel -fill $c
}
-# This sets the background color and the color scheme for the whole UI.
-# For some reason, tk_setPalette chooses a nasty dark red for selectColor
-# if we don't specify one ourselves, which makes the checkbuttons and
-# radiobuttons look bad. This chooses white for selectColor if the
-# background color is light, or black if it is dark.
-proc setui {c} {
- if {[tk windowingsystem] eq "win32"} { return }
- set bg [winfo rgb . $c]
- set selc black
- if {[lindex $bg 0] + 1.5 * [lindex $bg 1] + 0.5 * [lindex $bg 2] > 100000} {
- set selc white
- }
- tk_setPalette background $c selectColor $selc
-}
-
proc setbg {c} {
global bglist
@@ -11969,10 +12003,9 @@ proc setfg {c} {
}
proc set_gui_colors {} {
- global uicolor bgcolor fgcolor ctext diffcolors selectbgcolor markbgcolor
+ global bgcolor fgcolor ctext diffcolors selectbgcolor markbgcolor
global diffbgcolors
- setui $uicolor
setbg $bgcolor
setfg $fgcolor
$ctext tag conf d0 -foreground [lindex $diffcolors 0]
@@ -11994,6 +12027,7 @@ proc prefscan {} {
catch {destroy $prefstop}
unset prefstop
fontcan
+ setttkstyle
set_gui_colors
}
@@ -12460,11 +12494,13 @@ namespace import ::msgcat::mc
# on OSX bring the current Wish process window to front
if {[tk windowingsystem] eq "aqua"} {
- safe_exec [list osascript -e [format {
- tell application "System Events"
- set frontmost of processes whose unix id is %d to true
- end tell
- } [pid] ]]
+ catch {
+ safe_exec [list osascript -e [format {
+ tell application "System Events"
+ set frontmost of processes whose unix id is %d to true
+ end tell
+ } [pid] ]]
+ }
}
# Unset GIT_TRACE var if set
@@ -12569,17 +12605,11 @@ if {[tk windowingsystem] eq "aqua"} {
set colors {"#00ff00" red blue magenta darkgrey brown orange}
if {[tk windowingsystem] eq "win32"} {
- set uicolor SystemButtonFace
- set uifgcolor SystemButtonText
- set uifgdisabledcolor SystemDisabledText
set bgcolor SystemWindow
set fgcolor SystemWindowText
set selectbgcolor SystemHighlight
set web_browser "cmd /c start"
} else {
- set uicolor grey85
- set uifgcolor black
- set uifgdisabledcolor "#999"
set bgcolor white
set fgcolor black
set selectbgcolor gray85
@@ -12619,6 +12649,12 @@ set circleoutlinecolor $fgcolor
set foundbgcolor yellow
set currentsearchhitbgcolor orange
+set theme [ttk::style theme use]
+set themeloader {}
+set uicolor {}
+set uifgcolor {}
+set uifgdisabledcolor {}
+
# button for popping up context menus
if {[tk windowingsystem] eq "aqua" && [package vcompare $::tcl_version 8.7] < 0} {
set ctxbut <Button-2>
@@ -12702,6 +12738,8 @@ set config_variables {
tagfgcolor
tagoutlinecolor
textfont
+ theme
+ themeloader
uicolor
uifgcolor
uifgdisabledcolor
@@ -12801,7 +12839,13 @@ set nullid "0000000000000000000000000000000000000000"
set nullid2 "0000000000000000000000000000000000000001"
set nullfile "/dev/null"
-setttkstyle
+if {[file exists $themeloader]} {
+ if {![run_themeloader $themeloader]} {
+ puts stderr "Could not interpret themeloader: $themeloader"
+ exit 1
+ }
+}
+
set appname "gitk"
set runq {}
@@ -12917,6 +12961,7 @@ if {[tk windowingsystem] eq "win32"} {
focus -force .
}
+setttkstyle
set_gui_colors
getcommits {}
diff --git a/gpg-interface.c b/gpg-interface.c
index 06e7fb5060..2f4f0e32cb 100644
--- a/gpg-interface.c
+++ b/gpg-interface.c
@@ -1125,3 +1125,20 @@ out:
FREE_AND_NULL(ssh_signing_key_file);
return ret;
}
+
+int parse_sign_mode(const char *arg, enum sign_mode *mode)
+{
+ if (!strcmp(arg, "abort"))
+ *mode = SIGN_ABORT;
+ else if (!strcmp(arg, "verbatim") || !strcmp(arg, "ignore"))
+ *mode = SIGN_VERBATIM;
+ else if (!strcmp(arg, "warn-verbatim") || !strcmp(arg, "warn"))
+ *mode = SIGN_WARN_VERBATIM;
+ else if (!strcmp(arg, "warn-strip"))
+ *mode = SIGN_WARN_STRIP;
+ else if (!strcmp(arg, "strip"))
+ *mode = SIGN_STRIP;
+ else
+ return -1;
+ return 0;
+}
diff --git a/gpg-interface.h b/gpg-interface.h
index 60ddf8bbfa..50487aa148 100644
--- a/gpg-interface.h
+++ b/gpg-interface.h
@@ -104,4 +104,19 @@ int check_signature(struct signature_check *sigc,
void print_signature_buffer(const struct signature_check *sigc,
unsigned flags);
+/* Modes for --signed-tags=<mode> and --signed-commits=<mode> options. */
+enum sign_mode {
+ SIGN_ABORT,
+ SIGN_WARN_VERBATIM,
+ SIGN_VERBATIM,
+ SIGN_WARN_STRIP,
+ SIGN_STRIP,
+};
+
+/*
+ * Return 0 if `arg` can be parsed into an `enum sign_mode`. Return -1
+ * otherwise.
+ */
+int parse_sign_mode(const char *arg, enum sign_mode *mode);
+
#endif
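parse_sign_mode() above maps the keywords accepted by --signed-tags=<mode> and --signed-commits=<mode> onto enum sign_mode, returning 0 on success and -1 otherwise. A small caller sketch under that contract; the surrounding option-handling context is assumed, not taken from this patch:

    enum sign_mode mode;

    if (parse_sign_mode(arg, &mode) < 0)
        die("unknown signing mode '%s'", arg);
    /* mode is now one of SIGN_ABORT, SIGN_VERBATIM, SIGN_WARN_VERBATIM,
     * SIGN_WARN_STRIP or SIGN_STRIP */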
diff --git a/help.c b/help.c
index bb20498cfd..5854dd4a7e 100644
--- a/help.c
+++ b/help.c
@@ -791,6 +791,12 @@ void get_version_info(struct strbuf *buf, int show_build_options)
strbuf_addf(buf, "shell-path: %s\n", SHELL_PATH);
/* NEEDSWORK: also save and output GIT-BUILD_OPTIONS? */
+#if defined WITH_RUST
+ strbuf_addstr(buf, "rust: enabled\n");
+#else
+ strbuf_addstr(buf, "rust: disabled\n");
+#endif
+
if (fsmonitor_ipc__is_supported())
strbuf_addstr(buf, "feature: fsmonitor--daemon\n");
#if defined LIBCURL_VERSION
diff --git a/http-backend.c b/http-backend.c
index d5dfe762bb..9084058f1e 100644
--- a/http-backend.c
+++ b/http-backend.c
@@ -603,18 +603,19 @@ static void get_head(struct strbuf *hdr, char *arg UNUSED)
static void get_info_packs(struct strbuf *hdr, char *arg UNUSED)
{
size_t objdirlen = strlen(repo_get_object_directory(the_repository));
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct strbuf buf = STRBUF_INIT;
struct packed_git *p;
size_t cnt = 0;
select_getanyfile(hdr);
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
if (p->pack_local)
cnt++;
}
strbuf_grow(&buf, cnt * 53 + 2);
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
if (p->pack_local)
strbuf_addf(&buf, "P %s\n", p->pack_name + objdirlen + 6);
}
diff --git a/http-push.c b/http-push.c
index 4c43ba3bc7..a1c01e3b9b 100644
--- a/http-push.c
+++ b/http-push.c
@@ -208,7 +208,8 @@ static void curl_setup_http(CURL *curl, const char *url,
curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
curl_easy_setopt(curl, CURLOPT_URL, url);
curl_easy_setopt(curl, CURLOPT_INFILE, buffer);
- curl_easy_setopt(curl, CURLOPT_INFILESIZE, buffer->buf.len);
+ curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE,
+ cast_size_t_to_curl_off_t(buffer->buf.len));
curl_easy_setopt(curl, CURLOPT_READFUNCTION, fread_buffer);
curl_easy_setopt(curl, CURLOPT_SEEKFUNCTION, seek_buffer);
curl_easy_setopt(curl, CURLOPT_SEEKDATA, buffer);
diff --git a/http.c b/http.c
index a7d55dcbba..7e3af1e72f 100644
--- a/http.c
+++ b/http.c
@@ -2416,6 +2416,7 @@ static char *fetch_pack_index(unsigned char *hash, const char *base_url)
static int fetch_and_setup_pack_index(struct packed_git **packs_head,
unsigned char *sha1, const char *base_url)
{
+ struct packfile_store *packs = the_repository->objects->packfiles;
struct packed_git *new_pack, *p;
char *tmp_idx = NULL;
int ret;
@@ -2424,7 +2425,7 @@ static int fetch_and_setup_pack_index(struct packed_git **packs_head,
* If we already have the pack locally, no need to fetch its index or
* even add it to list; we already have all of its objects.
*/
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
if (hasheq(p->hash, sha1, the_repository->hash_algo))
return 0;
}
@@ -2549,7 +2550,7 @@ void http_install_packfile(struct packed_git *p,
lst = &((*lst)->next);
*lst = (*lst)->next;
- install_packed_git(the_repository, p);
+ packfile_store_add_pack(the_repository->objects->packfiles, p);
}
struct http_pack_request *new_http_pack_request(
diff --git a/http.h b/http.h
index 36202139f4..553e16205c 100644
--- a/http.h
+++ b/http.h
@@ -8,6 +8,7 @@ struct packed_git;
#include <curl/curl.h>
#include <curl/easy.h>
+#include "gettext.h"
#include "strbuf.h"
#include "remote.h"
@@ -95,6 +96,15 @@ static inline int missing__target(int code, int result)
#define missing_target(a) missing__target((a)->http_code, (a)->curl_result)
+static inline curl_off_t cast_size_t_to_curl_off_t(size_t a)
+{
+ uintmax_t size = a;
+ if (size > maximum_signed_value_of_type(curl_off_t))
+ die(_("number too large to represent as curl_off_t "
+ "on this platform: %"PRIuMAX), (uintmax_t)a);
+ return (curl_off_t)a;
+}
+
/*
* Normalize curl results to handle CURL_FAILONERROR (or lack thereof). Failing
* http codes have their "result" converted to CURLE_HTTP_RETURNED_ERROR, and
@@ -210,7 +220,7 @@ int finish_http_pack_request(struct http_pack_request *preq);
void release_http_pack_request(struct http_pack_request *preq);
/*
- * Remove p from the given list, and invoke install_packed_git() on it.
+ * Remove p from the given list, and invoke packfile_store_add_pack() on it.
*
* This is a convenience function for users that have obtained a list of packs
* from http_get_info_packs() and have chosen a specific pack to fetch.
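Editor's note: the helper added above refuses, rather than truncates, sizes that do not fit into curl_off_t. A minimal sketch of the intended call pattern follows; the wrapper function is hypothetical and not part of the patch.

static void set_upload_size(CURL *curl, size_t len)
{
	/*
	 * dies with a clear message instead of silently truncating on
	 * platforms where curl_off_t is narrower than size_t
	 */
	curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE,
			 cast_size_t_to_curl_off_t(len));
}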
diff --git a/imap-send.c b/imap-send.c
index 4bd5b8aa0d..26dda7f328 100644
--- a/imap-send.c
+++ b/imap-send.c
@@ -1721,7 +1721,7 @@ static int curl_append_msgs_to_imap(struct imap_server_conf *server,
lf_to_crlf(&msgbuf.buf);
curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE,
- (curl_off_t)(msgbuf.buf.len-prev_len));
+ cast_size_t_to_curl_off_t(msgbuf.buf.len-prev_len));
res = curl_easy_perform(curl);
diff --git a/log-tree.c b/log-tree.c
index 67d9ace596..7d917f2a83 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -718,7 +718,8 @@ static void show_diff_of_diff(struct rev_info *opt)
.creation_factor = opt->creation_factor,
.dual_color = 1,
.max_memory = RANGE_DIFF_MAX_MEMORY_DEFAULT,
- .diffopt = &opts
+ .diffopt = &opts,
+ .log_arg = &opt->rdiff_log_arg
};
memcpy(&dq, &diff_queued_diff, sizeof(diff_queued_diff));
diff --git a/mailmap.c b/mailmap.c
index 56c72102d9..37fd158a51 100644
--- a/mailmap.c
+++ b/mailmap.c
@@ -1,5 +1,4 @@
#define USE_THE_REPOSITORY_VARIABLE
-#define DISABLE_SIGN_COMPARE_WARNINGS
#include "git-compat-util.h"
#include "environment.h"
@@ -243,10 +242,9 @@ void clear_mailmap(struct string_list *map)
static struct string_list_item *lookup_prefix(struct string_list *map,
const char *string, size_t len)
{
- int i = string_list_find_insert_index(map, string, 1);
- if (i < 0) {
- /* exact match */
- i = -1 - i;
+ bool exact_match;
+ size_t i = string_list_find_insert_index(map, string, &exact_match);
+ if (exact_match) {
if (!string[len])
return &map->items[i];
/*
@@ -267,7 +265,7 @@ static struct string_list_item *lookup_prefix(struct string_list *map,
* overlong key would be inserted, which must come after the
* real location of the key if one exists.
*/
- while (0 <= --i && i < map->nr) {
+ while (i-- && i < map->nr) {
int cmp = strncasecmp(map->items[i].string, string, len);
if (cmp < 0)
/*
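Editor's note: the rewritten lookup above assumes the revised string_list_find_insert_index() signature, which returns the index as a size_t and reports an exact match through a bool out-parameter instead of a negative return value. A hedged sketch of that calling convention; find_exact() is a hypothetical helper.

static struct string_list_item *find_exact(struct string_list *list,
					   const char *key)
{
	bool exact;
	size_t pos = string_list_find_insert_index(list, key, &exact);

	/* pos is the insertion point when the key is absent */
	return exact ? &list->items[pos] : NULL;
}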
diff --git a/meson.build b/meson.build
index b3dfcc0497..cee9424475 100644
--- a/meson.build
+++ b/meson.build
@@ -220,7 +220,7 @@ project('git', 'c',
# learned to define __STDC_VERSION__ with C11 and later. We thus require
# GNU C99 and fall back to C11. Meson only learned to handle the fallback
# with version 1.3.0, so on older versions we use GNU C99 unconditionally.
- default_options: meson.version().version_compare('>=1.3.0') ? ['c_std=gnu99,c11'] : ['c_std=gnu99'],
+ default_options: meson.version().version_compare('>=1.3.0') ? ['rust_std=2018', 'c_std=gnu99,c11'] : ['rust_std=2018', 'c_std=gnu99'],
)
fs = import('fs')
@@ -287,7 +287,6 @@ libgit_sources = [
'blob.c',
'bloom.c',
'branch.c',
- 'bulk-checkin.c',
'bundle-uri.c',
'bundle.c',
'cache-tree.c',
@@ -407,6 +406,7 @@ libgit_sources = [
'pack-check.c',
'pack-mtimes.c',
'pack-objects.c',
+ 'pack-refs.c',
'pack-revindex.c',
'pack-write.c',
'packfile.c',
@@ -452,6 +452,7 @@ libgit_sources = [
'reftable/error.c',
'reftable/block.c',
'reftable/blocksource.c',
+ 'reftable/fsck.c',
'reftable/iter.c',
'reftable/merged.c',
'reftable/pq.c',
@@ -522,7 +523,6 @@ libgit_sources = [
'usage.c',
'userdiff.c',
'utf8.c',
- 'varint.c',
'version.c',
'versioncmp.c',
'walker.c',
@@ -1703,6 +1703,17 @@ version_def_h = custom_target(
)
libgit_sources += version_def_h
+cargo = find_program('cargo', dirs: program_path, native: true, required: get_option('rust'))
+rust_option = get_option('rust').disable_auto_if(not cargo.found())
+if rust_option.allowed()
+ subdir('src')
+ libgit_c_args += '-DWITH_RUST'
+else
+ libgit_sources += [
+ 'varint.c',
+ ]
+endif
+
libgit = declare_dependency(
link_with: static_library('git',
sources: libgit_sources,
@@ -2101,11 +2112,20 @@ endif
subdir('bin-wrappers')
if get_option('docs') != []
+ doc_targets = []
subdir('Documentation')
+else
+ docs_backend = 'none'
endif
subdir('contrib')
+# Note that the target is intentionally configured after including the
+# 'contrib' directory, as some tools there also have their own manpages.
+if get_option('docs') != []
+ alias_target('docs', doc_targets)
+endif
+
exclude_from_check_headers = [
'compat/',
'unicode-width.h',
@@ -2240,10 +2260,12 @@ summary({
'pcre2': pcre2,
'perl': perl_features_enabled,
'python': target_python.found(),
+ 'rust': rust_option.allowed(),
}, section: 'Auto-detected features', bool_yn: true)
summary({
'csprng': csprng_backend,
+ 'docs': docs_backend,
'https': https_backend,
'sha1': sha1_backend,
'sha1_unsafe': sha1_unsafe_backend,
diff --git a/meson_options.txt b/meson_options.txt
index 1668f260a1..143dee9237 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -71,6 +71,8 @@ option('zlib_backend', type: 'combo', choices: ['auto', 'zlib', 'zlib-ng'], valu
# Build tweaks.
option('breaking_changes', type: 'boolean', value: false,
description: 'Enable upcoming breaking changes.')
+option('rust', type: 'feature', value: 'auto',
+ description: 'Enable building with Rust.')
option('macos_use_homebrew_gettext', type: 'boolean', value: true,
description: 'Use gettext from Homebrew instead of the slightly-broken system-provided one.')
diff --git a/midx.c b/midx.c
index 7726c13d7e..1d6269f957 100644
--- a/midx.c
+++ b/midx.c
@@ -93,6 +93,12 @@ static int midx_read_object_offsets(const unsigned char *chunk_start,
return 0;
}
+struct multi_pack_index *get_multi_pack_index(struct odb_source *source)
+{
+ packfile_store_prepare(source->odb->packfiles);
+ return source->midx;
+}
+
static struct multi_pack_index *load_multi_pack_index_one(struct odb_source *source,
const char *midx_name)
{
@@ -443,7 +449,6 @@ int prepare_midx_pack(struct multi_pack_index *m,
{
struct repository *r = m->source->odb->repo;
struct strbuf pack_name = STRBUF_INIT;
- struct strbuf key = STRBUF_INIT;
struct packed_git *p;
pack_int_id = midx_for_pack(&m, pack_int_id);
@@ -455,25 +460,11 @@ int prepare_midx_pack(struct multi_pack_index *m,
strbuf_addf(&pack_name, "%s/pack/%s", m->source->path,
m->pack_names[pack_int_id]);
-
- /* pack_map holds the ".pack" name, but we have the .idx */
- strbuf_addbuf(&key, &pack_name);
- strbuf_strip_suffix(&key, ".idx");
- strbuf_addstr(&key, ".pack");
- p = hashmap_get_entry_from_hash(&r->objects->pack_map,
- strhash(key.buf), key.buf,
- struct packed_git, packmap_ent);
- if (!p) {
- p = add_packed_git(r, pack_name.buf, pack_name.len,
- m->source->local);
- if (p) {
- install_packed_git(r, p);
- list_add_tail(&p->mru, &r->objects->packed_git_mru);
- }
- }
-
+ p = packfile_store_load_pack(r->objects->packfiles,
+ pack_name.buf, m->source->local);
+ if (p)
+ list_add_tail(&p->mru, &r->objects->packfiles->mru);
strbuf_release(&pack_name);
- strbuf_release(&key);
if (!p) {
m->packs[pack_int_id] = MIDX_PACK_ERROR;
diff --git a/midx.h b/midx.h
index e241d2d690..6e54d73503 100644
--- a/midx.h
+++ b/midx.h
@@ -94,6 +94,7 @@ void get_midx_chain_filename(struct odb_source *source, struct strbuf *out);
void get_split_midx_filename_ext(struct odb_source *source, struct strbuf *buf,
const unsigned char *hash, const char *ext);
+struct multi_pack_index *get_multi_pack_index(struct odb_source *source);
struct multi_pack_index *load_multi_pack_index(struct odb_source *source);
int prepare_midx_pack(struct multi_pack_index *m, uint32_t pack_int_id);
struct packed_git *nth_midxed_pack(struct multi_pack_index *m,
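Editor's note: get_multi_pack_index() now takes an odb_source and prepares the packfile store itself, so per-source iteration looks roughly like the following sketch (illustrative only, not taken from the patch).

static void for_each_midx(struct repository *r,
			  void (*fn)(struct multi_pack_index *m))
{
	/* every alternate carries its own (possibly absent) MIDX */
	for (struct odb_source *source = r->objects->sources;
	     source; source = source->next) {
		struct multi_pack_index *m = get_multi_pack_index(source);
		if (m)
			fn(m);
	}
}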
diff --git a/object-file.c b/object-file.c
index bc15af4245..4675c8ed6b 100644
--- a/object-file.c
+++ b/object-file.c
@@ -10,7 +10,6 @@
#define USE_THE_REPOSITORY_VARIABLE
#include "git-compat-util.h"
-#include "bulk-checkin.h"
#include "convert.h"
#include "dir.h"
#include "environment.h"
@@ -28,6 +27,8 @@
#include "read-cache-ll.h"
#include "setup.h"
#include "streaming.h"
+#include "tempfile.h"
+#include "tmp-objdir.h"
/* The maximum size for an object header. */
#define MAX_HEADER_LEN 32
@@ -666,6 +667,93 @@ void hash_object_file(const struct git_hash_algo *algo, const void *buf,
write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
}
+struct transaction_packfile {
+ char *pack_tmp_name;
+ struct hashfile *f;
+ off_t offset;
+ struct pack_idx_option pack_idx_opts;
+
+ struct pack_idx_entry **written;
+ uint32_t alloc_written;
+ uint32_t nr_written;
+};
+
+struct odb_transaction {
+ struct object_database *odb;
+
+ struct tmp_objdir *objdir;
+ struct transaction_packfile packfile;
+};
+
+static void prepare_loose_object_transaction(struct odb_transaction *transaction)
+{
+ /*
+ * We lazily create the temporary object directory
+ * the first time an object might be added, since
+ * callers may not know whether any objects will be
+ * added at the time they call object_file_transaction_begin.
+ */
+ if (!transaction || transaction->objdir)
+ return;
+
+ transaction->objdir = tmp_objdir_create(transaction->odb->repo, "bulk-fsync");
+ if (transaction->objdir)
+ tmp_objdir_replace_primary_odb(transaction->objdir, 0);
+}
+
+static void fsync_loose_object_transaction(struct odb_transaction *transaction,
+ int fd, const char *filename)
+{
+ /*
+ * If we have an active ODB transaction, we issue a call that
+ * cleans the filesystem page cache but avoids a hardware flush
+ * command. Later on we will issue a single hardware flush
+ * before renaming the objects to their final names as part of
+ * flush_batch_fsync.
+ */
+ if (!transaction || !transaction->objdir ||
+ git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) {
+ if (errno == ENOSYS)
+ warning(_("core.fsyncMethod = batch is unsupported on this platform"));
+ fsync_or_die(fd, filename);
+ }
+}
+
+/*
+ * Cleanup after batch-mode fsync_object_files.
+ */
+static void flush_loose_object_transaction(struct odb_transaction *transaction)
+{
+ struct strbuf temp_path = STRBUF_INIT;
+ struct tempfile *temp;
+
+ if (!transaction->objdir)
+ return;
+
+ /*
+ * Issue a full hardware flush against a temporary file to ensure
+ * that all objects are durable before any renames occur. The code in
+ * fsync_loose_object_transaction has already issued a writeout
+ * request, but it has not flushed any writeback cache in the storage
+ * hardware or any filesystem logs. This fsync call acts as a barrier
+ * to ensure that the data in each new object file is durable before
+ * the final name is visible.
+ */
+ strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX",
+ repo_get_object_directory(transaction->odb->repo));
+ temp = xmks_tempfile(temp_path.buf);
+ fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp));
+ delete_tempfile(&temp);
+ strbuf_release(&temp_path);
+
+ /*
+ * Make the object files visible in the primary ODB after their data is
+ * fully durable.
+ */
+ tmp_objdir_migrate(transaction->objdir);
+ transaction->objdir = NULL;
+}
+
/* Finalize a file on disk, and close it. */
static void close_loose_object(struct odb_source *source,
int fd, const char *filename)
@@ -674,7 +762,7 @@ static void close_loose_object(struct odb_source *source,
goto out;
if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
- fsync_loose_object_bulk_checkin(source->odb->transaction, fd, filename);
+ fsync_loose_object_transaction(source->odb->transaction, fd, filename);
else if (fsync_object_files > 0)
fsync_or_die(fd, filename);
else
@@ -852,7 +940,7 @@ static int write_loose_object(struct odb_source *source,
static struct strbuf filename = STRBUF_INIT;
if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
- prepare_loose_object_bulk_checkin(source->odb->transaction);
+ prepare_loose_object_transaction(source->odb->transaction);
odb_loose_path(source, &filename, oid);
@@ -941,7 +1029,7 @@ int stream_loose_object(struct odb_source *source,
int hdrlen;
if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT))
- prepare_loose_object_bulk_checkin(source->odb->transaction);
+ prepare_loose_object_transaction(source->odb->transaction);
/* Since oid is not determined, save tmp file to odb path. */
strbuf_addf(&filename, "%s/", source->path);
@@ -1243,6 +1331,274 @@ static int index_core(struct index_state *istate,
return ret;
}
+static int already_written(struct odb_transaction *transaction,
+ struct object_id *oid)
+{
+ /* The object may already exist in the repository */
+ if (odb_has_object(transaction->odb, oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ return 1;
+
+ /* Might want to keep the list sorted */
+ for (uint32_t i = 0; i < transaction->packfile.nr_written; i++)
+ if (oideq(&transaction->packfile.written[i]->oid, oid))
+ return 1;
+
+ /* This is a new object we need to keep */
+ return 0;
+}
+
+/* Lazily create backing packfile for the state */
+static void prepare_packfile_transaction(struct odb_transaction *transaction,
+ unsigned flags)
+{
+ struct transaction_packfile *state = &transaction->packfile;
+ if (!(flags & INDEX_WRITE_OBJECT) || state->f)
+ return;
+
+ state->f = create_tmp_packfile(transaction->odb->repo,
+ &state->pack_tmp_name);
+ reset_pack_idx_option(&state->pack_idx_opts);
+
+ /* Pretend we are going to write only one object */
+ state->offset = write_pack_header(state->f, 1);
+ if (!state->offset)
+ die_errno("unable to write pack header");
+}
+
+/*
+ * Read the contents from fd for size bytes, streaming it to the
+ * packfile in state while updating the hash in ctx. Signal a failure
+ * by returning a negative value when the resulting pack would exceed
+ * the pack size limit and this is not the first object in the pack,
+ * so that the caller can discard what we wrote from the current pack
+ * by truncating it and opening a new one. The caller will then call
+ * us again after rewinding the input fd.
+ *
+ * The already_hashed_to pointer is kept untouched by the caller to
+ * make sure we do not hash the same byte when we are called
+ * again. This way, the caller does not have to checkpoint its hash
+ * status before calling us just in case we ask it to call us again
+ * with a new pack.
+ */
+static int stream_blob_to_pack(struct transaction_packfile *state,
+ struct git_hash_ctx *ctx, off_t *already_hashed_to,
+ int fd, size_t size, const char *path,
+ unsigned flags)
+{
+ git_zstream s;
+ unsigned char ibuf[16384];
+ unsigned char obuf[16384];
+ unsigned hdrlen;
+ int status = Z_OK;
+ int write_object = (flags & INDEX_WRITE_OBJECT);
+ off_t offset = 0;
+
+ git_deflate_init(&s, pack_compression_level);
+
+ hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), OBJ_BLOB, size);
+ s.next_out = obuf + hdrlen;
+ s.avail_out = sizeof(obuf) - hdrlen;
+
+ while (status != Z_STREAM_END) {
+ if (size && !s.avail_in) {
+ size_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
+ ssize_t read_result = read_in_full(fd, ibuf, rsize);
+ if (read_result < 0)
+ die_errno("failed to read from '%s'", path);
+ if ((size_t)read_result != rsize)
+ die("failed to read %u bytes from '%s'",
+ (unsigned)rsize, path);
+ offset += rsize;
+ if (*already_hashed_to < offset) {
+ size_t hsize = offset - *already_hashed_to;
+ if (rsize < hsize)
+ hsize = rsize;
+ if (hsize)
+ git_hash_update(ctx, ibuf, hsize);
+ *already_hashed_to = offset;
+ }
+ s.next_in = ibuf;
+ s.avail_in = rsize;
+ size -= rsize;
+ }
+
+ status = git_deflate(&s, size ? 0 : Z_FINISH);
+
+ if (!s.avail_out || status == Z_STREAM_END) {
+ if (write_object) {
+ size_t written = s.next_out - obuf;
+
+ /* would we bust the size limit? */
+ if (state->nr_written &&
+ pack_size_limit_cfg &&
+ pack_size_limit_cfg < state->offset + written) {
+ git_deflate_abort(&s);
+ return -1;
+ }
+
+ hashwrite(state->f, obuf, written);
+ state->offset += written;
+ }
+ s.next_out = obuf;
+ s.avail_out = sizeof(obuf);
+ }
+
+ switch (status) {
+ case Z_OK:
+ case Z_BUF_ERROR:
+ case Z_STREAM_END:
+ continue;
+ default:
+ die("unexpected deflate failure: %d", status);
+ }
+ }
+ git_deflate_end(&s);
+ return 0;
+}
+
+static void flush_packfile_transaction(struct odb_transaction *transaction)
+{
+ struct transaction_packfile *state = &transaction->packfile;
+ struct repository *repo = transaction->odb->repo;
+ unsigned char hash[GIT_MAX_RAWSZ];
+ struct strbuf packname = STRBUF_INIT;
+ char *idx_tmp_name = NULL;
+
+ if (!state->f)
+ return;
+
+ if (state->nr_written == 0) {
+ close(state->f->fd);
+ free_hashfile(state->f);
+ unlink(state->pack_tmp_name);
+ goto clear_exit;
+ } else if (state->nr_written == 1) {
+ finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK,
+ CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
+ } else {
+ int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0);
+ fixup_pack_header_footer(repo->hash_algo, fd, hash, state->pack_tmp_name,
+ state->nr_written, hash,
+ state->offset);
+ close(fd);
+ }
+
+ strbuf_addf(&packname, "%s/pack/pack-%s.",
+ repo_get_object_directory(transaction->odb->repo),
+ hash_to_hex_algop(hash, repo->hash_algo));
+
+ stage_tmp_packfiles(repo, &packname, state->pack_tmp_name,
+ state->written, state->nr_written, NULL,
+ &state->pack_idx_opts, hash, &idx_tmp_name);
+ rename_tmp_packfile_idx(repo, &packname, &idx_tmp_name);
+
+ for (uint32_t i = 0; i < state->nr_written; i++)
+ free(state->written[i]);
+
+clear_exit:
+ free(idx_tmp_name);
+ free(state->pack_tmp_name);
+ free(state->written);
+ memset(state, 0, sizeof(*state));
+
+ strbuf_release(&packname);
+ /* Make objects we just wrote available to ourselves */
+ odb_reprepare(repo->objects);
+}
+
+/*
+ * This writes the specified object to a packfile. Objects written here
+ * during the same transaction are written to the same packfile. The
+ * packfile is not flushed until the transaction is flushed. The caller
+ * is expected to ensure a valid transaction is set up before objects
+ * are recorded.
+ *
+ * This also bypasses the usual "convert-to-git" dance, and that is on
+ * purpose. We could write a streaming version of the converting
+ * functions and insert that before feeding the data to fast-import
+ * (or equivalent in-core API described above). However, that is
+ * somewhat complicated, as we do not know the size of the filter
+ * result, which we need to know beforehand when writing a git object.
+ * Since the primary motivation for trying to stream from the working
+ * tree file and to avoid mmaping it in core is to deal with large
+ * binary blobs, they generally do not want to get any conversion, and
+ * callers should avoid this code path when filters are requested.
+ */
+static int index_blob_packfile_transaction(struct odb_transaction *transaction,
+ struct object_id *result_oid, int fd,
+ size_t size, const char *path,
+ unsigned flags)
+{
+ struct transaction_packfile *state = &transaction->packfile;
+ off_t seekback, already_hashed_to;
+ struct git_hash_ctx ctx;
+ unsigned char obuf[16384];
+ unsigned header_len;
+ struct hashfile_checkpoint checkpoint;
+ struct pack_idx_entry *idx = NULL;
+
+ seekback = lseek(fd, 0, SEEK_CUR);
+ if (seekback == (off_t)-1)
+ return error("cannot find the current offset");
+
+ header_len = format_object_header((char *)obuf, sizeof(obuf),
+ OBJ_BLOB, size);
+ transaction->odb->repo->hash_algo->init_fn(&ctx);
+ git_hash_update(&ctx, obuf, header_len);
+
+ /* Note: idx is non-NULL when we are writing */
+ if ((flags & INDEX_WRITE_OBJECT) != 0) {
+ CALLOC_ARRAY(idx, 1);
+
+ prepare_packfile_transaction(transaction, flags);
+ hashfile_checkpoint_init(state->f, &checkpoint);
+ }
+
+ already_hashed_to = 0;
+
+ while (1) {
+ prepare_packfile_transaction(transaction, flags);
+ if (idx) {
+ hashfile_checkpoint(state->f, &checkpoint);
+ idx->offset = state->offset;
+ crc32_begin(state->f);
+ }
+ if (!stream_blob_to_pack(state, &ctx, &already_hashed_to,
+ fd, size, path, flags))
+ break;
+ /*
+ * Writing this object to the current pack will make
+ * it too big; we need to truncate it, start a new
+ * pack, and write into it.
+ */
+ if (!idx)
+ BUG("should not happen");
+ hashfile_truncate(state->f, &checkpoint);
+ state->offset = checkpoint.offset;
+ flush_packfile_transaction(transaction);
+ if (lseek(fd, seekback, SEEK_SET) == (off_t)-1)
+ return error("cannot seek back");
+ }
+ git_hash_final_oid(result_oid, &ctx);
+ if (!idx)
+ return 0;
+
+ idx->crc32 = crc32_end(state->f);
+ if (already_written(transaction, result_oid)) {
+ hashfile_truncate(state->f, &checkpoint);
+ state->offset = checkpoint.offset;
+ free(idx);
+ } else {
+ oidcpy(&idx->oid, result_oid);
+ ALLOC_GROW(state->written,
+ state->nr_written + 1,
+ state->alloc_written);
+ state->written[state->nr_written++] = idx;
+ }
+ return 0;
+}
+
int index_fd(struct index_state *istate, struct object_id *oid,
int fd, struct stat *st,
enum object_type type, const char *path, unsigned flags)
@@ -1266,11 +1622,12 @@ int index_fd(struct index_state *istate, struct object_id *oid,
} else {
struct odb_transaction *transaction;
- transaction = begin_odb_transaction(the_repository->objects);
- ret = index_blob_bulk_checkin(transaction,
- oid, fd, xsize_t(st->st_size),
- path, flags);
- end_odb_transaction(transaction);
+ transaction = odb_transaction_begin(the_repository->objects);
+ ret = index_blob_packfile_transaction(the_repository->objects->transaction,
+ oid, fd,
+ xsize_t(st->st_size),
+ path, flags);
+ odb_transaction_commit(transaction);
}
close(fd);
@@ -1609,3 +1966,32 @@ out:
munmap(map, mapsize);
return ret;
}
+
+struct odb_transaction *object_file_transaction_begin(struct odb_source *source)
+{
+ struct object_database *odb = source->odb;
+
+ if (odb->transaction)
+ return NULL;
+
+ CALLOC_ARRAY(odb->transaction, 1);
+ odb->transaction->odb = odb;
+
+ return odb->transaction;
+}
+
+void object_file_transaction_commit(struct odb_transaction *transaction)
+{
+ if (!transaction)
+ return;
+
+ /*
+ * Ensure the transaction ending matches the pending transaction.
+ */
+ ASSERT(transaction == transaction->odb->transaction);
+
+ flush_loose_object_transaction(transaction);
+ flush_packfile_transaction(transaction);
+ transaction->odb->transaction = NULL;
+ free(transaction);
+}
diff --git a/object-file.h b/object-file.h
index 15d97630d3..3fd48dcafb 100644
--- a/object-file.h
+++ b/object-file.h
@@ -218,4 +218,20 @@ int read_loose_object(struct repository *repo,
void **contents,
struct object_info *oi);
+struct odb_transaction;
+
+/*
+ * Tell the object database to optimize for adding
+ * multiple objects. object_file_transaction_commit must be called
+ * to make new objects visible. If a transaction is already
+ * pending, NULL is returned.
+ */
+struct odb_transaction *object_file_transaction_begin(struct odb_source *source);
+
+/*
+ * Tell the object database to make any objects from the
+ * current transaction visible.
+ */
+void object_file_transaction_commit(struct odb_transaction *transaction);
+
#endif /* OBJECT_FILE_H */
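Editor's note: a minimal sketch of how the begin/commit pair above is meant to be used. write_many() is hypothetical, and the single-source assumption mirrors odb_transaction_begin() passing odb->sources.

static void write_many(struct odb_source *source)
{
	/* NULL is returned when a transaction is already pending ... */
	struct odb_transaction *tx = object_file_transaction_begin(source);

	/* ... write any number of loose objects or streamed blobs here ... */

	/* ... and committing NULL is a no-op, so nesting stays safe. */
	object_file_transaction_commit(tx);
}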
diff --git a/object-name.c b/object-name.c
index 7774991d28..f6902e140d 100644
--- a/object-name.c
+++ b/object-name.c
@@ -213,7 +213,7 @@ static void find_short_packed_object(struct disambiguate_state *ds)
unique_in_midx(m, ds);
}
- for (p = get_packed_git(ds->repo); p && !ds->ambiguous;
+ for (p = packfile_store_get_packs(ds->repo->objects->packfiles); p && !ds->ambiguous;
p = p->next)
unique_in_pack(p, ds);
}
@@ -596,7 +596,7 @@ static enum get_oid_result get_short_oid(struct repository *r,
* or migrated from loose to packed.
*/
if (status == MISSING_OBJECT) {
- reprepare_packed_git(r);
+ odb_reprepare(r->objects);
find_short_object_filename(&ds);
find_short_packed_object(&ds);
status = finish_object_disambiguation(&ds, oid);
@@ -805,7 +805,7 @@ static void find_abbrev_len_packed(struct min_abbrev_data *mad)
find_abbrev_len_for_midx(m, mad);
}
- for (p = get_packed_git(mad->repo); p; p = p->next)
+ for (p = packfile_store_get_packs(mad->repo->objects->packfiles); p; p = p->next)
find_abbrev_len_for_pack(p, mad);
}
diff --git a/odb.c b/odb.c
index 75c443fe66..00a6e71568 100644
--- a/odb.c
+++ b/odb.c
@@ -694,7 +694,7 @@ static int do_oid_object_info_extended(struct object_database *odb,
/* Not a loose object; someone else may have just packed it. */
if (!(flags & OBJECT_INFO_QUICK)) {
- reprepare_packed_git(odb->repo);
+ odb_reprepare(odb->repo->objects);
if (find_pack_entry(odb->repo, real, &e))
break;
}
@@ -996,8 +996,7 @@ struct object_database *odb_new(struct repository *repo)
memset(o, 0, sizeof(*o));
o->repo = repo;
- INIT_LIST_HEAD(&o->packed_git_mru);
- hashmap_init(&o->pack_map, pack_map_entry_cmp, NULL, 0);
+ o->packfiles = packfile_store_new(o);
pthread_mutex_init(&o->replace_mutex, NULL);
string_list_init_dup(&o->submodule_source_paths);
return o;
@@ -1035,19 +1034,44 @@ void odb_clear(struct object_database *o)
free((char *) o->cached_objects[i].value.buf);
FREE_AND_NULL(o->cached_objects);
- INIT_LIST_HEAD(&o->packed_git_mru);
close_object_store(o);
+ packfile_store_free(o->packfiles);
+ o->packfiles = NULL;
+
+ string_list_clear(&o->submodule_source_paths, 0);
+}
+
+void odb_reprepare(struct object_database *o)
+{
+ struct odb_source *source;
+
+ obj_read_lock();
/*
- * `close_object_store()` only closes the packfiles, but doesn't free
- * them. We thus have to do this manually.
+ * Reprepare alt odbs, in case the alternates file was modified
+ * during the course of this process. This only _adds_ odbs to
+ * the linked list, so existing odbs will continue to exist for
+ * the lifetime of the process.
*/
- for (struct packed_git *p = o->packed_git, *next; p; p = next) {
- next = p->next;
- free(p);
- }
- o->packed_git = NULL;
+ o->loaded_alternates = 0;
+ odb_prepare_alternates(o);
- hashmap_clear(&o->pack_map);
- string_list_clear(&o->submodule_source_paths, 0);
+ for (source = o->sources; source; source = source->next)
+ odb_clear_loose_cache(source);
+
+ o->approximate_object_count_valid = 0;
+
+ packfile_store_reprepare(o->packfiles);
+
+ obj_read_unlock();
+}
+
+struct odb_transaction *odb_transaction_begin(struct object_database *odb)
+{
+ return object_file_transaction_begin(odb->sources);
+}
+
+void odb_transaction_commit(struct odb_transaction *transaction)
+{
+ object_file_transaction_commit(transaction);
}
diff --git a/odb.h b/odb.h
index bd7374f92f..e6602dd90c 100644
--- a/odb.h
+++ b/odb.h
@@ -3,7 +3,6 @@
#include "hashmap.h"
#include "object.h"
-#include "list.h"
#include "oidset.h"
#include "oidmap.h"
#include "string-list.h"
@@ -91,6 +90,7 @@ struct odb_source {
};
struct packed_git;
+struct packfile_store;
struct cached_object_entry;
struct odb_transaction;
@@ -139,20 +139,8 @@ struct object_database {
struct commit_graph *commit_graph;
unsigned commit_graph_attempted : 1; /* if loading has been attempted */
- /*
- * private data
- *
- * should only be accessed directly by packfile.c
- */
-
- struct packed_git *packed_git;
- /* A most-recently-used ordered version of the packed_git list. */
- struct list_head packed_git_mru;
-
- struct {
- struct packed_git **packs;
- unsigned flags;
- } kept_pack_cache;
+ /* Should only be accessed directly by packfile.c and midx.c. */
+ struct packfile_store *packfiles;
/*
* This is meant to hold a *small* number of objects that you would
@@ -164,12 +152,6 @@ struct object_database {
size_t cached_object_nr, cached_object_alloc;
/*
- * A map of packfiles to packed_git structs for tracking which
- * packs have been loaded already.
- */
- struct hashmap pack_map;
-
- /*
* A fast, rough count of the number of objects in the repository.
* These two fields are not meant for direct access. Use
* repo_approximate_object_count() instead.
@@ -178,12 +160,6 @@ struct object_database {
unsigned approximate_object_count_valid : 1;
/*
- * Whether packed_git has already been populated with this repository's
- * packs.
- */
- unsigned packed_git_initialized : 1;
-
- /*
* Submodule source paths that will be added as additional sources to
* allow lookup of submodule objects via the main object database.
*/
@@ -194,6 +170,25 @@ struct object_database *odb_new(struct repository *repo);
void odb_clear(struct object_database *o);
/*
+ * Clear caches, reload alternates and then reload object sources so that new
+ * objects may become accessible.
+ */
+void odb_reprepare(struct object_database *o);
+
+/*
+ * Starts an ODB transaction. Subsequent objects are written to the transaction
+ * and not committed until odb_transaction_commit() is invoked on the
+ * transaction. If the ODB already has a pending transaction, NULL is returned.
+ */
+struct odb_transaction *odb_transaction_begin(struct object_database *odb);
+
+/*
+ * Commits an ODB transaction making the written objects visible. If the
+ * specified transaction is NULL, the function is a no-op.
+ */
+void odb_transaction_commit(struct odb_transaction *transaction);
+
+/*
* Find source by its object directory path. Returns a `NULL` pointer in case
* the source could not be found.
*/
@@ -494,37 +489,4 @@ static inline int odb_write_object(struct object_database *odb,
return odb_write_object_ext(odb, buf, len, type, oid, NULL, 0);
}
-/* Compatibility wrappers, to be removed once Git 2.51 has been released. */
-#include "repository.h"
-
-static inline int oid_object_info_extended(struct repository *r,
- const struct object_id *oid,
- struct object_info *oi,
- unsigned flags)
-{
- return odb_read_object_info_extended(r->objects, oid, oi, flags);
-}
-
-static inline int oid_object_info(struct repository *r,
- const struct object_id *oid,
- unsigned long *sizep)
-{
- return odb_read_object_info(r->objects, oid, sizep);
-}
-
-static inline void *repo_read_object_file(struct repository *r,
- const struct object_id *oid,
- enum object_type *type,
- unsigned long *size)
-{
- return odb_read_object(r->objects, oid, type, size);
-}
-
-static inline int has_object(struct repository *r,
- const struct object_id *oid,
- unsigned flags)
-{
- return odb_has_object(r->objects, oid, flags);
-}
-
#endif /* ODB_H */
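Editor's note: besides the transaction API, odb_reprepare() replaces reprepare_packed_git() as the way to pick up objects that appeared after the process started. A sketch of the retry pattern, with the flags argument kept at 0 purely for brevity.

static int has_object_after_refresh(struct repository *r,
				    const struct object_id *oid)
{
	if (odb_has_object(r->objects, oid, 0))
		return 1;

	/* reload alternates, loose-object caches and packfiles */
	odb_reprepare(r->objects);
	return odb_has_object(r->objects, oid, 0);
}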
diff --git a/pack-bitmap.c b/pack-bitmap.c
index 058bdb5d7d..ac71035d77 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -664,7 +664,7 @@ static int open_pack_bitmap(struct repository *r,
struct packed_git *p;
int ret = -1;
- for (p = get_all_packs(r); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(r->objects->packfiles); p; p = p->next) {
if (open_pack_bitmap_1(bitmap_git, p) == 0) {
ret = 0;
/*
@@ -3362,7 +3362,7 @@ int verify_bitmap_files(struct repository *r)
free(midx_bitmap_name);
}
- for (struct packed_git *p = get_all_packs(r);
+ for (struct packed_git *p = packfile_store_get_all_packs(r->objects->packfiles);
p; p = p->next) {
char *pack_bitmap_name = pack_bitmap_filename(p);
res |= verify_bitmap_file(r->hash_algo, pack_bitmap_name);
diff --git a/pack-objects.c b/pack-objects.c
index a9d9855063..9d6ee72569 100644
--- a/pack-objects.c
+++ b/pack-objects.c
@@ -4,6 +4,7 @@
#include "pack-objects.h"
#include "packfile.h"
#include "parse.h"
+#include "repository.h"
static uint32_t locate_object_entry_hash(struct packing_data *pdata,
const struct object_id *oid,
@@ -86,6 +87,7 @@ struct object_entry *packlist_find(struct packing_data *pdata,
static void prepare_in_pack_by_idx(struct packing_data *pdata)
{
+ struct packfile_store *packs = pdata->repo->objects->packfiles;
struct packed_git **mapping, *p;
int cnt = 0, nr = 1U << OE_IN_PACK_BITS;
@@ -95,7 +97,7 @@ static void prepare_in_pack_by_idx(struct packing_data *pdata)
* (i.e. in_pack_idx also zero) should return NULL.
*/
mapping[cnt++] = NULL;
- for (p = get_all_packs(pdata->repo); p; p = p->next, cnt++) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next, cnt++) {
if (cnt == nr) {
free(mapping);
return;
diff --git a/pack-refs.c b/pack-refs.c
new file mode 100644
index 0000000000..1a5e07d8b8
--- /dev/null
+++ b/pack-refs.c
@@ -0,0 +1,56 @@
+#include "builtin.h"
+#include "config.h"
+#include "environment.h"
+#include "pack-refs.h"
+#include "parse-options.h"
+#include "refs.h"
+#include "revision.h"
+
+int pack_refs_core(int argc,
+ const char **argv,
+ const char *prefix,
+ struct repository *repo,
+ const char * const *usage_opts)
+{
+ struct ref_exclusions excludes = REF_EXCLUSIONS_INIT;
+ struct string_list included_refs = STRING_LIST_INIT_NODUP;
+ struct pack_refs_opts pack_refs_opts = {
+ .exclusions = &excludes,
+ .includes = &included_refs,
+ .flags = PACK_REFS_PRUNE,
+ };
+ struct string_list option_excluded_refs = STRING_LIST_INIT_NODUP;
+ struct string_list_item *item;
+ int pack_all = 0;
+ int ret;
+
+ struct option opts[] = {
+ OPT_BOOL(0, "all", &pack_all, N_("pack everything")),
+ OPT_BIT(0, "prune", &pack_refs_opts.flags, N_("prune loose refs (default)"), PACK_REFS_PRUNE),
+ OPT_BIT(0, "auto", &pack_refs_opts.flags, N_("auto-pack refs as needed"), PACK_REFS_AUTO),
+ OPT_STRING_LIST(0, "include", pack_refs_opts.includes, N_("pattern"),
+ N_("references to include")),
+ OPT_STRING_LIST(0, "exclude", &option_excluded_refs, N_("pattern"),
+ N_("references to exclude")),
+ OPT_END(),
+ };
+ repo_config(repo, git_default_config, NULL);
+ if (parse_options(argc, argv, prefix, opts, usage_opts, 0))
+ usage_with_options(usage_opts, opts);
+
+ for_each_string_list_item(item, &option_excluded_refs)
+ add_ref_exclusion(pack_refs_opts.exclusions, item->string);
+
+ if (pack_all)
+ string_list_append(pack_refs_opts.includes, "*");
+
+ if (!pack_refs_opts.includes->nr)
+ string_list_append(pack_refs_opts.includes, "refs/tags/*");
+
+ ret = refs_optimize(get_main_ref_store(repo), &pack_refs_opts);
+
+ clear_ref_exclusions(&excludes);
+ string_list_clear(&included_refs, 0);
+ string_list_clear(&option_excluded_refs, 0);
+ return ret;
+}
diff --git a/pack-refs.h b/pack-refs.h
new file mode 100644
index 0000000000..5de27e7da8
--- /dev/null
+++ b/pack-refs.h
@@ -0,0 +1,23 @@
+#ifndef PACK_REFS_H
+#define PACK_REFS_H
+
+struct repository;
+
+/*
+ * Shared usage string for options common to git-pack-refs(1)
+ * and git-refs-optimize(1). The command-specific part (e.g., "git refs optimize ")
+ * must be prepended by the caller.
+ */
+#define PACK_REFS_OPTS \
+ "[--all] [--no-prune] [--auto] [--include <pattern>] [--exclude <pattern>]"
+
+/*
+ * The core logic for pack-refs and its clones.
+ */
+int pack_refs_core(int argc,
+ const char **argv,
+ const char *prefix,
+ struct repository *repo,
+ const char * const *usage_opts);
+
+#endif /* PACK_REFS_H */
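Editor's note: a hedged sketch of how a builtin is expected to delegate to the shared core; the command name and usage array below are illustrative, not taken from the patch.

static const char * const pack_refs_usage[] = {
	"git pack-refs " PACK_REFS_OPTS,
	NULL
};

int cmd_pack_refs(int argc, const char **argv, const char *prefix,
		  struct repository *repo)
{
	return pack_refs_core(argc, argv, prefix, repo, pack_refs_usage);
}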
diff --git a/packfile.c b/packfile.c
index acb680966d..5a7caec292 100644
--- a/packfile.c
+++ b/packfile.c
@@ -278,7 +278,7 @@ static int unuse_one_window(struct packed_git *current)
if (current)
scan_windows(current, &lru_p, &lru_w, &lru_l);
- for (p = current->repo->objects->packed_git; p; p = p->next)
+ for (p = current->repo->objects->packfiles->packs; p; p = p->next)
scan_windows(p, &lru_p, &lru_w, &lru_l);
if (lru_p) {
munmap(lru_w->base, lru_w->len);
@@ -362,13 +362,8 @@ void close_pack(struct packed_git *p)
void close_object_store(struct object_database *o)
{
struct odb_source *source;
- struct packed_git *p;
- for (p = o->packed_git; p; p = p->next)
- if (p->do_not_close)
- BUG("want to close pack marked 'do-not-close'");
- else
- close_pack(p);
+ packfile_store_close(o->packfiles);
for (source = o->sources; source; source = source->next) {
if (source->midx)
@@ -468,7 +463,7 @@ static int close_one_pack(struct repository *r)
struct pack_window *mru_w = NULL;
int accept_windows_inuse = 1;
- for (p = r->objects->packed_git; p; p = p->next) {
+ for (p = r->objects->packfiles->packs; p; p = p->next) {
if (p->pack_fd == -1)
continue;
find_lru_pack(p, &lru_p, &mru_w, &accept_windows_inuse);
@@ -784,16 +779,44 @@ struct packed_git *add_packed_git(struct repository *r, const char *path,
return p;
}
-void install_packed_git(struct repository *r, struct packed_git *pack)
+void packfile_store_add_pack(struct packfile_store *store,
+ struct packed_git *pack)
{
if (pack->pack_fd != -1)
pack_open_fds++;
- pack->next = r->objects->packed_git;
- r->objects->packed_git = pack;
+ pack->next = store->packs;
+ store->packs = pack;
hashmap_entry_init(&pack->packmap_ent, strhash(pack->pack_name));
- hashmap_add(&r->objects->pack_map, &pack->packmap_ent);
+ hashmap_add(&store->map, &pack->packmap_ent);
+}
+
+struct packed_git *packfile_store_load_pack(struct packfile_store *store,
+ const char *idx_path, int local)
+{
+ struct strbuf key = STRBUF_INIT;
+ struct packed_git *p;
+
+ /*
+ * We're being called with the path to the index file, but `pack_map`
+ * holds the path to the packfile itself.
+ */
+ strbuf_addstr(&key, idx_path);
+ strbuf_strip_suffix(&key, ".idx");
+ strbuf_addstr(&key, ".pack");
+
+ p = hashmap_get_entry_from_hash(&store->map, strhash(key.buf), key.buf,
+ struct packed_git, packmap_ent);
+ if (!p) {
+ p = add_packed_git(store->odb->repo, idx_path,
+ strlen(idx_path), local);
+ if (p)
+ packfile_store_add_pack(store, p);
+ }
+
+ strbuf_release(&key);
+ return p;
}
void (*report_garbage)(unsigned seen_bits, const char *path);
@@ -895,23 +918,14 @@ static void prepare_pack(const char *full_name, size_t full_name_len,
const char *file_name, void *_data)
{
struct prepare_pack_data *data = (struct prepare_pack_data *)_data;
- struct packed_git *p;
size_t base_len = full_name_len;
if (strip_suffix_mem(full_name, &base_len, ".idx") &&
!(data->m && midx_contains_pack(data->m, file_name))) {
- struct hashmap_entry hent;
- char *pack_name = xstrfmt("%.*s.pack", (int)base_len, full_name);
- unsigned int hash = strhash(pack_name);
- hashmap_entry_init(&hent, hash);
-
- /* Don't reopen a pack we already have. */
- if (!hashmap_get(&data->r->objects->pack_map, &hent, pack_name)) {
- p = add_packed_git(data->r, full_name, full_name_len, data->local);
- if (p)
- install_packed_git(data->r, p);
- }
- free(pack_name);
+ char *trimmed_path = xstrndup(full_name, full_name_len);
+ packfile_store_load_pack(data->r->objects->packfiles,
+ trimmed_path, data->local);
+ free(trimmed_path);
}
if (!report_garbage)
@@ -951,40 +965,6 @@ static void prepare_packed_git_one(struct odb_source *source)
string_list_clear(data.garbage, 0);
}
-static void prepare_packed_git(struct repository *r);
-/*
- * Give a fast, rough count of the number of objects in the repository. This
- * ignores loose objects completely. If you have a lot of them, then either
- * you should repack because your performance will be awful, or they are
- * all unreachable objects about to be pruned, in which case they're not really
- * interesting as a measure of repo size in the first place.
- */
-unsigned long repo_approximate_object_count(struct repository *r)
-{
- if (!r->objects->approximate_object_count_valid) {
- struct odb_source *source;
- unsigned long count = 0;
- struct packed_git *p;
-
- prepare_packed_git(r);
-
- for (source = r->objects->sources; source; source = source->next) {
- struct multi_pack_index *m = get_multi_pack_index(source);
- if (m)
- count += m->num_objects;
- }
-
- for (p = r->objects->packed_git; p; p = p->next) {
- if (open_pack_index(p))
- continue;
- count += p->num_objects;
- }
- r->objects->approximate_object_count = count;
- r->objects->approximate_object_count_valid = 1;
- }
- return r->objects->approximate_object_count;
-}
-
DEFINE_LIST_SORT(static, sort_packs, struct packed_git, next);
static int sort_pack(const struct packed_git *a, const struct packed_git *b)
@@ -1013,80 +993,51 @@ static int sort_pack(const struct packed_git *a, const struct packed_git *b)
return -1;
}
-static void rearrange_packed_git(struct repository *r)
-{
- sort_packs(&r->objects->packed_git, sort_pack);
-}
-
-static void prepare_packed_git_mru(struct repository *r)
+static void packfile_store_prepare_mru(struct packfile_store *store)
{
struct packed_git *p;
- INIT_LIST_HEAD(&r->objects->packed_git_mru);
+ INIT_LIST_HEAD(&store->mru);
- for (p = r->objects->packed_git; p; p = p->next)
- list_add_tail(&p->mru, &r->objects->packed_git_mru);
+ for (p = store->packs; p; p = p->next)
+ list_add_tail(&p->mru, &store->mru);
}
-static void prepare_packed_git(struct repository *r)
+void packfile_store_prepare(struct packfile_store *store)
{
struct odb_source *source;
- if (r->objects->packed_git_initialized)
+ if (store->initialized)
return;
- odb_prepare_alternates(r->objects);
- for (source = r->objects->sources; source; source = source->next) {
+ odb_prepare_alternates(store->odb);
+ for (source = store->odb->sources; source; source = source->next) {
prepare_multi_pack_index_one(source);
prepare_packed_git_one(source);
}
- rearrange_packed_git(r);
-
- prepare_packed_git_mru(r);
- r->objects->packed_git_initialized = 1;
-}
-
-void reprepare_packed_git(struct repository *r)
-{
- struct odb_source *source;
+ sort_packs(&store->packs, sort_pack);
- obj_read_lock();
-
- /*
- * Reprepare alt odbs, in case the alternates file was modified
- * during the course of this process. This only _adds_ odbs to
- * the linked list, so existing odbs will continue to exist for
- * the lifetime of the process.
- */
- r->objects->loaded_alternates = 0;
- odb_prepare_alternates(r->objects);
-
- for (source = r->objects->sources; source; source = source->next)
- odb_clear_loose_cache(source);
-
- r->objects->approximate_object_count_valid = 0;
- r->objects->packed_git_initialized = 0;
- prepare_packed_git(r);
- obj_read_unlock();
+ packfile_store_prepare_mru(store);
+ store->initialized = true;
}
-struct packed_git *get_packed_git(struct repository *r)
+void packfile_store_reprepare(struct packfile_store *store)
{
- prepare_packed_git(r);
- return r->objects->packed_git;
+ store->initialized = false;
+ packfile_store_prepare(store);
}
-struct multi_pack_index *get_multi_pack_index(struct odb_source *source)
+struct packed_git *packfile_store_get_packs(struct packfile_store *store)
{
- prepare_packed_git(source->odb->repo);
- return source->midx;
+ packfile_store_prepare(store);
+ return store->packs;
}
-struct packed_git *get_all_packs(struct repository *r)
+struct packed_git *packfile_store_get_all_packs(struct packfile_store *store)
{
- prepare_packed_git(r);
+ packfile_store_prepare(store);
- for (struct odb_source *source = r->objects->sources; source; source = source->next) {
+ for (struct odb_source *source = store->odb->sources; source; source = source->next) {
struct multi_pack_index *m = source->midx;
if (!m)
continue;
@@ -1094,13 +1045,46 @@ struct packed_git *get_all_packs(struct repository *r)
prepare_midx_pack(m, i);
}
- return r->objects->packed_git;
+ return store->packs;
}
-struct list_head *get_packed_git_mru(struct repository *r)
+struct list_head *packfile_store_get_packs_mru(struct packfile_store *store)
{
- prepare_packed_git(r);
- return &r->objects->packed_git_mru;
+ packfile_store_prepare(store);
+ return &store->mru;
+}
+
+/*
+ * Give a fast, rough count of the number of objects in the repository. This
+ * ignores loose objects completely. If you have a lot of them, then either
+ * you should repack because your performance will be awful, or they are
+ * all unreachable objects about to be pruned, in which case they're not really
+ * interesting as a measure of repo size in the first place.
+ */
+unsigned long repo_approximate_object_count(struct repository *r)
+{
+ if (!r->objects->approximate_object_count_valid) {
+ struct odb_source *source;
+ unsigned long count = 0;
+ struct packed_git *p;
+
+ packfile_store_prepare(r->objects->packfiles);
+
+ for (source = r->objects->sources; source; source = source->next) {
+ struct multi_pack_index *m = get_multi_pack_index(source);
+ if (m)
+ count += m->num_objects;
+ }
+
+ for (p = r->objects->packfiles->packs; p; p = p->next) {
+ if (open_pack_index(p))
+ continue;
+ count += p->num_objects;
+ }
+ r->objects->approximate_object_count = count;
+ r->objects->approximate_object_count_valid = 1;
+ }
+ return r->objects->approximate_object_count;
}
unsigned long unpack_object_header_buffer(const unsigned char *buf,
@@ -1155,7 +1139,7 @@ unsigned long get_size_from_delta(struct packed_git *p,
*
* Other worrying sections could be the call to close_pack_fd(),
* which can close packs even with in-use windows, and to
- * reprepare_packed_git(). Regarding the former, mmap doc says:
+ * odb_reprepare(). Regarding the former, mmap doc says:
* "closing the file descriptor does not unmap the region". And
* for the latter, it won't re-open already available packs.
*/
@@ -1219,7 +1203,7 @@ const struct packed_git *has_packed_and_bad(struct repository *r,
{
struct packed_git *p;
- for (p = r->objects->packed_git; p; p = p->next)
+ for (p = r->objects->packfiles->packs; p; p = p->next)
if (oidset_contains(&p->bad_objects, oid))
return p;
return NULL;
@@ -2074,19 +2058,19 @@ int find_pack_entry(struct repository *r, const struct object_id *oid, struct pa
{
struct list_head *pos;
- prepare_packed_git(r);
+ packfile_store_prepare(r->objects->packfiles);
for (struct odb_source *source = r->objects->sources; source; source = source->next)
if (source->midx && fill_midx_entry(source->midx, oid, e))
return 1;
- if (!r->objects->packed_git)
+ if (!r->objects->packfiles->packs)
return 0;
- list_for_each(pos, &r->objects->packed_git_mru) {
+ list_for_each(pos, &r->objects->packfiles->mru) {
struct packed_git *p = list_entry(pos, struct packed_git, mru);
if (!p->multi_pack_index && fill_pack_entry(oid, e, p)) {
- list_move(&p->mru, &r->objects->packed_git_mru);
+ list_move(&p->mru, &r->objects->packfiles->mru);
return 1;
}
}
@@ -2096,19 +2080,19 @@ int find_pack_entry(struct repository *r, const struct object_id *oid, struct pa
static void maybe_invalidate_kept_pack_cache(struct repository *r,
unsigned flags)
{
- if (!r->objects->kept_pack_cache.packs)
+ if (!r->objects->packfiles->kept_cache.packs)
return;
- if (r->objects->kept_pack_cache.flags == flags)
+ if (r->objects->packfiles->kept_cache.flags == flags)
return;
- FREE_AND_NULL(r->objects->kept_pack_cache.packs);
- r->objects->kept_pack_cache.flags = 0;
+ FREE_AND_NULL(r->objects->packfiles->kept_cache.packs);
+ r->objects->packfiles->kept_cache.flags = 0;
}
struct packed_git **kept_pack_cache(struct repository *r, unsigned flags)
{
maybe_invalidate_kept_pack_cache(r, flags);
- if (!r->objects->kept_pack_cache.packs) {
+ if (!r->objects->packfiles->kept_cache.packs) {
struct packed_git **packs = NULL;
size_t nr = 0, alloc = 0;
struct packed_git *p;
@@ -2121,7 +2105,7 @@ struct packed_git **kept_pack_cache(struct repository *r, unsigned flags)
* covers, one kept and one not kept, but the midx returns only
* the non-kept version.
*/
- for (p = get_all_packs(r); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(r->objects->packfiles); p; p = p->next) {
if ((p->pack_keep && (flags & ON_DISK_KEEP_PACKS)) ||
(p->pack_keep_in_core && (flags & IN_CORE_KEEP_PACKS))) {
ALLOC_GROW(packs, nr + 1, alloc);
@@ -2131,11 +2115,11 @@ struct packed_git **kept_pack_cache(struct repository *r, unsigned flags)
ALLOC_GROW(packs, nr + 1, alloc);
packs[nr] = NULL;
- r->objects->kept_pack_cache.packs = packs;
- r->objects->kept_pack_cache.flags = flags;
+ r->objects->packfiles->kept_cache.packs = packs;
+ r->objects->packfiles->kept_cache.flags = flags;
}
- return r->objects->kept_pack_cache.packs;
+ return r->objects->packfiles->kept_cache.packs;
}
int find_kept_pack_entry(struct repository *r,
@@ -2218,7 +2202,7 @@ int for_each_packed_object(struct repository *repo, each_packed_object_fn cb,
int r = 0;
int pack_errors = 0;
- for (p = get_all_packs(repo); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(repo->objects->packfiles); p; p = p->next) {
if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
continue;
if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&
@@ -2332,3 +2316,46 @@ int parse_pack_header_option(const char *in, unsigned char *out, unsigned int *l
*len = hdr - out;
return 0;
}
+
+static int pack_map_entry_cmp(const void *cmp_data UNUSED,
+ const struct hashmap_entry *entry,
+ const struct hashmap_entry *entry2,
+ const void *keydata)
+{
+ const char *key = keydata;
+ const struct packed_git *pg1, *pg2;
+
+ pg1 = container_of(entry, const struct packed_git, packmap_ent);
+ pg2 = container_of(entry2, const struct packed_git, packmap_ent);
+
+ return strcmp(pg1->pack_name, key ? key : pg2->pack_name);
+}
+
+struct packfile_store *packfile_store_new(struct object_database *odb)
+{
+ struct packfile_store *store;
+ CALLOC_ARRAY(store, 1);
+ store->odb = odb;
+ INIT_LIST_HEAD(&store->mru);
+ hashmap_init(&store->map, pack_map_entry_cmp, NULL, 0);
+ return store;
+}
+
+void packfile_store_free(struct packfile_store *store)
+{
+ for (struct packed_git *p = store->packs, *next; p; p = next) {
+ next = p->next;
+ free(p);
+ }
+ hashmap_clear(&store->map);
+ free(store);
+}
+
+void packfile_store_close(struct packfile_store *store)
+{
+ for (struct packed_git *p = store->packs; p; p = p->next) {
+ if (p->do_not_close)
+ BUG("want to close pack marked 'do-not-close'");
+ close_pack(p);
+ }
+}
diff --git a/packfile.h b/packfile.h
index f16753f2a9..e7a5792b6c 100644
--- a/packfile.h
+++ b/packfile.h
@@ -52,19 +52,114 @@ struct packed_git {
char pack_name[FLEX_ARRAY]; /* more */
};
-static inline int pack_map_entry_cmp(const void *cmp_data UNUSED,
- const struct hashmap_entry *entry,
- const struct hashmap_entry *entry2,
- const void *keydata)
-{
- const char *key = keydata;
- const struct packed_git *pg1, *pg2;
+/*
+ * A store that manages packfiles for a given object database.
+ */
+struct packfile_store {
+ struct object_database *odb;
- pg1 = container_of(entry, const struct packed_git, packmap_ent);
- pg2 = container_of(entry2, const struct packed_git, packmap_ent);
+ /*
+ * The list of packfiles in the order in which they are being added to
+ * the store.
+ */
+ struct packed_git *packs;
- return strcmp(pg1->pack_name, key ? key : pg2->pack_name);
-}
+ /*
+ * Cache of packfiles which are marked as "kept", either because there
+ * is an on-disk ".keep" file or because they are marked as "kept" in
+ * memory.
+ *
+ * Should not be accessed directly, but via `kept_pack_cache()`. The
+ * list of packs gets invalidated when the stored flags and the flags
+ * passed to `kept_pack_cache()` mismatch.
+ */
+ struct {
+ struct packed_git **packs;
+ unsigned flags;
+ } kept_cache;
+
+ /* A most-recently-used ordered version of the packs list. */
+ struct list_head mru;
+
+ /*
+ * A map of packfile names to packed_git structs for tracking which
+ * packs have been loaded already.
+ */
+ struct hashmap map;
+
+ /*
+ * Whether packfiles have already been populated with this store's
+ * packs.
+ */
+ bool initialized;
+};
+
+/*
+ * Allocate and initialize a new empty packfile store for the given object
+ * database.
+ */
+struct packfile_store *packfile_store_new(struct object_database *odb);
+
+/*
+ * Free the packfile store and all its associated state. All packfiles
+ * tracked by the store will be closed.
+ */
+void packfile_store_free(struct packfile_store *store);
+
+/*
+ * Close all packfiles associated with this store. The packfiles won't be
+ * free'd, so they can be re-opened at a later point in time.
+ */
+void packfile_store_close(struct packfile_store *store);
+
+/*
+ * Prepare the packfile store by loading packfiles and multi-pack indices for
+ * all alternates. This becomes a no-op if the store is already prepared.
+ *
+ * It shouldn't typically be necessary to call this function directly, as
+ * functions that access the store know to prepare it.
+ */
+void packfile_store_prepare(struct packfile_store *store);
+
+/*
+ * Clear the packfile caches and try to look up any new packfiles that have
+ * appeared since the packfile store was last prepared.
+ *
+ * This function must be called under the `odb_read_lock()`.
+ */
+void packfile_store_reprepare(struct packfile_store *store);
+
+/*
+ * Add the pack to the store so that contained objects become accessible via
+ * the store. This moves ownership into the store.
+ */
+void packfile_store_add_pack(struct packfile_store *store,
+ struct packed_git *pack);
+
+/*
+ * Get packs managed by the given store. Does not load the MIDX or any packs
+ * referenced by it.
+ */
+struct packed_git *packfile_store_get_packs(struct packfile_store *store);
+
+/*
+ * Get all packs managed by the given store, including packfiles that are
+ * referenced by multi-pack indices.
+ */
+struct packed_git *packfile_store_get_all_packs(struct packfile_store *store);
+
+/*
+ * Get all packs in most-recently-used order.
+ */
+struct list_head *packfile_store_get_packs_mru(struct packfile_store *store);
+
+/*
+ * Open the packfile and add it to the store if it isn't yet known. Returns
+ * either the newly opened packfile or the preexisting packfile. Returns a
+ * `NULL` pointer in case the packfile could not be opened.
+ */
+struct packed_git *packfile_store_load_pack(struct packfile_store *store,
+ const char *idx_path, int local);
struct pack_window {
struct pack_window *next;
@@ -142,14 +237,6 @@ int for_each_packed_object(struct repository *repo, each_packed_object_fn cb,
#define PACKDIR_FILE_GARBAGE 4
extern void (*report_garbage)(unsigned seen_bits, const char *path);
-void reprepare_packed_git(struct repository *r);
-void install_packed_git(struct repository *r, struct packed_git *pack);
-
-struct packed_git *get_packed_git(struct repository *r);
-struct list_head *get_packed_git_mru(struct repository *r);
-struct multi_pack_index *get_multi_pack_index(struct odb_source *source);
-struct packed_git *get_all_packs(struct repository *r);
-
/*
* Give a rough count of objects in the repository. This sacrifices accuracy
* for speed.
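Editor's note: for orientation, a short sketch of iterating through the new accessors instead of reaching into the old odb->packed_git list; count_local_packs() is a hypothetical helper.

static size_t count_local_packs(struct repository *r)
{
	struct packfile_store *store = r->objects->packfiles;
	size_t nr = 0;

	/* includes packs that are only reachable via a multi-pack index */
	for (struct packed_git *p = packfile_store_get_all_packs(store);
	     p; p = p->next)
		if (p->pack_local)
			nr++;
	return nr;
}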
diff --git a/parse-options.c b/parse-options.c
index 992ec9631f..5933468c19 100644
--- a/parse-options.c
+++ b/parse-options.c
@@ -133,7 +133,6 @@ static enum parse_opt_result do_get_value(struct parse_opt_ctx_t *p,
{
const char *arg;
const int unset = flags & OPT_UNSET;
- int err;
if (unset && p->opt)
return error(_("%s takes no value"), optname(opt, flags));
@@ -209,21 +208,31 @@ static enum parse_opt_result do_get_value(struct parse_opt_ctx_t *p,
case OPTION_FILENAME:
{
const char *value;
-
- FREE_AND_NULL(*(char **)opt->value);
-
- err = 0;
+ int is_optional;
if (unset)
value = NULL;
else if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
- value = (const char *) opt->defval;
- else
- err = get_arg(p, opt, flags, &value);
+ value = (char *)opt->defval;
+ else {
+ int err = get_arg(p, opt, flags, &value);
+ if (err)
+ return err;
+ }
+ if (!value)
+ return 0;
- if (!err)
- *(char **)opt->value = fix_filename(p->prefix, value);
- return err;
+ is_optional = skip_prefix(value, ":(optional)", &value);
+ if (!value)
+ is_optional = 0;
+ value = fix_filename(p->prefix, value);
+ if (is_optional && is_empty_or_missing_file(value)) {
+ free((char *)value);
+ } else {
+ FREE_AND_NULL(*(char **)opt->value);
+ *(const char **)opt->value = value;
+ }
+ return 0;
}
case OPTION_CALLBACK:
{
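Editor's note: the OPTION_FILENAME change above adds an ":(optional)" prefix: when the named file is empty or missing, the previously stored value is left untouched instead of being replaced. A sketch of the effect from a caller's perspective; the option name and variable are hypothetical.

static char *log_file;
static struct option example_opts[] = {
	OPT_FILENAME(0, "log", &log_file, "file to read"),
	OPT_END(),
};

/*
 * --log=path              -> log_file is replaced by the prefixed path
 * --log=:(optional)path   -> log_file is replaced only if "path" exists
 *                            and is non-empty; otherwise it is kept as-is
 */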
diff --git a/range-diff.c b/range-diff.c
index ca449a0769..57edff40a8 100644
--- a/range-diff.c
+++ b/range-diff.c
@@ -39,7 +39,7 @@ struct patch_util {
* as struct object_id (will need to be free()d).
*/
static int read_patches(const char *range, struct string_list *list,
- const struct strvec *other_arg,
+ const struct strvec *log_arg,
unsigned int include_merges)
{
struct child_process cp = CHILD_PROCESS_INIT;
@@ -69,8 +69,8 @@ static int read_patches(const char *range, struct string_list *list,
if (!include_merges)
strvec_push(&cp.args, "--no-merges");
strvec_push(&cp.args, range);
- if (other_arg)
- strvec_pushv(&cp.args, other_arg->v);
+ if (log_arg)
+ strvec_pushv(&cp.args, log_arg->v);
cp.out = -1;
cp.no_stdin = 1;
cp.git_cmd = 1;
@@ -594,9 +594,9 @@ int show_range_diff(const char *range1, const char *range2,
if (range_diff_opts->left_only && range_diff_opts->right_only)
res = error(_("options '%s' and '%s' cannot be used together"), "--left-only", "--right-only");
- if (!res && read_patches(range1, &branch1, range_diff_opts->other_arg, include_merges))
+ if (!res && read_patches(range1, &branch1, range_diff_opts->log_arg, include_merges))
res = error(_("could not parse log for '%s'"), range1);
- if (!res && read_patches(range2, &branch2, range_diff_opts->other_arg, include_merges))
+ if (!res && read_patches(range2, &branch2, range_diff_opts->log_arg, include_merges))
res = error(_("could not parse log for '%s'"), range2);
if (!res) {
diff --git a/range-diff.h b/range-diff.h
index 9d39818e34..9b70a80009 100644
--- a/range-diff.h
+++ b/range-diff.h
@@ -23,7 +23,7 @@ struct range_diff_options {
unsigned include_merges:1;
size_t max_memory;
const struct diff_options *diffopt; /* may be NULL */
- const struct strvec *other_arg; /* may be NULL */
+ const struct strvec *log_arg; /* may be NULL */
};
/*
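Editor's note: the rename from other_arg to log_arg is mechanical; the field still carries extra arguments forwarded to the underlying "git log" invocation. A hypothetical initialization, assuming the usual defaults from range-diff.h.

static void example_range_diff(const char *range1, const char *range2)
{
	struct strvec log_args = STRVEC_INIT;
	struct range_diff_options opts = {
		.creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT,
		.log_arg = &log_args,
	};

	/* extra arguments are passed straight through to "git log" */
	strvec_push(&log_args, "--notes");
	show_range_diff(range1, range2, &opts);
	strvec_clear(&log_args);
}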
diff --git a/read-cache.c b/read-cache.c
index 229b8ef11c..032480d0c7 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -8,7 +8,6 @@
#define DISABLE_SIGN_COMPARE_WARNINGS
#include "git-compat-util.h"
-#include "bulk-checkin.h"
#include "config.h"
#include "date.h"
#include "diff.h"
@@ -1807,7 +1806,7 @@ static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
if (expand_name_field) {
const unsigned char *cp = (const unsigned char *)name;
- size_t strip_len, previous_len;
+ uint64_t strip_len, previous_len;
/* If we're at the beginning of a block, ignore the previous name */
strip_len = decode_varint(&cp);
@@ -2655,8 +2654,10 @@ static int ce_write_entry(struct hashfile *f, struct cache_entry *ce,
hashwrite(f, ce->name, len);
hashwrite(f, padding, align_padding_size(size, len));
} else {
- int common, to_remove, prefix_size;
+ int common, to_remove;
+ uint8_t prefix_size;
unsigned char to_remove_vi[16];
+
for (common = 0;
(common < previous_name->len &&
ce->name[common] &&
@@ -3973,9 +3974,9 @@ int add_files_to_cache(struct repository *repo, const char *prefix,
* This function is invoked from commands other than 'add', which
* may not have their own transaction active.
*/
- transaction = begin_odb_transaction(repo->objects);
+ transaction = odb_transaction_begin(repo->objects);
run_diff_files(&rev, DIFF_RACY_IS_MODIFIED);
- end_odb_transaction(transaction);
+ odb_transaction_commit(transaction);
release_revisions(&rev);
return !!data.add_errors;
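The hunk above switches read-cache.c to the renamed object-database transaction API. A minimal sketch of the new begin/commit pairing follows; the handle type struct odb_transaction * is an assumption carried over from the old API, not shown in this hunk:

	struct odb_transaction *transaction;   /* assumed handle type */

	transaction = odb_transaction_begin(repo->objects);
	/* queue any number of object writes here, e.g. via run_diff_files() */
	odb_transaction_commit(transaction);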
diff --git a/refs.c b/refs.c
index c164374c26..965381367e 100644
--- a/refs.c
+++ b/refs.c
@@ -3,7 +3,6 @@
*/
#define USE_THE_REPOSITORY_VARIABLE
-#define DISABLE_SIGN_COMPARE_WARNINGS
#include "git-compat-util.h"
#include "advice.h"
@@ -32,6 +31,7 @@
#include "commit.h"
#include "wildmatch.h"
#include "ident.h"
+#include "fsck.h"
/*
* List of all available backends
@@ -323,6 +323,9 @@ int check_refname_format(const char *refname, int flags)
int refs_fsck(struct ref_store *refs, struct fsck_options *o,
struct worktree *wt)
{
+ if (o->verbose)
+ fprintf_ln(stderr, _("Checking references consistency"));
+
return refs->be->fsck(refs, o, wt);
}
@@ -1710,8 +1713,6 @@ const char *find_descendant_ref(const char *dirname,
const struct string_list *extras,
const struct string_list *skip)
{
- int pos;
-
if (!extras)
return NULL;
@@ -1721,7 +1722,7 @@ const char *find_descendant_ref(const char *dirname,
* with dirname (remember, dirname includes the trailing
* slash) and is not in skip, then we have a conflict.
*/
- for (pos = string_list_find_insert_index(extras, dirname, 0);
+ for (size_t pos = string_list_find_insert_index(extras, dirname, NULL);
pos < extras->nr; pos++) {
const char *extra_refname = extras->items[pos].string;
@@ -2304,6 +2305,11 @@ int refs_pack_refs(struct ref_store *refs, struct pack_refs_opts *opts)
return refs->be->pack_refs(refs, opts);
}
+int refs_optimize(struct ref_store *refs, struct pack_refs_opts *opts)
+{
+ return refs->be->optimize(refs, opts);
+}
+
int peel_iterated_oid(struct repository *r, const struct object_id *base, struct object_id *peeled)
{
if (current_ref_iter &&
@@ -2405,7 +2411,7 @@ static int run_transaction_hook(struct ref_transaction *transaction,
struct child_process proc = CHILD_PROCESS_INIT;
struct strbuf buf = STRBUF_INIT;
const char *hook;
- int ret = 0, i;
+ int ret = 0;
hook = find_hook(transaction->ref_store->repo, "reference-transaction");
if (!hook)
@@ -2422,7 +2428,7 @@ static int run_transaction_hook(struct ref_transaction *transaction,
sigchain_push(SIGPIPE, SIG_IGN);
- for (i = 0; i < transaction->nr; i++) {
+ for (size_t i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
if (update->flags & REF_LOG_ONLY)
@@ -2815,9 +2821,7 @@ void ref_transaction_for_each_queued_update(struct ref_transaction *transaction,
ref_transaction_for_each_queued_update_fn cb,
void *cb_data)
{
- int i;
-
- for (i = 0; i < transaction->nr; i++) {
+ for (size_t i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
cb(update->refname,
diff --git a/refs.h b/refs.h
index bc5d8427a5..4e6bd63aa8 100644
--- a/refs.h
+++ b/refs.h
@@ -483,6 +483,12 @@ struct pack_refs_opts {
int refs_pack_refs(struct ref_store *refs, struct pack_refs_opts *opts);
/*
+ * Optimize the ref store. The exact behavior is up to the backend.
+ * For the files backend, this is equivalent to packing refs.
+ */
+int refs_optimize(struct ref_store *refs, struct pack_refs_opts *opts);
+
+/*
* Setup reflog before using. Fill in err and return -1 on failure.
*/
int refs_create_reflog(struct ref_store *refs, const char *refname,
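A hedged usage sketch for the new refs_optimize() entry point declared above; the get_main_ref_store() accessor and the PACK_REFS_PRUNE flag are pre-existing API used here as assumptions, not part of this hunk:

	struct pack_refs_opts opts = { .flags = PACK_REFS_PRUNE };

	/* Let the active backend decide what "optimize" means (packing, for the files backend). */
	if (refs_optimize(get_main_ref_store(the_repository), &opts))
		die(_("failed to optimize refs"));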
diff --git a/refs/debug.c b/refs/debug.c
index 1cb955961e..697adbd0dc 100644
--- a/refs/debug.c
+++ b/refs/debug.c
@@ -1,7 +1,6 @@
#include "git-compat-util.h"
#include "hex.h"
#include "refs-internal.h"
-#include "string-list.h"
#include "trace.h"
static struct trace_key trace_refs = TRACE_KEY_INIT(REFS);
diff --git a/refs/files-backend.c b/refs/files-backend.c
index bc3347d18c..5ddf418b18 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -20,7 +20,6 @@
#include "../dir-iterator.h"
#include "../lockfile.h"
#include "../object.h"
-#include "../object-file.h"
#include "../path.h"
#include "../dir.h"
#include "../chdir-notify.h"
@@ -1528,6 +1527,15 @@ static int files_pack_refs(struct ref_store *ref_store,
return 0;
}
+static int files_optimize(struct ref_store *ref_store, struct pack_refs_opts *opts)
+{
+ /*
+ * For the "files" backend, "optimizing" is the same as "packing".
+ * So, we just call the existing worker function for packing.
+ */
+ return files_pack_refs(ref_store, opts);
+}
+
/*
* People using contrib's git-new-workdir have .git/logs/refs ->
* /some/other/path/.git/logs/refs, and that may live on another device.
@@ -3961,8 +3969,6 @@ static int files_fsck_refs(struct ref_store *ref_store,
NULL,
};
- if (o->verbose)
- fprintf_ln(stderr, _("Checking references consistency"));
return files_fsck_refs_dir(ref_store, o, "refs", wt, fsck_refs_fn);
}
@@ -3989,6 +3995,7 @@ struct ref_storage_be refs_be_files = {
.transaction_abort = files_transaction_abort,
.pack_refs = files_pack_refs,
+ .optimize = files_optimize,
.rename_ref = files_rename_ref,
.copy_ref = files_copy_ref,
diff --git a/refs/ref-cache.c b/refs/ref-cache.c
index c180e0aad7..e5e5df16d8 100644
--- a/refs/ref-cache.c
+++ b/refs/ref-cache.c
@@ -539,7 +539,7 @@ static int cache_ref_iterator_seek(struct ref_iterator *ref_iterator,
*/
break;
}
- } while (slash);
+ } while (slash && dir->nr);
}
return 0;
diff --git a/refs/refs-internal.h b/refs/refs-internal.h
index 54c2079c12..4ef3bd75c6 100644
--- a/refs/refs-internal.h
+++ b/refs/refs-internal.h
@@ -447,6 +447,8 @@ typedef int ref_transaction_commit_fn(struct ref_store *refs,
typedef int pack_refs_fn(struct ref_store *ref_store,
struct pack_refs_opts *opts);
+typedef int optimize_fn(struct ref_store *ref_store,
+ struct pack_refs_opts *opts);
typedef int rename_ref_fn(struct ref_store *ref_store,
const char *oldref, const char *newref,
const char *logmsg);
@@ -572,6 +574,7 @@ struct ref_storage_be {
ref_transaction_abort_fn *transaction_abort;
pack_refs_fn *pack_refs;
+ optimize_fn *optimize;
rename_ref_fn *rename_ref;
copy_ref_fn *copy_ref;
diff --git a/refs/reftable-backend.c b/refs/reftable-backend.c
index 9e889da2ff..d4b7928620 100644
--- a/refs/reftable-backend.c
+++ b/refs/reftable-backend.c
@@ -6,20 +6,21 @@
#include "../config.h"
#include "../dir.h"
#include "../environment.h"
+#include "../fsck.h"
#include "../gettext.h"
#include "../hash.h"
#include "../hex.h"
#include "../iterator.h"
#include "../ident.h"
-#include "../lockfile.h"
#include "../object.h"
#include "../path.h"
#include "../refs.h"
#include "../reftable/reftable-basics.h"
-#include "../reftable/reftable-stack.h"
-#include "../reftable/reftable-record.h"
#include "../reftable/reftable-error.h"
+#include "../reftable/reftable-fsck.h"
#include "../reftable/reftable-iterator.h"
+#include "../reftable/reftable-record.h"
+#include "../reftable/reftable-stack.h"
#include "../repo-settings.h"
#include "../setup.h"
#include "../strmap.h"
@@ -1741,6 +1742,12 @@ out:
return ret;
}
+static int reftable_be_optimize(struct ref_store *ref_store,
+ struct pack_refs_opts *opts)
+{
+ return reftable_be_pack_refs(ref_store, opts);
+}
+
struct write_create_symref_arg {
struct reftable_ref_store *refs;
struct reftable_stack *stack;
@@ -2708,11 +2715,56 @@ done:
return ret;
}
-static int reftable_be_fsck(struct ref_store *ref_store UNUSED,
- struct fsck_options *o UNUSED,
+static void reftable_fsck_verbose_handler(const char *msg, void *cb_data)
+{
+ struct fsck_options *o = cb_data;
+
+ if (o->verbose)
+ fprintf_ln(stderr, "%s", msg);
+}
+
+static const enum fsck_msg_id fsck_msg_id_map[] = {
+ [REFTABLE_FSCK_ERROR_TABLE_NAME] = FSCK_MSG_BAD_REFTABLE_TABLE_NAME,
+};
+
+static int reftable_fsck_error_handler(struct reftable_fsck_info *info,
+ void *cb_data)
+{
+ struct fsck_ref_report report = { .path = info->path };
+ struct fsck_options *o = cb_data;
+ enum fsck_msg_id msg_id;
+
+ if (info->error < 0 || info->error >= REFTABLE_FSCK_MAX_VALUE)
+ BUG("unknown fsck error: %d", (int)info->error);
+
+ msg_id = fsck_msg_id_map[info->error];
+
+ if (!msg_id)
+ BUG("fsck_msg_id value missing for reftable error: %d", (int)info->error);
+
+ return fsck_report_ref(o, &report, msg_id, "%s", info->msg);
+}
+
+static int reftable_be_fsck(struct ref_store *ref_store, struct fsck_options *o,
struct worktree *wt UNUSED)
{
- return 0;
+ struct reftable_ref_store *refs;
+ struct strmap_entry *entry;
+ struct hashmap_iter iter;
+ int ret = 0;
+
+ refs = reftable_be_downcast(ref_store, REF_STORE_READ, "fsck");
+
+ ret |= reftable_fsck_check(refs->main_backend.stack, reftable_fsck_error_handler,
+ reftable_fsck_verbose_handler, o);
+
+ strmap_for_each_entry(&refs->worktree_backends, &iter, entry) {
+ struct reftable_backend *b = (struct reftable_backend *)entry->value;
+ ret |= reftable_fsck_check(b->stack, reftable_fsck_error_handler,
+ reftable_fsck_verbose_handler, o);
+ }
+
+ return ret;
}
struct ref_storage_be refs_be_reftable = {
@@ -2727,6 +2779,7 @@ struct ref_storage_be refs_be_reftable = {
.transaction_abort = reftable_be_transaction_abort,
.pack_refs = reftable_be_pack_refs,
+ .optimize = reftable_be_optimize,
.rename_ref = reftable_be_rename_ref,
.copy_ref = reftable_be_copy_ref,
diff --git a/reftable/basics.c b/reftable/basics.c
index 9988ebd635..e969927b61 100644
--- a/reftable/basics.c
+++ b/reftable/basics.c
@@ -195,44 +195,55 @@ size_t names_length(const char **names)
return p - names;
}
-char **parse_names(char *buf, int size)
+int parse_names(char *buf, int size, char ***out)
{
char **names = NULL;
size_t names_cap = 0;
size_t names_len = 0;
char *p = buf;
char *end = buf + size;
+ int err = 0;
while (p < end) {
char *next = strchr(p, '\n');
- if (next && next < end) {
- *next = 0;
+ if (!next) {
+ err = REFTABLE_FORMAT_ERROR;
+ goto done;
+ } else if (next < end) {
+ *next = '\0';
} else {
next = end;
}
+
if (p < next) {
if (REFTABLE_ALLOC_GROW(names, names_len + 1,
- names_cap))
- goto err;
+ names_cap)) {
+ err = REFTABLE_OUT_OF_MEMORY_ERROR;
+ goto done;
+ }
names[names_len] = reftable_strdup(p);
- if (!names[names_len++])
- goto err;
+ if (!names[names_len++]) {
+ err = REFTABLE_OUT_OF_MEMORY_ERROR;
+ goto done;
+ }
}
p = next + 1;
}
- if (REFTABLE_ALLOC_GROW(names, names_len + 1, names_cap))
- goto err;
+ if (REFTABLE_ALLOC_GROW(names, names_len + 1, names_cap)) {
+ err = REFTABLE_OUT_OF_MEMORY_ERROR;
+ goto done;
+ }
names[names_len] = NULL;
- return names;
-
-err:
+ *out = names;
+ return 0;
+done:
for (size_t i = 0; i < names_len; i++)
reftable_free(names[i]);
reftable_free(names);
- return NULL;
+ return err;
}
int names_equal(const char **a, const char **b)
diff --git a/reftable/basics.h b/reftable/basics.h
index 7d22f96261..e4b83b2b03 100644
--- a/reftable/basics.h
+++ b/reftable/basics.h
@@ -167,10 +167,11 @@ void free_names(char **a);
/*
* Parse a newline separated list of names. `size` is the length of the buffer,
- * without terminating '\0'. Empty names are discarded. Returns a `NULL`
- * pointer when allocations fail.
+ * without terminating '\0'. Empty names are discarded.
+ *
+ * Returns 0 on success, a reftable error code on error.
*/
-char **parse_names(char *buf, int size);
+int parse_names(char *buf, int size, char ***out);
/* compares two NULL-terminated arrays of strings. */
int names_equal(const char **a, const char **b);
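A short sketch of a caller under the new parse_names() contract, using free_names() declared above; handle_name() is a hypothetical consumer and the error handling style is illustrative:

	char **names = NULL;
	int err = parse_names(buf, size, &names);
	if (err)
		return err;   /* REFTABLE_FORMAT_ERROR, REFTABLE_OUT_OF_MEMORY_ERROR, ... */

	for (size_t i = 0; names[i]; i++)
		handle_name(names[i]);   /* hypothetical consumer */
	free_names(names);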
diff --git a/reftable/fsck.c b/reftable/fsck.c
new file mode 100644
index 0000000000..26b9115b14
--- /dev/null
+++ b/reftable/fsck.c
@@ -0,0 +1,100 @@
+#include "basics.h"
+#include "reftable-fsck.h"
+#include "reftable-table.h"
+#include "stack.h"
+
+static bool table_has_valid_name(const char *name)
+{
+ const char *ptr = name;
+ char *endptr;
+
+ /* strtoull doesn't set errno on success */
+ errno = 0;
+
+ strtoull(ptr, &endptr, 16);
+ if (errno)
+ return false;
+ ptr = endptr;
+
+ if (*ptr != '-')
+ return false;
+ ptr++;
+
+ strtoull(ptr, &endptr, 16);
+ if (errno)
+ return false;
+ ptr = endptr;
+
+ if (*ptr != '-')
+ return false;
+ ptr++;
+
+ strtoul(ptr, &endptr, 16);
+ if (errno)
+ return false;
+ ptr = endptr;
+
+ if (strcmp(ptr, ".ref") && strcmp(ptr, ".log"))
+ return false;
+
+ return true;
+}
+
+typedef int (*table_check_fn)(struct reftable_table *table,
+ reftable_fsck_report_fn report_fn,
+ void *cb_data);
+
+static int table_check_name(struct reftable_table *table,
+ reftable_fsck_report_fn report_fn,
+ void *cb_data)
+{
+ if (!table_has_valid_name(table->name)) {
+ struct reftable_fsck_info info;
+
+ info.error = REFTABLE_FSCK_ERROR_TABLE_NAME;
+ info.msg = "invalid reftable table name";
+ info.path = table->name;
+
+ return report_fn(&info, cb_data);
+ }
+
+ return 0;
+}
+
+static int table_checks(struct reftable_table *table,
+ reftable_fsck_report_fn report_fn,
+ reftable_fsck_verbose_fn verbose_fn UNUSED,
+ void *cb_data)
+{
+ table_check_fn table_check_fns[] = {
+ table_check_name,
+ NULL,
+ };
+ int err = 0;
+
+ for (size_t i = 0; table_check_fns[i]; i++)
+ err |= table_check_fns[i](table, report_fn, cb_data);
+
+ return err;
+}
+
+int reftable_fsck_check(struct reftable_stack *stack,
+ reftable_fsck_report_fn report_fn,
+ reftable_fsck_verbose_fn verbose_fn,
+ void *cb_data)
+{
+ struct reftable_buf msg = REFTABLE_BUF_INIT;
+ int err = 0;
+
+ for (size_t i = 0; i < stack->tables_len; i++) {
+ reftable_buf_reset(&msg);
+ reftable_buf_addstr(&msg, "Checking table: ");
+ reftable_buf_addstr(&msg, stack->tables[i]->name);
+ verbose_fn(msg.buf, cb_data);
+
+ err |= table_checks(stack->tables[i], report_fn, verbose_fn, cb_data);
+ }
+
+ reftable_buf_release(&msg);
+ return err;
+}
diff --git a/reftable/reftable-fsck.h b/reftable/reftable-fsck.h
new file mode 100644
index 0000000000..007a392cf9
--- /dev/null
+++ b/reftable/reftable-fsck.h
@@ -0,0 +1,40 @@
+#ifndef REFTABLE_FSCK_H
+#define REFTABLE_FSCK_H
+
+#include "reftable-stack.h"
+
+enum reftable_fsck_error {
+ /* Invalid table name */
+ REFTABLE_FSCK_ERROR_TABLE_NAME = 0,
+ /* Used for bounds checking, must be last */
+ REFTABLE_FSCK_MAX_VALUE,
+};
+
+/* Represents an individual error encountered during the FSCK checks. */
+struct reftable_fsck_info {
+ enum reftable_fsck_error error;
+ const char *msg;
+ const char *path;
+};
+
+typedef int reftable_fsck_report_fn(struct reftable_fsck_info *info,
+ void *cb_data);
+typedef void reftable_fsck_verbose_fn(const char *msg, void *cb_data);
+
+/*
+ * Given a reftable stack, perform consistency checks on the stack.
+ *
+ * If an issue is encountered, it is reported to the caller via the
+ * provided 'report_fn'. If the issue is non-recoverable, the checks do
+ * not continue. If it is recoverable, the checks continue and further
+ * issues are reported as they are identified.
+ *
+ * The 'verbose_fn' will be invoked to provide verbose information about
+ * the progress and state of the consistency checks.
+ */
+int reftable_fsck_check(struct reftable_stack *stack,
+ reftable_fsck_report_fn report_fn,
+ reftable_fsck_verbose_fn verbose_fn,
+ void *cb_data);
+
+#endif /* REFTABLE_FSCK_H */
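A hedged sketch of how a caller might wire up the callbacks declared in this header; both handlers are illustrative stand-ins, and `stack` is assumed to be an already-initialized struct reftable_stack *:

	static int report_issue(struct reftable_fsck_info *info, void *cb_data)
	{
		/* cb_data is whatever the caller passed to reftable_fsck_check() */
		fprintf(stderr, "reftable fsck: %s: %s\n", info->path, info->msg);
		return 1;   /* non-zero propagates as a failed check */
	}

	static void report_progress(const char *msg, void *cb_data)
	{
		fprintf(stderr, "%s\n", msg);
	}

	/* ... */
	int err = reftable_fsck_check(stack, report_issue, report_progress, NULL);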
diff --git a/reftable/stack.c b/reftable/stack.c
index f91ce50bcd..65d89820bd 100644
--- a/reftable/stack.c
+++ b/reftable/stack.c
@@ -109,12 +109,7 @@ static int fd_read_lines(int fd, char ***namesp)
}
buf[size] = 0;
- *namesp = parse_names(buf, size);
- if (!*namesp) {
- err = REFTABLE_OUT_OF_MEMORY_ERROR;
- goto done;
- }
-
+ err = parse_names(buf, size, namesp);
done:
reftable_free(buf);
return err;
diff --git a/remote-curl.c b/remote-curl.c
index 84f4694780..69f919454a 100644
--- a/remote-curl.c
+++ b/remote-curl.c
@@ -894,14 +894,6 @@ static int probe_rpc(struct rpc_state *rpc, struct slot_results *results)
return err;
}
-static curl_off_t xcurl_off_t(size_t len)
-{
- uintmax_t size = len;
- if (size > maximum_signed_value_of_type(curl_off_t))
- die(_("cannot handle pushes this big"));
- return (curl_off_t)size;
-}
-
/*
* If flush_received is true, do not attempt to read any more; just use what's
* in rpc->buf.
@@ -999,7 +991,7 @@ retry:
* and we just need to send it.
*/
curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDS, gzip_body);
- curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDSIZE_LARGE, xcurl_off_t(gzip_size));
+ curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDSIZE_LARGE, cast_size_t_to_curl_off_t(gzip_size));
} else if (use_gzip && 1024 < rpc->len) {
/* The client backend isn't giving us compressed data so
@@ -1030,7 +1022,7 @@ retry:
headers = curl_slist_append(headers, "Content-Encoding: gzip");
curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDS, gzip_body);
- curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDSIZE_LARGE, xcurl_off_t(gzip_size));
+ curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDSIZE_LARGE, cast_size_t_to_curl_off_t(gzip_size));
if (options.verbosity > 1) {
fprintf(stderr, "POST %s (gzip %lu to %lu bytes)\n",
@@ -1043,7 +1035,7 @@ retry:
* more normal Content-Length approach.
*/
curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDS, rpc->buf);
- curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDSIZE_LARGE, xcurl_off_t(rpc->len));
+ curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDSIZE_LARGE, cast_size_t_to_curl_off_t(rpc->len));
if (options.verbosity > 1) {
fprintf(stderr, "POST %s (%lu bytes)\n",
rpc->service_name, (unsigned long)rpc->len);
diff --git a/revision.c b/revision.c
index 806a1c4c24..cf5e6c1ec9 100644
--- a/revision.c
+++ b/revision.c
@@ -774,9 +774,6 @@ static int check_maybe_different_in_bloom_filter(struct rev_info *revs,
struct bloom_filter *filter;
int result = 0;
- if (!revs->repo->objects->commit_graph)
- return -1;
-
if (commit_graph_generation(commit) == GENERATION_NUMBER_INFINITY)
return -1;
diff --git a/revision.h b/revision.h
index a28e349044..b36acfc2d9 100644
--- a/revision.h
+++ b/revision.h
@@ -334,6 +334,7 @@ struct rev_info {
/* range-diff */
const char *rdiff1;
const char *rdiff2;
+ struct strvec rdiff_log_arg;
int creation_factor;
const char *rdiff_title;
@@ -410,6 +411,7 @@ struct rev_info {
.expand_tabs_in_log = -1, \
.commit_format = CMIT_FMT_DEFAULT, \
.expand_tabs_in_log_default = 8, \
+ .rdiff_log_arg = STRVEC_INIT, \
}
/**
diff --git a/server-info.c b/server-info.c
index 9bb30d9ab7..1d33de821e 100644
--- a/server-info.c
+++ b/server-info.c
@@ -287,12 +287,13 @@ static int compare_info(const void *a_, const void *b_)
static void init_pack_info(struct repository *r, const char *infofile, int force)
{
+ struct packfile_store *packs = r->objects->packfiles;
struct packed_git *p;
int stale;
int i;
size_t alloc = 0;
- for (p = get_all_packs(r); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
/* we ignore things on alternate path since they are
* not available to the pullers in general.
*/
diff --git a/shared.mak b/shared.mak
index 5c7bc94785..0e7492076e 100644
--- a/shared.mak
+++ b/shared.mak
@@ -56,6 +56,7 @@ ifndef V
QUIET_MKDIR_P_PARENT = @echo ' ' MKDIR -p $(@D);
## Used in "Makefile"
+ QUIET_CARGO = @echo ' ' CARGO $@;
QUIET_CC = @echo ' ' CC $@;
QUIET_AR = @echo ' ' AR $@;
QUIET_LINK = @echo ' ' LINK $@;
diff --git a/src/cargo-meson.sh b/src/cargo-meson.sh
new file mode 100755
index 0000000000..99400986d9
--- /dev/null
+++ b/src/cargo-meson.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+if test "$#" -lt 2
+then
+ exit 1
+fi
+
+SOURCE_DIR="$1"
+BUILD_DIR="$2"
+BUILD_TYPE=debug
+
+shift 2
+
+for arg
+do
+ case "$arg" in
+ --release)
+ BUILD_TYPE=release;;
+ esac
+done
+
+cargo build --lib --quiet --manifest-path="$SOURCE_DIR/Cargo.toml" --target-dir="$BUILD_DIR" "$@"
+RET=$?
+if test $RET -ne 0
+then
+ exit $RET
+fi
+
+if ! cmp "$BUILD_DIR/$BUILD_TYPE/libgitcore.a" "$BUILD_DIR/libgitcore.a" >/dev/null 2>&1
+then
+ cp "$BUILD_DIR/$BUILD_TYPE/libgitcore.a" "$BUILD_DIR/libgitcore.a"
+fi
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000000..9da70d8b57
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1 @@
+pub mod varint;
diff --git a/src/meson.build b/src/meson.build
new file mode 100644
index 0000000000..25b9ad5a14
--- /dev/null
+++ b/src/meson.build
@@ -0,0 +1,41 @@
+libgit_rs_sources = [
+ 'lib.rs',
+ 'varint.rs',
+]
+
+# Unfortunately we must use a wrapper command to move the output file into the
+# current build directory. This can be fixed once `cargo build --artifact-dir`
+# stabilizes. See https://github.com/rust-lang/cargo/issues/6790 for that
+# effort.
+cargo_command = [
+ shell,
+ meson.current_source_dir() / 'cargo-meson.sh',
+ meson.project_source_root(),
+ meson.current_build_dir(),
+]
+if get_option('buildtype') == 'release'
+ cargo_command += '--release'
+endif
+
+libgit_rs = custom_target('git_rs',
+ input: libgit_rs_sources + [
+ meson.project_source_root() / 'Cargo.toml',
+ ],
+ output: 'libgitcore.a',
+ command: cargo_command,
+)
+libgit_dependencies += declare_dependency(link_with: libgit_rs)
+
+if get_option('tests')
+ test('rust', cargo,
+ args: [
+ 'test',
+ '--manifest-path',
+ meson.project_source_root() / 'Cargo.toml',
+ '--target-dir',
+ meson.current_build_dir() / 'target',
+ ],
+ timeout: 0,
+ protocol: 'rust',
+ )
+endif
diff --git a/src/varint.rs b/src/varint.rs
new file mode 100644
index 0000000000..6e610bdd8e
--- /dev/null
+++ b/src/varint.rs
@@ -0,0 +1,92 @@
+#[no_mangle]
+pub unsafe extern "C" fn decode_varint(bufp: *mut *const u8) -> u64 {
+ let mut buf = *bufp;
+ let mut c = *buf;
+ let mut val = u64::from(c & 127);
+
+ buf = buf.add(1);
+
+ while (c & 128) != 0 {
+ val = val.wrapping_add(1);
+ if val == 0 || val.leading_zeros() < 7 {
+ return 0; // overflow
+ }
+
+ c = *buf;
+ buf = buf.add(1);
+
+ val = (val << 7) + u64::from(c & 127);
+ }
+
+ *bufp = buf;
+ val
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn encode_varint(value: u64, buf: *mut u8) -> u8 {
+ let mut varint: [u8; 16] = [0; 16];
+ let mut pos = varint.len() - 1;
+
+ varint[pos] = (value & 127) as u8;
+
+ let mut value = value >> 7;
+ while value != 0 {
+ pos -= 1;
+ value -= 1;
+ varint[pos] = 128 | (value & 127) as u8;
+ value >>= 7;
+ }
+
+ if !buf.is_null() {
+ std::ptr::copy_nonoverlapping(varint.as_ptr().add(pos), buf, varint.len() - pos);
+ }
+
+ (varint.len() - pos) as u8
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_decode_varint() {
+ unsafe {
+ assert_eq!(decode_varint(&mut [0x00].as_slice().as_ptr()), 0);
+ assert_eq!(decode_varint(&mut [0x01].as_slice().as_ptr()), 1);
+ assert_eq!(decode_varint(&mut [0x7f].as_slice().as_ptr()), 127);
+ assert_eq!(decode_varint(&mut [0x80, 0x00].as_slice().as_ptr()), 128);
+ assert_eq!(decode_varint(&mut [0x80, 0x01].as_slice().as_ptr()), 129);
+ assert_eq!(decode_varint(&mut [0x80, 0x7f].as_slice().as_ptr()), 255);
+
+ // Overflows are expected to return 0.
+ assert_eq!(decode_varint(&mut [0x88; 16].as_slice().as_ptr()), 0);
+ }
+ }
+
+ #[test]
+ fn test_encode_varint() {
+ unsafe {
+ let mut varint: [u8; 16] = [0; 16];
+
+ assert_eq!(encode_varint(0, std::ptr::null_mut()), 1);
+
+ assert_eq!(encode_varint(0, varint.as_mut_slice().as_mut_ptr()), 1);
+ assert_eq!(varint, [0; 16]);
+
+ assert_eq!(encode_varint(10, varint.as_mut_slice().as_mut_ptr()), 1);
+ assert_eq!(varint, [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+
+ assert_eq!(encode_varint(127, varint.as_mut_slice().as_mut_ptr()), 1);
+ assert_eq!(varint, [127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+
+ assert_eq!(encode_varint(128, varint.as_mut_slice().as_mut_ptr()), 2);
+ assert_eq!(varint, [128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+
+ assert_eq!(encode_varint(129, varint.as_mut_slice().as_mut_ptr()), 2);
+ assert_eq!(varint, [128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+
+ assert_eq!(encode_varint(255, varint.as_mut_slice().as_mut_ptr()), 2);
+ assert_eq!(varint, [128, 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+ }
+ }
+}
diff --git a/string-list.c b/string-list.c
index 343cf1ca90..08dc00984c 100644
--- a/string-list.c
+++ b/string-list.c
@@ -16,7 +16,7 @@ void string_list_init_dup(struct string_list *list)
/* if there is no exact match, point to the index where the entry could be
* inserted */
static size_t get_entry_index(const struct string_list *list, const char *string,
- int *exact_match)
+ bool *exact_match)
{
size_t left = 0, right = list->nr;
compare_strings_fn cmp = list->cmp ? list->cmp : strcmp;
@@ -29,18 +29,20 @@ static size_t get_entry_index(const struct string_list *list, const char *string
else if (compare > 0)
left = middle + 1;
else {
- *exact_match = 1;
+ if (exact_match)
+ *exact_match = true;
return middle;
}
}
- *exact_match = 0;
+ if (exact_match)
+ *exact_match = false;
return right;
}
static size_t add_entry(struct string_list *list, const char *string)
{
- int exact_match = 0;
+ bool exact_match;
size_t index = get_entry_index(list, string, &exact_match);
if (exact_match)
@@ -68,7 +70,7 @@ struct string_list_item *string_list_insert(struct string_list *list, const char
void string_list_remove(struct string_list *list, const char *string,
int free_util)
{
- int exact_match;
+ bool exact_match;
int i = get_entry_index(list, string, &exact_match);
if (exact_match) {
@@ -82,26 +84,23 @@ void string_list_remove(struct string_list *list, const char *string,
}
}
-int string_list_has_string(const struct string_list *list, const char *string)
+bool string_list_has_string(const struct string_list *list, const char *string)
{
- int exact_match;
+ bool exact_match;
get_entry_index(list, string, &exact_match);
return exact_match;
}
-int string_list_find_insert_index(const struct string_list *list, const char *string,
- int negative_existing_index)
+size_t string_list_find_insert_index(const struct string_list *list, const char *string,
+ bool *exact_match)
{
- int exact_match;
- int index = get_entry_index(list, string, &exact_match);
- if (exact_match)
- index = -1 - (negative_existing_index ? index : 0);
- return index;
+ return get_entry_index(list, string, exact_match);
}
struct string_list_item *string_list_lookup(struct string_list *list, const char *string)
{
- int exact_match, i = get_entry_index(list, string, &exact_match);
+ bool exact_match;
+ size_t i = get_entry_index(list, string, &exact_match);
if (!exact_match)
return NULL;
return list->items + i;
diff --git a/string-list.h b/string-list.h
index 2b438c7733..fa6ba07853 100644
--- a/string-list.h
+++ b/string-list.h
@@ -172,9 +172,15 @@ void string_list_remove_empty_items(struct string_list *list, int free_util);
/* Use these functions only on sorted lists: */
/** Determine if the string_list has a given string or not. */
-int string_list_has_string(const struct string_list *list, const char *string);
-int string_list_find_insert_index(const struct string_list *list, const char *string,
- int negative_existing_index);
+bool string_list_has_string(const struct string_list *list, const char *string);
+
+/**
+ * Find the index at which a new element should be inserted into the
+ * string_list to maintain sorted order. If exact_match is not NULL,
+ * it will be set to true if the string already exists in the list.
+ */
+size_t string_list_find_insert_index(const struct string_list *list, const char *string,
+ bool *exact_match);
/**
* Insert a new element to the string_list. The returned pointer can
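A small sketch of the revised string_list_find_insert_index() interface, which now reports an existing entry through the optional exact_match flag instead of encoding it in a negative index; the list contents here are made up for illustration:

	struct string_list list = STRING_LIST_INIT_DUP;
	bool exact = false;
	size_t pos;

	string_list_insert(&list, "apple");
	string_list_insert(&list, "cherry");

	pos = string_list_find_insert_index(&list, "banana", &exact);
	/* exact is false; pos is 1, where "banana" would keep the list sorted */

	pos = string_list_find_insert_index(&list, "cherry", NULL);
	/* exact_match may be NULL when only the position is wanted */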
diff --git a/t/helper/test-find-pack.c b/t/helper/test-find-pack.c
index 611a13a326..e001dc3066 100644
--- a/t/helper/test-find-pack.c
+++ b/t/helper/test-find-pack.c
@@ -39,7 +39,7 @@ int cmd__find_pack(int argc, const char **argv)
if (repo_get_oid(the_repository, argv[0], &oid))
die("cannot parse %s as an object name", argv[0]);
- for (p = get_all_packs(the_repository); p; p = p->next)
+ for (p = packfile_store_get_all_packs(the_repository->objects->packfiles); p; p = p->next)
if (find_pack_entry_one(&oid, p)) {
printf("%s\n", p->pack_name);
actual_count++;
diff --git a/t/helper/test-pack-deltas.c b/t/helper/test-pack-deltas.c
index 4caa024b1e..4981401eaa 100644
--- a/t/helper/test-pack-deltas.c
+++ b/t/helper/test-pack-deltas.c
@@ -51,16 +51,14 @@ static void write_ref_delta(struct hashfile *f,
unsigned long size, base_size, delta_size, compressed_size, hdrlen;
enum object_type type;
void *base_buf, *delta_buf;
- void *buf = repo_read_object_file(the_repository,
- oid, &type,
- &size);
+ void *buf = odb_read_object(the_repository->objects,
+ oid, &type, &size);
if (!buf)
die("unable to read %s", oid_to_hex(oid));
- base_buf = repo_read_object_file(the_repository,
- base, &type,
- &base_size);
+ base_buf = odb_read_object(the_repository->objects,
+ base, &type, &base_size);
if (!base_buf)
die("unable to read %s", oid_to_hex(base));
diff --git a/t/helper/test-pack-mtimes.c b/t/helper/test-pack-mtimes.c
index d51aaa3dc4..7c428c1601 100644
--- a/t/helper/test-pack-mtimes.c
+++ b/t/helper/test-pack-mtimes.c
@@ -37,7 +37,7 @@ int cmd__pack_mtimes(int argc, const char **argv)
if (argc != 2)
usage(pack_mtimes_usage);
- for (p = get_all_packs(the_repository); p; p = p->next) {
+ for (p = packfile_store_get_all_packs(the_repository->objects->packfiles); p; p = p->next) {
strbuf_addstr(&buf, basename(p->pack_name));
strbuf_strip_suffix(&buf, ".pack");
strbuf_addstr(&buf, ".mtimes");
diff --git a/t/meson.build b/t/meson.build
index 7974795fe4..401b24e50e 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -146,6 +146,7 @@ integration_tests = [
't0611-reftable-httpd.sh',
't0612-reftable-jgit-compatibility.sh',
't0613-reftable-write-options.sh',
+ 't0614-reftable-fsck.sh',
't1000-read-tree-m-3way.sh',
't1001-read-tree-m-2way.sh',
't1002-read-tree-m-u-2way.sh',
@@ -213,6 +214,7 @@ integration_tests = [
't1460-refs-migrate.sh',
't1461-refs-list.sh',
't1462-refs-exists.sh',
+ 't1463-refs-optimize.sh',
't1500-rev-parse.sh',
't1501-work-tree.sh',
't1502-rev-parse-parseopt.sh',
@@ -1034,6 +1036,7 @@ integration_tests = [
't9302-fast-import-unpack-limit.sh',
't9303-fast-import-compression.sh',
't9304-fast-import-marks.sh',
+ 't9305-fast-import-signatures.sh',
't9350-fast-export.sh',
't9351-fast-export-anonymize.sh',
't9400-git-cvsserver-server.sh',
diff --git a/t/pack-refs-tests.sh b/t/pack-refs-tests.sh
new file mode 100644
index 0000000000..3dbcc01718
--- /dev/null
+++ b/t/pack-refs-tests.sh
@@ -0,0 +1,431 @@
+pack_refs=${pack_refs:-pack-refs}
+
+test_expect_success 'enable reflogs' '
+ git config core.logallrefupdates true
+'
+
+test_expect_success 'prepare a trivial repository' '
+ echo Hello > A &&
+ git update-index --add A &&
+ git commit -m "Initial commit." &&
+ HEAD=$(git rev-parse --verify HEAD)
+'
+
+test_expect_success '${pack_refs} --prune --all' '
+ test_path_is_missing .git/packed-refs &&
+ git ${pack_refs} --no-prune --all &&
+ test_path_is_file .git/packed-refs &&
+ N=$(find .git/refs -type f | wc -l) &&
+ test "$N" != 0 &&
+
+ git ${pack_refs} --prune --all &&
+ test_path_is_file .git/packed-refs &&
+ N=$(find .git/refs -type f) &&
+ test -z "$N"
+'
+
+SHA1=
+
+test_expect_success 'see if git show-ref works as expected' '
+ git branch a &&
+ SHA1=$(cat .git/refs/heads/a) &&
+ echo "$SHA1 refs/heads/a" >expect &&
+ git show-ref a >result &&
+ test_cmp expect result
+'
+
+test_expect_success 'see if a branch still exists when packed' '
+ git branch b &&
+ git ${pack_refs} --all &&
+ rm -f .git/refs/heads/b &&
+ echo "$SHA1 refs/heads/b" >expect &&
+ git show-ref b >result &&
+ test_cmp expect result
+'
+
+test_expect_success 'git branch c/d should barf if branch c exists' '
+ git branch c &&
+ git ${pack_refs} --all &&
+ rm -f .git/refs/heads/c &&
+ test_must_fail git branch c/d
+'
+
+test_expect_success 'see if a branch still exists after git ${pack_refs} --prune' '
+ git branch e &&
+ git ${pack_refs} --all --prune &&
+ echo "$SHA1 refs/heads/e" >expect &&
+ git show-ref e >result &&
+ test_cmp expect result
+'
+
+test_expect_success 'see if git ${pack_refs} --prune remove ref files' '
+ git branch f &&
+ git ${pack_refs} --all --prune &&
+ ! test -f .git/refs/heads/f
+'
+
+test_expect_success 'see if git ${pack_refs} --prune removes empty dirs' '
+ git branch r/s/t &&
+ git ${pack_refs} --all --prune &&
+ ! test -e .git/refs/heads/r
+'
+
+test_expect_success 'git branch g should work when git branch g/h has been deleted' '
+ git branch g/h &&
+ git ${pack_refs} --all --prune &&
+ git branch -d g/h &&
+ git branch g &&
+ git ${pack_refs} --all &&
+ git branch -d g
+'
+
+test_expect_success 'git branch i/j/k should barf if branch i exists' '
+ git branch i &&
+ git ${pack_refs} --all --prune &&
+ test_must_fail git branch i/j/k
+'
+
+test_expect_success 'test git branch k after branch k/l/m and k/lm have been deleted' '
+ git branch k/l &&
+ git branch k/lm &&
+ git branch -d k/l &&
+ git branch k/l/m &&
+ git branch -d k/l/m &&
+ git branch -d k/lm &&
+ git branch k
+'
+
+test_expect_success 'test git branch n after some branch deletion and pruning' '
+ git branch n/o &&
+ git branch n/op &&
+ git branch -d n/o &&
+ git branch n/o/p &&
+ git branch -d n/op &&
+ git ${pack_refs} --all --prune &&
+ git branch -d n/o/p &&
+ git branch n
+'
+
+test_expect_success 'test excluded refs are not packed' '
+ git branch dont_pack1 &&
+ git branch dont_pack2 &&
+ git branch pack_this &&
+ git ${pack_refs} --all --exclude "refs/heads/dont_pack*" &&
+ test -f .git/refs/heads/dont_pack1 &&
+ test -f .git/refs/heads/dont_pack2 &&
+ ! test -f .git/refs/heads/pack_this'
+
+test_expect_success 'test --no-exclude refs clears excluded refs' '
+ git branch dont_pack3 &&
+ git branch dont_pack4 &&
+ git ${pack_refs} --all --exclude "refs/heads/dont_pack*" --no-exclude &&
+ ! test -f .git/refs/heads/dont_pack3 &&
+ ! test -f .git/refs/heads/dont_pack4'
+
+test_expect_success 'test only included refs are packed' '
+ git branch pack_this1 &&
+ git branch pack_this2 &&
+ git tag dont_pack5 &&
+ git ${pack_refs} --include "refs/heads/pack_this*" &&
+ test -f .git/refs/tags/dont_pack5 &&
+ ! test -f .git/refs/heads/pack_this1 &&
+ ! test -f .git/refs/heads/pack_this2'
+
+test_expect_success 'test --no-include refs clears included refs' '
+ git branch pack1 &&
+ git branch pack2 &&
+ git ${pack_refs} --include "refs/heads/pack*" --no-include &&
+ test -f .git/refs/heads/pack1 &&
+ test -f .git/refs/heads/pack2'
+
+test_expect_success 'test --exclude takes precedence over --include' '
+ git branch dont_pack5 &&
+ git ${pack_refs} --include "refs/heads/pack*" --exclude "refs/heads/pack*" &&
+ test -f .git/refs/heads/dont_pack5'
+
+test_expect_success 'see if up-to-date packed refs are preserved' '
+ git branch q &&
+ git ${pack_refs} --all --prune &&
+ git update-ref refs/heads/q refs/heads/q &&
+ ! test -f .git/refs/heads/q
+'
+
+test_expect_success 'pack, prune and repack' '
+ git tag foo &&
+ git ${pack_refs} --all --prune &&
+ git show-ref >all-of-them &&
+ git ${pack_refs} &&
+ git show-ref >again &&
+ test_cmp all-of-them again
+'
+
+test_expect_success 'explicit ${pack_refs} with dangling packed reference' '
+ git commit --allow-empty -m "soon to be garbage-collected" &&
+ git ${pack_refs} --all &&
+ git reset --hard HEAD^ &&
+ git reflog expire --expire=all --all &&
+ git prune --expire=all &&
+ git ${pack_refs} --all 2>result &&
+ test_must_be_empty result
+'
+
+test_expect_success 'delete ref with dangling packed version' '
+ git checkout -b lamb &&
+ git commit --allow-empty -m "future garbage" &&
+ git ${pack_refs} --all &&
+ git reset --hard HEAD^ &&
+ git checkout main &&
+ git reflog expire --expire=all --all &&
+ git prune --expire=all &&
+ git branch -d lamb 2>result &&
+ test_must_be_empty result
+'
+
+test_expect_success 'delete ref while another dangling packed ref' '
+ git branch lamb &&
+ git commit --allow-empty -m "future garbage" &&
+ git ${pack_refs} --all &&
+ git reset --hard HEAD^ &&
+ git reflog expire --expire=all --all &&
+ git prune --expire=all &&
+ git branch -d lamb 2>result &&
+ test_must_be_empty result
+'
+
+test_expect_success 'pack ref directly below refs/' '
+ git update-ref refs/top HEAD &&
+ git ${pack_refs} --all --prune &&
+ grep refs/top .git/packed-refs &&
+ test_path_is_missing .git/refs/top
+'
+
+test_expect_success 'do not pack ref in refs/bisect' '
+ git update-ref refs/bisect/local HEAD &&
+ git ${pack_refs} --all --prune &&
+ ! grep refs/bisect/local .git/packed-refs >/dev/null &&
+ test_path_is_file .git/refs/bisect/local
+'
+
+test_expect_success 'disable reflogs' '
+ git config core.logallrefupdates false &&
+ rm -rf .git/logs
+'
+
+test_expect_success 'create packed foo/bar/baz branch' '
+ git branch foo/bar/baz &&
+ git ${pack_refs} --all --prune &&
+ test_path_is_missing .git/refs/heads/foo/bar/baz &&
+ test_must_fail git reflog exists refs/heads/foo/bar/baz
+'
+
+test_expect_success 'notice d/f conflict with existing directory' '
+ test_must_fail git branch foo &&
+ test_must_fail git branch foo/bar
+'
+
+test_expect_success 'existing directory reports concrete ref' '
+ test_must_fail git branch foo 2>stderr &&
+ test_grep refs/heads/foo/bar/baz stderr
+'
+
+test_expect_success 'notice d/f conflict with existing ref' '
+ test_must_fail git branch foo/bar/baz/extra &&
+ test_must_fail git branch foo/bar/baz/lots/of/extra/components
+'
+
+test_expect_success 'reject packed-refs with unterminated line' '
+ cp .git/packed-refs .git/packed-refs.bak &&
+ test_when_finished "mv .git/packed-refs.bak .git/packed-refs" &&
+ printf "%s" "$HEAD refs/zzzzz" >>.git/packed-refs &&
+ echo "fatal: unterminated line in .git/packed-refs: $HEAD refs/zzzzz" >expected_err &&
+ test_must_fail git for-each-ref >out 2>err &&
+ test_cmp expected_err err
+'
+
+test_expect_success 'reject packed-refs containing junk' '
+ cp .git/packed-refs .git/packed-refs.bak &&
+ test_when_finished "mv .git/packed-refs.bak .git/packed-refs" &&
+ printf "%s\n" "bogus content" >>.git/packed-refs &&
+ echo "fatal: unexpected line in .git/packed-refs: bogus content" >expected_err &&
+ test_must_fail git for-each-ref >out 2>err &&
+ test_cmp expected_err err
+'
+
+test_expect_success 'reject packed-refs with a short SHA-1' '
+ cp .git/packed-refs .git/packed-refs.bak &&
+ test_when_finished "mv .git/packed-refs.bak .git/packed-refs" &&
+ printf "%.7s %s\n" $HEAD refs/zzzzz >>.git/packed-refs &&
+ printf "fatal: unexpected line in .git/packed-refs: %.7s %s\n" $HEAD refs/zzzzz >expected_err &&
+ test_must_fail git for-each-ref >out 2>err &&
+ test_cmp expected_err err
+'
+
+test_expect_success 'timeout if packed-refs.lock exists' '
+ LOCK=.git/packed-refs.lock &&
+ >"$LOCK" &&
+ test_when_finished "rm -f $LOCK" &&
+ test_must_fail git ${pack_refs} --all --prune
+'
+
+test_expect_success 'retry acquiring packed-refs.lock' '
+ LOCK=.git/packed-refs.lock &&
+ >"$LOCK" &&
+ test_when_finished "wait && rm -f $LOCK" &&
+ {
+ ( sleep 1 && rm -f $LOCK ) &
+ } &&
+ git -c core.packedrefstimeout=3000 ${pack_refs} --all --prune
+'
+
+test_expect_success SYMLINKS 'pack symlinked packed-refs' '
+ # First make sure that symlinking works when reading:
+ git update-ref refs/heads/lossy refs/heads/main &&
+ git for-each-ref >all-refs-before &&
+ mv .git/packed-refs .git/my-deviant-packed-refs &&
+ ln -s my-deviant-packed-refs .git/packed-refs &&
+ git for-each-ref >all-refs-linked &&
+ test_cmp all-refs-before all-refs-linked &&
+ git ${pack_refs} --all --prune &&
+ git for-each-ref >all-refs-packed &&
+ test_cmp all-refs-before all-refs-packed &&
+ test -h .git/packed-refs &&
+ test "$(test_readlink .git/packed-refs)" = "my-deviant-packed-refs"
+'
+
+# The 'packed-refs' file is stored directly in .git/. This means it is global
+# to the repository, and can only contain refs that are shared across all
+# worktrees.
+test_expect_success 'refs/worktree must not be packed' '
+ test_commit initial &&
+ test_commit wt1 &&
+ test_commit wt2 &&
+ git worktree add wt1 wt1 &&
+ git worktree add wt2 wt2 &&
+ git checkout initial &&
+ git update-ref refs/worktree/foo HEAD &&
+ git -C wt1 update-ref refs/worktree/foo HEAD &&
+ git -C wt2 update-ref refs/worktree/foo HEAD &&
+ git ${pack_refs} --all &&
+ test_path_is_missing .git/refs/tags/wt1 &&
+ test_path_is_file .git/refs/worktree/foo &&
+ test_path_is_file .git/worktrees/wt1/refs/worktree/foo &&
+ test_path_is_file .git/worktrees/wt2/refs/worktree/foo
+'
+
+# we do not want to count on running ${pack_refs} to
+# actually pack it, as it is perfectly reasonable to
+# skip processing a broken ref
+test_expect_success 'create packed-refs file with broken ref' '
+ test_tick && git commit --allow-empty -m one &&
+ recoverable=$(git rev-parse HEAD) &&
+ test_tick && git commit --allow-empty -m two &&
+ missing=$(git rev-parse HEAD) &&
+ rm -f .git/refs/heads/main &&
+ cat >.git/packed-refs <<-EOF &&
+ $missing refs/heads/main
+ $recoverable refs/heads/other
+ EOF
+ echo $missing >expect &&
+ git rev-parse refs/heads/main >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '${pack_refs} does not silently delete broken packed ref' '
+ git ${pack_refs} --all --prune &&
+ git rev-parse refs/heads/main >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '${pack_refs} does not drop broken refs during deletion' '
+ git update-ref -d refs/heads/other &&
+ git rev-parse refs/heads/main >actual &&
+ test_cmp expect actual
+'
+
+for command in "git ${pack_refs} --all --auto" "git maintenance run --task=${pack_refs} --auto"
+do
+ test_expect_success "$command does not repack below 16 refs without packed-refs" '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ git config set maintenance.auto false &&
+ git commit --allow-empty --message "initial" &&
+
+ # Create 14 additional references, which brings us to
+ # 15 together with the default branch.
+ printf "create refs/heads/loose-%d HEAD\n" $(test_seq 14) >stdin &&
+ git update-ref --stdin <stdin &&
+ test_path_is_missing .git/packed-refs &&
+ git ${pack_refs} --auto --all &&
+ test_path_is_missing .git/packed-refs &&
+
+ # Create the 16th reference, which should cause us to repack.
+ git update-ref refs/heads/loose-15 HEAD &&
+ git ${pack_refs} --auto --all &&
+ test_path_is_file .git/packed-refs
+ )
+ '
+
+ test_expect_success "$command does not repack below 16 refs with small packed-refs" '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ git config set maintenance.auto false &&
+ git commit --allow-empty --message "initial" &&
+
+ git ${pack_refs} --all &&
+ test_line_count = 2 .git/packed-refs &&
+
+ # Create 15 loose references.
+ printf "create refs/heads/loose-%d HEAD\n" $(test_seq 15) >stdin &&
+ git update-ref --stdin <stdin &&
+ git ${pack_refs} --auto --all &&
+ test_line_count = 2 .git/packed-refs &&
+
+ # Create the 16th loose reference, which should cause us to repack.
+ git update-ref refs/heads/loose-17 HEAD &&
+ git ${pack_refs} --auto --all &&
+ test_line_count = 18 .git/packed-refs
+ )
+ '
+
+ test_expect_success "$command scales with size of packed-refs" '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ git config set maintenance.auto false &&
+ git commit --allow-empty --message "initial" &&
+
+ # Create 99 packed refs. This should cause the heuristic
+ # to require more than the minimum amount of loose refs.
+ test_seq 99 |
+ while read i
+ do
+ printf "create refs/heads/packed-%d HEAD\n" $i || return 1
+ done >stdin &&
+ git update-ref --stdin <stdin &&
+ git ${pack_refs} --all &&
+ test_line_count = 101 .git/packed-refs &&
+
+ # Create 24 loose refs, which should not yet cause us to repack.
+ printf "create refs/heads/loose-%d HEAD\n" $(test_seq 24) >stdin &&
+ git update-ref --stdin <stdin &&
+ git ${pack_refs} --auto --all &&
+ test_line_count = 101 .git/packed-refs &&
+
+ # Create another handful of refs to cross the border.
+ # Note that we explicitly do not check for strict
+ # boundaries here, as this also depends on the size of
+ # the object hash.
+ printf "create refs/heads/addn-%d HEAD\n" $(test_seq 10) >stdin &&
+ git update-ref --stdin <stdin &&
+ git ${pack_refs} --auto --all &&
+ test_line_count = 135 .git/packed-refs
+ )
+ '
+done
+
+test_done
diff --git a/t/t0014-alias.sh b/t/t0014-alias.sh
index 854d59ec58..07a53e7366 100755
--- a/t/t0014-alias.sh
+++ b/t/t0014-alias.sh
@@ -27,6 +27,20 @@ test_expect_success 'looping aliases - internal execution' '
test_grep "^fatal: alias loop detected: expansion of" output
'
+test_expect_success 'looping aliases - deprecated builtins' '
+ test_config alias.whatchanged pack-redundant &&
+ test_config alias.pack-redundant whatchanged &&
+ cat >expect <<-EOF &&
+ ${SQ}whatchanged${SQ} is aliased to ${SQ}pack-redundant${SQ}
+ ${SQ}pack-redundant${SQ} is aliased to ${SQ}whatchanged${SQ}
+ fatal: alias loop detected: expansion of ${SQ}whatchanged${SQ} does not terminate:
+ whatchanged <==
+ pack-redundant ==>
+ EOF
+ test_must_fail git whatchanged -h 2>actual &&
+ test_cmp expect actual
+'
+
# This test is disabled until external loops are fixed, because would block
# the test suite for a full minute.
#
@@ -55,4 +69,47 @@ test_expect_success 'tracing a shell alias with arguments shows trace of prepare
test_cmp expect actual
'
+can_alias_deprecated_builtin () {
+ cmd="$1" &&
+ # some git(1) commands will fail for `-h` (the case for
+ # git-status as of 2025-09-07)
+ test_might_fail git status -h >expect &&
+ test_file_not_empty expect &&
+ test_might_fail git -c alias."$cmd"=status "$cmd" -h >actual &&
+ test_cmp expect actual
+}
+
+test_expect_success 'can alias-shadow deprecated builtins' '
+ for cmd in $(git --list-cmds=deprecated)
+ do
+ can_alias_deprecated_builtin "$cmd" || return 1
+ done
+'
+
+test_expect_success 'can alias-shadow via two deprecated builtins' '
+ # some git(1) commands will fail... (see above)
+ test_might_fail git status -h >expect &&
+ test_file_not_empty expect &&
+ test_might_fail git -c alias.whatchanged=pack-redundant \
+ -c alias.pack-redundant=status whatchanged -h >actual &&
+ test_cmp expect actual
+'
+
+cannot_alias_regular_builtin () {
+ cmd="$1" &&
+ # some git(1) commands will fail... (see above)
+ test_might_fail git "$cmd" -h >expect &&
+ test_file_not_empty expect &&
+ test_might_fail git -c alias."$cmd"=status "$cmd" -h >actual &&
+ test_cmp expect actual
+}
+
+test_expect_success 'cannot alias-shadow a sample of regular builtins' '
+ for cmd in grep check-ref-format interpret-trailers \
+ checkout-index fast-import diagnose rev-list prune
+ do
+ cannot_alias_regular_builtin "$cmd" || return 1
+ done
+'
+
test_done
diff --git a/t/t0300-credentials.sh b/t/t0300-credentials.sh
index cb3a85c7ff..07aa834d33 100755
--- a/t/t0300-credentials.sh
+++ b/t/t0300-credentials.sh
@@ -991,18 +991,24 @@ test_expect_success 'url parser not confused by encoded markers' '
test_expect_success 'credential config with partial URLs' '
echo "echo password=yep" | write_script git-credential-yep &&
- test_write_lines url=https://user@example.com/repo.git >stdin &&
+ test_write_lines url=https://user@example.com/org/repo.git >stdin &&
for partial in \
example.com \
+ example.com/org/repo.git \
user@example.com \
+ user@example.com/org/repo.git \
https:// \
https://example.com \
https://example.com/ \
+ https://example.com/org \
+ https://example.com/org/ \
+ https://example.com/org/repo.git \
https://user@example.com \
https://user@example.com/ \
- https://example.com/repo.git \
- https://user@example.com/repo.git \
- /repo.git
+ https://user@example.com/org \
+ https://user@example.com/org/ \
+ https://user@example.com/org/repo.git \
+ /org/repo.git
do
git -c credential.$partial.helper=yep \
credential fill <stdin >stdout &&
@@ -1012,7 +1018,12 @@ test_expect_success 'credential config with partial URLs' '
for partial in \
dont.use.this \
+ example.com/o \
+ user@example.com/o \
http:// \
+ https://example.com/o \
+ https://user@example.com/o \
+ /o \
/repo
do
git -c credential.$partial.helper=yep \
diff --git a/t/t0601-reffiles-pack-refs.sh b/t/t0601-reffiles-pack-refs.sh
index aa7f6ecd81..12cf5d1dcb 100755
--- a/t/t0601-reffiles-pack-refs.sh
+++ b/t/t0601-reffiles-pack-refs.sh
@@ -17,432 +17,4 @@ export GIT_TEST_DEFAULT_REF_FORMAT
. ./test-lib.sh
-test_expect_success 'enable reflogs' '
- git config core.logallrefupdates true
-'
-
-test_expect_success 'prepare a trivial repository' '
- echo Hello > A &&
- git update-index --add A &&
- git commit -m "Initial commit." &&
- HEAD=$(git rev-parse --verify HEAD)
-'
-
-test_expect_success 'pack-refs --prune --all' '
- test_path_is_missing .git/packed-refs &&
- git pack-refs --no-prune --all &&
- test_path_is_file .git/packed-refs &&
- N=$(find .git/refs -type f | wc -l) &&
- test "$N" != 0 &&
-
- git pack-refs --prune --all &&
- test_path_is_file .git/packed-refs &&
- N=$(find .git/refs -type f) &&
- test -z "$N"
-'
-
-SHA1=
-
-test_expect_success 'see if git show-ref works as expected' '
- git branch a &&
- SHA1=$(cat .git/refs/heads/a) &&
- echo "$SHA1 refs/heads/a" >expect &&
- git show-ref a >result &&
- test_cmp expect result
-'
-
-test_expect_success 'see if a branch still exists when packed' '
- git branch b &&
- git pack-refs --all &&
- rm -f .git/refs/heads/b &&
- echo "$SHA1 refs/heads/b" >expect &&
- git show-ref b >result &&
- test_cmp expect result
-'
-
-test_expect_success 'git branch c/d should barf if branch c exists' '
- git branch c &&
- git pack-refs --all &&
- rm -f .git/refs/heads/c &&
- test_must_fail git branch c/d
-'
-
-test_expect_success 'see if a branch still exists after git pack-refs --prune' '
- git branch e &&
- git pack-refs --all --prune &&
- echo "$SHA1 refs/heads/e" >expect &&
- git show-ref e >result &&
- test_cmp expect result
-'
-
-test_expect_success 'see if git pack-refs --prune remove ref files' '
- git branch f &&
- git pack-refs --all --prune &&
- ! test -f .git/refs/heads/f
-'
-
-test_expect_success 'see if git pack-refs --prune removes empty dirs' '
- git branch r/s/t &&
- git pack-refs --all --prune &&
- ! test -e .git/refs/heads/r
-'
-
-test_expect_success 'git branch g should work when git branch g/h has been deleted' '
- git branch g/h &&
- git pack-refs --all --prune &&
- git branch -d g/h &&
- git branch g &&
- git pack-refs --all &&
- git branch -d g
-'
-
-test_expect_success 'git branch i/j/k should barf if branch i exists' '
- git branch i &&
- git pack-refs --all --prune &&
- test_must_fail git branch i/j/k
-'
-
-test_expect_success 'test git branch k after branch k/l/m and k/lm have been deleted' '
- git branch k/l &&
- git branch k/lm &&
- git branch -d k/l &&
- git branch k/l/m &&
- git branch -d k/l/m &&
- git branch -d k/lm &&
- git branch k
-'
-
-test_expect_success 'test git branch n after some branch deletion and pruning' '
- git branch n/o &&
- git branch n/op &&
- git branch -d n/o &&
- git branch n/o/p &&
- git branch -d n/op &&
- git pack-refs --all --prune &&
- git branch -d n/o/p &&
- git branch n
-'
-
-test_expect_success 'test excluded refs are not packed' '
- git branch dont_pack1 &&
- git branch dont_pack2 &&
- git branch pack_this &&
- git pack-refs --all --exclude "refs/heads/dont_pack*" &&
- test -f .git/refs/heads/dont_pack1 &&
- test -f .git/refs/heads/dont_pack2 &&
- ! test -f .git/refs/heads/pack_this'
-
-test_expect_success 'test --no-exclude refs clears excluded refs' '
- git branch dont_pack3 &&
- git branch dont_pack4 &&
- git pack-refs --all --exclude "refs/heads/dont_pack*" --no-exclude &&
- ! test -f .git/refs/heads/dont_pack3 &&
- ! test -f .git/refs/heads/dont_pack4'
-
-test_expect_success 'test only included refs are packed' '
- git branch pack_this1 &&
- git branch pack_this2 &&
- git tag dont_pack5 &&
- git pack-refs --include "refs/heads/pack_this*" &&
- test -f .git/refs/tags/dont_pack5 &&
- ! test -f .git/refs/heads/pack_this1 &&
- ! test -f .git/refs/heads/pack_this2'
-
-test_expect_success 'test --no-include refs clears included refs' '
- git branch pack1 &&
- git branch pack2 &&
- git pack-refs --include "refs/heads/pack*" --no-include &&
- test -f .git/refs/heads/pack1 &&
- test -f .git/refs/heads/pack2'
-
-test_expect_success 'test --exclude takes precedence over --include' '
- git branch dont_pack5 &&
- git pack-refs --include "refs/heads/pack*" --exclude "refs/heads/pack*" &&
- test -f .git/refs/heads/dont_pack5'
-
-test_expect_success 'see if up-to-date packed refs are preserved' '
- git branch q &&
- git pack-refs --all --prune &&
- git update-ref refs/heads/q refs/heads/q &&
- ! test -f .git/refs/heads/q
-'
-
-test_expect_success 'pack, prune and repack' '
- git tag foo &&
- git pack-refs --all --prune &&
- git show-ref >all-of-them &&
- git pack-refs &&
- git show-ref >again &&
- test_cmp all-of-them again
-'
-
-test_expect_success 'explicit pack-refs with dangling packed reference' '
- git commit --allow-empty -m "soon to be garbage-collected" &&
- git pack-refs --all &&
- git reset --hard HEAD^ &&
- git reflog expire --expire=all --all &&
- git prune --expire=all &&
- git pack-refs --all 2>result &&
- test_must_be_empty result
-'
-
-test_expect_success 'delete ref with dangling packed version' '
- git checkout -b lamb &&
- git commit --allow-empty -m "future garbage" &&
- git pack-refs --all &&
- git reset --hard HEAD^ &&
- git checkout main &&
- git reflog expire --expire=all --all &&
- git prune --expire=all &&
- git branch -d lamb 2>result &&
- test_must_be_empty result
-'
-
-test_expect_success 'delete ref while another dangling packed ref' '
- git branch lamb &&
- git commit --allow-empty -m "future garbage" &&
- git pack-refs --all &&
- git reset --hard HEAD^ &&
- git reflog expire --expire=all --all &&
- git prune --expire=all &&
- git branch -d lamb 2>result &&
- test_must_be_empty result
-'
-
-test_expect_success 'pack ref directly below refs/' '
- git update-ref refs/top HEAD &&
- git pack-refs --all --prune &&
- grep refs/top .git/packed-refs &&
- test_path_is_missing .git/refs/top
-'
-
-test_expect_success 'do not pack ref in refs/bisect' '
- git update-ref refs/bisect/local HEAD &&
- git pack-refs --all --prune &&
- ! grep refs/bisect/local .git/packed-refs >/dev/null &&
- test_path_is_file .git/refs/bisect/local
-'
-
-test_expect_success 'disable reflogs' '
- git config core.logallrefupdates false &&
- rm -rf .git/logs
-'
-
-test_expect_success 'create packed foo/bar/baz branch' '
- git branch foo/bar/baz &&
- git pack-refs --all --prune &&
- test_path_is_missing .git/refs/heads/foo/bar/baz &&
- test_must_fail git reflog exists refs/heads/foo/bar/baz
-'
-
-test_expect_success 'notice d/f conflict with existing directory' '
- test_must_fail git branch foo &&
- test_must_fail git branch foo/bar
-'
-
-test_expect_success 'existing directory reports concrete ref' '
- test_must_fail git branch foo 2>stderr &&
- test_grep refs/heads/foo/bar/baz stderr
-'
-
-test_expect_success 'notice d/f conflict with existing ref' '
- test_must_fail git branch foo/bar/baz/extra &&
- test_must_fail git branch foo/bar/baz/lots/of/extra/components
-'
-
-test_expect_success 'reject packed-refs with unterminated line' '
- cp .git/packed-refs .git/packed-refs.bak &&
- test_when_finished "mv .git/packed-refs.bak .git/packed-refs" &&
- printf "%s" "$HEAD refs/zzzzz" >>.git/packed-refs &&
- echo "fatal: unterminated line in .git/packed-refs: $HEAD refs/zzzzz" >expected_err &&
- test_must_fail git for-each-ref >out 2>err &&
- test_cmp expected_err err
-'
-
-test_expect_success 'reject packed-refs containing junk' '
- cp .git/packed-refs .git/packed-refs.bak &&
- test_when_finished "mv .git/packed-refs.bak .git/packed-refs" &&
- printf "%s\n" "bogus content" >>.git/packed-refs &&
- echo "fatal: unexpected line in .git/packed-refs: bogus content" >expected_err &&
- test_must_fail git for-each-ref >out 2>err &&
- test_cmp expected_err err
-'
-
-test_expect_success 'reject packed-refs with a short SHA-1' '
- cp .git/packed-refs .git/packed-refs.bak &&
- test_when_finished "mv .git/packed-refs.bak .git/packed-refs" &&
- printf "%.7s %s\n" $HEAD refs/zzzzz >>.git/packed-refs &&
- printf "fatal: unexpected line in .git/packed-refs: %.7s %s\n" $HEAD refs/zzzzz >expected_err &&
- test_must_fail git for-each-ref >out 2>err &&
- test_cmp expected_err err
-'
-
-test_expect_success 'timeout if packed-refs.lock exists' '
- LOCK=.git/packed-refs.lock &&
- >"$LOCK" &&
- test_when_finished "rm -f $LOCK" &&
- test_must_fail git pack-refs --all --prune
-'
-
-test_expect_success 'retry acquiring packed-refs.lock' '
- LOCK=.git/packed-refs.lock &&
- >"$LOCK" &&
- test_when_finished "wait && rm -f $LOCK" &&
- {
- ( sleep 1 && rm -f $LOCK ) &
- } &&
- git -c core.packedrefstimeout=3000 pack-refs --all --prune
-'
-
-test_expect_success SYMLINKS 'pack symlinked packed-refs' '
- # First make sure that symlinking works when reading:
- git update-ref refs/heads/lossy refs/heads/main &&
- git for-each-ref >all-refs-before &&
- mv .git/packed-refs .git/my-deviant-packed-refs &&
- ln -s my-deviant-packed-refs .git/packed-refs &&
- git for-each-ref >all-refs-linked &&
- test_cmp all-refs-before all-refs-linked &&
- git pack-refs --all --prune &&
- git for-each-ref >all-refs-packed &&
- test_cmp all-refs-before all-refs-packed &&
- test -h .git/packed-refs &&
- test "$(test_readlink .git/packed-refs)" = "my-deviant-packed-refs"
-'
-
-# The 'packed-refs' file is stored directly in .git/. This means it is global
-# to the repository, and can only contain refs that are shared across all
-# worktrees.
-test_expect_success 'refs/worktree must not be packed' '
- test_commit initial &&
- test_commit wt1 &&
- test_commit wt2 &&
- git worktree add wt1 wt1 &&
- git worktree add wt2 wt2 &&
- git checkout initial &&
- git update-ref refs/worktree/foo HEAD &&
- git -C wt1 update-ref refs/worktree/foo HEAD &&
- git -C wt2 update-ref refs/worktree/foo HEAD &&
- git pack-refs --all &&
- test_path_is_missing .git/refs/tags/wt1 &&
- test_path_is_file .git/refs/worktree/foo &&
- test_path_is_file .git/worktrees/wt1/refs/worktree/foo &&
- test_path_is_file .git/worktrees/wt2/refs/worktree/foo
-'
-
-# we do not want to count on running pack-refs to
-# actually pack it, as it is perfectly reasonable to
-# skip processing a broken ref
-test_expect_success 'create packed-refs file with broken ref' '
- test_tick && git commit --allow-empty -m one &&
- recoverable=$(git rev-parse HEAD) &&
- test_tick && git commit --allow-empty -m two &&
- missing=$(git rev-parse HEAD) &&
- rm -f .git/refs/heads/main &&
- cat >.git/packed-refs <<-EOF &&
- $missing refs/heads/main
- $recoverable refs/heads/other
- EOF
- echo $missing >expect &&
- git rev-parse refs/heads/main >actual &&
- test_cmp expect actual
-'
-
-test_expect_success 'pack-refs does not silently delete broken packed ref' '
- git pack-refs --all --prune &&
- git rev-parse refs/heads/main >actual &&
- test_cmp expect actual
-'
-
-test_expect_success 'pack-refs does not drop broken refs during deletion' '
- git update-ref -d refs/heads/other &&
- git rev-parse refs/heads/main >actual &&
- test_cmp expect actual
-'
-
-for command in "git pack-refs --all --auto" "git maintenance run --task=pack-refs --auto"
-do
- test_expect_success "$command does not repack below 16 refs without packed-refs" '
- test_when_finished "rm -rf repo" &&
- git init repo &&
- (
- cd repo &&
- git config set maintenance.auto false &&
- git commit --allow-empty --message "initial" &&
-
- # Create 14 additional references, which brings us to
- # 15 together with the default branch.
- printf "create refs/heads/loose-%d HEAD\n" $(test_seq 14) >stdin &&
- git update-ref --stdin <stdin &&
- test_path_is_missing .git/packed-refs &&
- git pack-refs --auto --all &&
- test_path_is_missing .git/packed-refs &&
-
- # Create the 16th reference, which should cause us to repack.
- git update-ref refs/heads/loose-15 HEAD &&
- git pack-refs --auto --all &&
- test_path_is_file .git/packed-refs
- )
- '
-
- test_expect_success "$command does not repack below 16 refs with small packed-refs" '
- test_when_finished "rm -rf repo" &&
- git init repo &&
- (
- cd repo &&
- git config set maintenance.auto false &&
- git commit --allow-empty --message "initial" &&
-
- git pack-refs --all &&
- test_line_count = 2 .git/packed-refs &&
-
- # Create 15 loose references.
- printf "create refs/heads/loose-%d HEAD\n" $(test_seq 15) >stdin &&
- git update-ref --stdin <stdin &&
- git pack-refs --auto --all &&
- test_line_count = 2 .git/packed-refs &&
-
- # Create the 16th loose reference, which should cause us to repack.
- git update-ref refs/heads/loose-17 HEAD &&
- git pack-refs --auto --all &&
- test_line_count = 18 .git/packed-refs
- )
- '
-
- test_expect_success "$command scales with size of packed-refs" '
- test_when_finished "rm -rf repo" &&
- git init repo &&
- (
- cd repo &&
- git config set maintenance.auto false &&
- git commit --allow-empty --message "initial" &&
-
- # Create 99 packed refs. This should cause the heuristic
- # to require more than the minimum amount of loose refs.
- test_seq 99 |
- while read i
- do
- printf "create refs/heads/packed-%d HEAD\n" $i || return 1
- done >stdin &&
- git update-ref --stdin <stdin &&
- git pack-refs --all &&
- test_line_count = 101 .git/packed-refs &&
-
- # Create 24 loose refs, which should not yet cause us to repack.
- printf "create refs/heads/loose-%d HEAD\n" $(test_seq 24) >stdin &&
- git update-ref --stdin <stdin &&
- git pack-refs --auto --all &&
- test_line_count = 101 .git/packed-refs &&
-
- # Create another handful of refs to cross the border.
- # Note that we explicitly do not check for strict
- # boundaries here, as this also depends on the size of
- # the object hash.
- printf "create refs/heads/addn-%d HEAD\n" $(test_seq 10) >stdin &&
- git update-ref --stdin <stdin &&
- git pack-refs --auto --all &&
- test_line_count = 135 .git/packed-refs
- )
- '
-done
-
-test_done
+. "$TEST_DIRECTORY"/pack-refs-tests.sh
diff --git a/t/t0614-reftable-fsck.sh b/t/t0614-reftable-fsck.sh
new file mode 100755
index 0000000000..85cc47d67e
--- /dev/null
+++ b/t/t0614-reftable-fsck.sh
@@ -0,0 +1,58 @@
+#!/bin/sh
+
+test_description='Test reftable backend consistency check'
+
+GIT_TEST_DEFAULT_REF_FORMAT=reftable
+export GIT_TEST_DEFAULT_REF_FORMAT
+
+. ./test-lib.sh
+
+test_expect_success "no errors reported on a well formed repository" '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ git commit --allow-empty -m initial &&
+
+ for i in $(test_seq 20)
+ do
+ git update-ref refs/heads/branch-$i HEAD || return 1
+ done &&
+
+ # The repository should end up with multiple tables.
+ test_line_count ">" 1 .git/reftable/tables.list &&
+
+ git refs verify 2>err &&
+ test_must_be_empty err
+ )
+'
+
+for TABLE_NAME in "foo-bar-e4d12d59.ref" \
+ "0x00000000zzzz-0x00000000zzzz-e4d12d59.ref" \
+ "0x000000000001-0x000000000002-e4d12d59.abc" \
+ "0x000000000001-0x000000000002-e4d12d59.refabc"; do
+ test_expect_success "table name $TABLE_NAME should be checked" '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ git commit --allow-empty -m initial &&
+
+ git refs verify 2>err &&
+ test_must_be_empty err &&
+
+ EXISTING_TABLE=$(head -n1 .git/reftable/tables.list) &&
+ mv ".git/reftable/$EXISTING_TABLE" ".git/reftable/$TABLE_NAME" &&
+ sed "s/${EXISTING_TABLE}/${TABLE_NAME}/g" .git/reftable/tables.list > tables.list &&
+ mv tables.list .git/reftable/tables.list &&
+
+ git refs verify 2>err &&
+ cat >expect <<-EOF &&
+ warning: ${TABLE_NAME}: badReftableTableName: invalid reftable table name
+ EOF
+ test_cmp expect err
+ )
+ '
+done
+
+test_done
diff --git a/t/t1300-config.sh b/t/t1300-config.sh
index f856821839..358d636379 100755
--- a/t/t1300-config.sh
+++ b/t/t1300-config.sh
@@ -9,6 +9,7 @@ GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-terminal.sh
for mode in legacy subcommands
do
@@ -134,38 +135,39 @@ test_expect_success 'clear default config' '
rm -f .git/config
'
-cat > expect << EOF
+test_expect_success 'initial' '
+ cat >expect <<\EOF &&
[section]
penguin = little blue
EOF
-test_expect_success 'initial' '
git config ${mode_set} section.penguin "little blue" &&
test_cmp expect .git/config
'
-cat > expect << EOF
+test_expect_success 'mixed case' '
+ cat >expect <<\EOF &&
[section]
penguin = little blue
Movie = BadPhysics
EOF
-test_expect_success 'mixed case' '
git config ${mode_set} Section.Movie BadPhysics &&
test_cmp expect .git/config
'
-cat > expect << EOF
+test_expect_success 'similar section' '
+ cat >expect <<\EOF &&
[section]
penguin = little blue
Movie = BadPhysics
[Sections]
WhatEver = Second
EOF
-test_expect_success 'similar section' '
git config ${mode_set} Sections.WhatEver Second &&
test_cmp expect .git/config
'
-cat > expect << EOF
+test_expect_success 'uppercase section' '
+ cat >expect <<\EOF &&
[section]
penguin = little blue
Movie = BadPhysics
@@ -173,7 +175,6 @@ cat > expect << EOF
[Sections]
WhatEver = Second
EOF
-test_expect_success 'uppercase section' '
git config ${mode_set} SECTION.UPPERCASE true &&
test_cmp expect .git/config
'
@@ -186,7 +187,8 @@ test_expect_success 'replace with non-match (actually matching)' '
git config section.penguin "very blue" !kingpin
'
-cat > expect << EOF
+test_expect_success 'append comments' '
+ cat >expect <<\EOF &&
[section]
Movie = BadPhysics
UPPERCASE = true
@@ -198,8 +200,6 @@ cat > expect << EOF
[Sections]
WhatEver = Second
EOF
-
-test_expect_success 'append comments' '
git config --replace-all --comment="Pygoscelis papua" section.penguin gentoo &&
git config ${mode_set} --comment="find fish" section.disposition peckish &&
git config ${mode_set} --comment="#abc" section.foo bar &&
@@ -214,7 +214,9 @@ test_expect_success 'Prohibited LF in comment' '
test_must_fail git config ${mode_set} --comment="a${LF}b" section.k v
'
-test_expect_success 'non-match result' 'test_cmp expect .git/config'
+test_expect_success 'non-match result' '
+ test_cmp expect .git/config
+'
test_expect_success 'find mixed-case key by canonical name' '
test_cmp_config Second sections.whatever
@@ -265,14 +267,15 @@ test_expect_success 'unset with cont. lines' '
git config ${mode_unset} beta.baz
'
-cat > expect <<\EOF
-[alpha]
-bar = foo
-[beta]
-foo = bar
-EOF
-
-test_expect_success 'unset with cont. lines is correct' 'test_cmp expect .git/config'
+test_expect_success 'unset with cont. lines is correct' '
+ cat >expect <<-\EOF &&
+ [alpha]
+ bar = foo
+ [beta]
+ foo = bar
+ EOF
+ test_cmp expect .git/config
+'
cat > .git/config << EOF
[beta] ; silly comment # another comment
@@ -292,16 +295,15 @@ test_expect_success 'multiple unset' '
git config ${mode_unset_all} beta.haha
'
-cat > expect << EOF
+test_expect_success 'multiple unset is correct' '
+ cat >expect <<EOF &&
[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
+noIndent= sillyValue ; ${SQ}nother silly comment
# empty line
; comment
[nextSection] noNewline = ouch
EOF
-
-test_expect_success 'multiple unset is correct' '
test_cmp expect .git/config
'
@@ -318,37 +320,37 @@ test_expect_success '--replace-all' '
git config ${mode_replace_all} beta.haha gamma
'
-cat > expect << EOF
+test_expect_success 'all replaced' '
+ cat >expect <<EOF &&
[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
+noIndent= sillyValue ; ${SQ}nother silly comment
# empty line
; comment
haha = gamma
[nextSection] noNewline = ouch
EOF
-
-test_expect_success 'all replaced' '
test_cmp expect .git/config
'
-cat > expect << EOF
+test_expect_success 'really mean test' '
+ cat >expect <<EOF &&
[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
+noIndent= sillyValue ; ${SQ}nother silly comment
# empty line
; comment
haha = alpha
[nextSection] noNewline = ouch
EOF
-test_expect_success 'really mean test' '
git config ${mode_set} beta.haha alpha &&
test_cmp expect .git/config
'
-cat > expect << EOF
+test_expect_success 'really really mean test' '
+ cat >expect <<EOF &&
[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
+noIndent= sillyValue ; ${SQ}nother silly comment
# empty line
; comment
@@ -356,7 +358,6 @@ noIndent= sillyValue ; 'nother silly comment
[nextSection]
nonewline = wow
EOF
-test_expect_success 'really really mean test' '
git config ${mode_set} nextsection.nonewline wow &&
test_cmp expect .git/config
'
@@ -365,23 +366,24 @@ test_expect_success 'get value' '
test_cmp_config alpha beta.haha
'
-cat > expect << EOF
+test_expect_success 'unset' '
+ cat >expect <<EOF &&
[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
+noIndent= sillyValue ; ${SQ}nother silly comment
# empty line
; comment
[nextSection]
nonewline = wow
EOF
-test_expect_success 'unset' '
git config ${mode_unset} beta.haha &&
test_cmp expect .git/config
'
-cat > expect << EOF
+test_expect_success 'multivar' '
+ cat >expect <<EOF &&
[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
+noIndent= sillyValue ; ${SQ}nother silly comment
# empty line
; comment
@@ -389,7 +391,6 @@ noIndent= sillyValue ; 'nother silly comment
nonewline = wow
NoNewLine = wow2 for me
EOF
-test_expect_success 'multivar' '
git config nextsection.NoNewLine "wow2 for me" "for me$" &&
test_cmp expect .git/config
'
@@ -415,9 +416,10 @@ test_expect_success 'multi-valued get-all returns all' '
test_cmp expect actual
'
-cat > expect << EOF
+test_expect_success 'multivar replace' '
+ cat >expect <<EOF &&
[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
+noIndent= sillyValue ; ${SQ}nother silly comment
# empty line
; comment
@@ -425,7 +427,6 @@ noIndent= sillyValue ; 'nother silly comment
nonewline = wow3
NoNewLine = wow2 for me
EOF
-test_expect_success 'multivar replace' '
git config nextsection.nonewline "wow3" "wow$" &&
test_cmp expect .git/config
'
@@ -438,17 +439,16 @@ test_expect_success 'invalid unset' '
test_must_fail git config ${mode_unset} somesection.nonewline
'
-cat > expect << EOF
+test_expect_success 'multivar unset' '
+ cat >expect <<EOF &&
[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
+noIndent= sillyValue ; ${SQ}nother silly comment
# empty line
; comment
[nextSection]
NoNewLine = wow2 for me
EOF
-
-test_expect_success 'multivar unset' '
case "$mode" in
legacy)
git config --unset nextsection.nonewline "wow3$";;
@@ -458,17 +458,22 @@ test_expect_success 'multivar unset' '
test_cmp expect .git/config
'
-test_expect_success 'invalid key' 'test_must_fail git config inval.2key blabla'
+test_expect_success 'invalid key' '
+ test_must_fail git config inval.2key blabla
+'
-test_expect_success 'correct key' 'git config 123456.a123 987'
+test_expect_success 'correct key' '
+ git config 123456.a123 987
+'
test_expect_success 'hierarchical section' '
git config Version.1.2.3eX.Alpha beta
'
-cat > expect << EOF
+test_expect_success 'hierarchical section value' '
+ cat >expect <<EOF &&
[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
+noIndent= sillyValue ; ${SQ}nother silly comment
# empty line
; comment
@@ -479,65 +484,59 @@ noIndent= sillyValue ; 'nother silly comment
[Version "1.2.3eX"]
Alpha = beta
EOF
-
-test_expect_success 'hierarchical section value' '
test_cmp expect .git/config
'
-cat > expect << EOF
-beta.noindent=sillyValue
-nextsection.nonewline=wow2 for me
-123456.a123=987
-version.1.2.3eX.alpha=beta
-EOF
-
test_expect_success 'working --list' '
+ cat >expect <<-\EOF &&
+ beta.noindent=sillyValue
+ nextsection.nonewline=wow2 for me
+ 123456.a123=987
+ version.1.2.3eX.alpha=beta
+ EOF
git config ${mode_prefix}list > output &&
test_cmp expect output
'
+
test_expect_success '--list without repo produces empty output' '
git --git-dir=nonexistent config ${mode_prefix}list >output &&
test_must_be_empty output
'
-cat > expect << EOF
-beta.noindent
-nextsection.nonewline
-123456.a123
-version.1.2.3eX.alpha
-EOF
-
test_expect_success '--name-only --list' '
+ cat >expect <<-\EOF &&
+ beta.noindent
+ nextsection.nonewline
+ 123456.a123
+ version.1.2.3eX.alpha
+ EOF
git config ${mode_prefix}list --name-only >output &&
test_cmp expect output
'
-cat > expect << EOF
-beta.noindent sillyValue
-nextsection.nonewline wow2 for me
-EOF
-
test_expect_success '--get-regexp' '
+ cat >expect <<-\EOF &&
+ beta.noindent sillyValue
+ nextsection.nonewline wow2 for me
+ EOF
git config ${mode_get_regexp} in >output &&
test_cmp expect output
'
-cat > expect << EOF
-beta.noindent
-nextsection.nonewline
-EOF
-
test_expect_success '--name-only --get-regexp' '
+ cat >expect <<-\EOF &&
+ beta.noindent
+ nextsection.nonewline
+ EOF
git config ${mode_get_regexp} --name-only in >output &&
test_cmp expect output
'
-cat > expect << EOF
-wow2 for me
-wow4 for you
-EOF
-
test_expect_success '--add' '
+ cat >expect <<-\EOF &&
+ wow2 for me
+ wow4 for you
+ EOF
git config --add nextsection.nonewline "wow4 for you" &&
git config ${mode_get_all} nextsection.nonewline > output &&
test_cmp expect output
@@ -558,37 +557,32 @@ test_expect_success 'get variable with empty value' '
git config --get emptyvalue.variable ^$
'
-echo novalue.variable > expect
-
test_expect_success 'get-regexp variable with no value' '
+ echo novalue.variable >expect &&
git config ${mode_get_regexp} novalue > output &&
test_cmp expect output
'
-echo 'novalue.variable true' > expect
-
test_expect_success 'get-regexp --bool variable with no value' '
+ echo "novalue.variable true" >expect &&
git config ${mode_get_regexp} --bool novalue > output &&
test_cmp expect output
'
-echo 'emptyvalue.variable ' > expect
-
test_expect_success 'get-regexp variable with empty value' '
+ echo "emptyvalue.variable " >expect &&
git config ${mode_get_regexp} emptyvalue > output &&
test_cmp expect output
'
-echo true > expect
-
test_expect_success 'get bool variable with no value' '
+ echo true >expect &&
git config --bool novalue.variable > output &&
test_cmp expect output
'
-echo false > expect
-
test_expect_success 'get bool variable with empty value' '
+ echo false >expect &&
git config --bool emptyvalue.variable > output &&
test_cmp expect output
'
@@ -604,19 +598,19 @@ cat > .git/config << EOF
c = d
EOF
-cat > expect << EOF
+test_expect_success 'new section is partial match of another' '
+ cat >expect <<\EOF &&
[a.b]
c = d
[a]
x = y
EOF
-
-test_expect_success 'new section is partial match of another' '
git config a.x y &&
test_cmp expect .git/config
'
-cat > expect << EOF
+test_expect_success 'new variable inserts into proper section' '
+ cat >expect <<\EOF &&
[a.b]
c = d
[a]
@@ -625,8 +619,6 @@ cat > expect << EOF
[b]
x = y
EOF
-
-test_expect_success 'new variable inserts into proper section' '
git config b.x y &&
git config a.b c &&
test_cmp expect .git/config
@@ -642,11 +634,10 @@ cat > other-config << EOF
bahn = strasse
EOF
-cat > expect << EOF
-ein.bahn=strasse
-EOF
-
test_expect_success 'alternative GIT_CONFIG' '
+ cat >expect <<-\EOF &&
+ ein.bahn=strasse
+ EOF
GIT_CONFIG=other-config git config ${mode_prefix}list >output &&
test_cmp expect output
'
@@ -675,14 +666,13 @@ test_expect_success 'refer config from subdirectory' '
test_cmp_config -C x strasse --file=../other-config --get ein.bahn
'
-cat > expect << EOF
+test_expect_success '--set in alternative file' '
+ cat >expect <<\EOF &&
[ein]
bahn = strasse
[anwohner]
park = ausweis
EOF
-
-test_expect_success '--set in alternative file' '
git config --file=other-config anwohner.park ausweis &&
test_cmp expect other-config
'
@@ -730,7 +720,8 @@ test_expect_success 'rename another section' '
git config ${mode_prefix}rename-section branch."1 234 blabl/a" branch.drei
'
-cat > expect << EOF
+test_expect_success 'rename succeeded' '
+ cat >expect <<\EOF &&
# Hallo
#Bello
[branch "zwei"]
@@ -740,8 +731,6 @@ cat > expect << EOF
[branch "drei"]
weird
EOF
-
-test_expect_success 'rename succeeded' '
test_cmp expect .git/config
'
@@ -753,7 +742,8 @@ test_expect_success 'rename a section with a var on the same line' '
git config ${mode_prefix}rename-section branch.vier branch.zwei
'
-cat > expect << EOF
+test_expect_success 'rename succeeded' '
+ cat >expect <<\EOF &&
# Hallo
#Bello
[branch "zwei"]
@@ -765,8 +755,6 @@ weird
[branch "zwei"]
z = 1
EOF
-
-test_expect_success 'rename succeeded' '
test_cmp expect .git/config
'
@@ -816,32 +804,29 @@ test_expect_success 'remove section' '
git config ${mode_prefix}remove-section branch.zwei
'
-cat > expect << EOF
+test_expect_success 'section was removed properly' '
+ cat >expect <<\EOF &&
# Hallo
#Bello
[branch "drei"]
weird
EOF
-
-test_expect_success 'section was removed properly' '
test_cmp expect .git/config
'
-cat > expect << EOF
+test_expect_success 'section ending' '
+ cat >expect <<\EOF &&
[gitcvs]
enabled = true
dbname = %Ggitcvs2.%a.%m.sqlite
[gitcvs "ext"]
dbname = %Ggitcvs1.%a.%m.sqlite
EOF
-
-test_expect_success 'section ending' '
rm -f .git/config &&
git config ${mode_set} gitcvs.enabled true &&
git config ${mode_set} gitcvs.ext.dbname %Ggitcvs1.%a.%m.sqlite &&
git config ${mode_set} gitcvs.dbname %Ggitcvs2.%a.%m.sqlite &&
test_cmp expect .git/config
-
'
test_expect_success numbers '
@@ -885,19 +870,17 @@ test_expect_success 'invalid stdin config' '
test_grep "bad config line 1 in standard input" output
'
-cat > expect << EOF
-true
-false
-true
-false
-true
-false
-true
-false
-EOF
-
test_expect_success bool '
-
+ cat >expect <<-\EOF &&
+ true
+ false
+ true
+ false
+ true
+ false
+ true
+ false
+ EOF
git config ${mode_set} bool.true1 01 &&
git config ${mode_set} bool.true2 -1 &&
git config ${mode_set} bool.true3 YeS &&
@@ -912,18 +895,20 @@ test_expect_success bool '
git config --bool --get bool.true$i >>result &&
git config --bool --get bool.false$i >>result || return 1
done &&
- test_cmp expect result'
+ test_cmp expect result
+'
test_expect_success 'invalid bool (--get)' '
-
git config ${mode_set} bool.nobool foobar &&
- test_must_fail git config --bool --get bool.nobool'
+ test_must_fail git config --bool --get bool.nobool
+'
test_expect_success 'invalid bool (set)' '
+ test_must_fail git config --bool bool.nobool foobar
+'
- test_must_fail git config --bool bool.nobool foobar'
-
-cat > expect <<\EOF
+test_expect_success 'set --bool' '
+ cat >expect <<\EOF &&
[bool]
true1 = true
true2 = true
@@ -934,9 +919,6 @@ cat > expect <<\EOF
false3 = false
false4 = false
EOF
-
-test_expect_success 'set --bool' '
-
rm -f .git/config &&
git config --bool bool.true1 01 &&
git config --bool bool.true2 -1 &&
@@ -948,15 +930,13 @@ test_expect_success 'set --bool' '
git config --bool bool.false4 FALSE &&
test_cmp expect .git/config'
-cat > expect <<\EOF
+test_expect_success 'set --int' '
+ cat >expect <<\EOF &&
[int]
val1 = 1
val2 = -1
val3 = 5242880
EOF
-
-test_expect_success 'set --int' '
-
rm -f .git/config &&
git config --int int.val1 01 &&
git config --int int.val2 -1 &&
@@ -994,7 +974,8 @@ test_expect_success 'get --bool-or-int' '
test_cmp expect actual
'
-cat >expect <<\EOF
+test_expect_success 'set --bool-or-int' '
+ cat >expect <<\EOF &&
[bool]
true1 = true
false1 = false
@@ -1005,8 +986,6 @@ cat >expect <<\EOF
int2 = 1
int3 = -1
EOF
-
-test_expect_success 'set --bool-or-int' '
rm -f .git/config &&
git config --bool-or-int bool.true1 true &&
git config --bool-or-int bool.false1 false &&
@@ -1018,44 +997,42 @@ test_expect_success 'set --bool-or-int' '
test_cmp expect .git/config
'
-cat >expect <<\EOF
+test_expect_success !MINGW 'set --path' '
+ cat >expect <<\EOF &&
[path]
home = ~/
normal = /dev/null
trailingtilde = foo~
EOF
-
-test_expect_success !MINGW 'set --path' '
rm -f .git/config &&
git config --path path.home "~/" &&
git config --path path.normal "/dev/null" &&
git config --path path.trailingtilde "foo~" &&
- test_cmp expect .git/config'
+ test_cmp expect .git/config
+'
if test_have_prereq !MINGW && test "${HOME+set}"
then
test_set_prereq HOMEVAR
fi
-cat >expect <<EOF
-$HOME/
-/dev/null
-foo~
-EOF
-
test_expect_success HOMEVAR 'get --path' '
+ cat >expect <<-EOF &&
+ $HOME/
+ /dev/null
+ foo~
+ EOF
git config --get --path path.home > result &&
git config --get --path path.normal >> result &&
git config --get --path path.trailingtilde >> result &&
test_cmp expect result
'
-cat >expect <<\EOF
-/dev/null
-foo~
-EOF
-
test_expect_success !MINGW 'get --path copes with unset $HOME' '
+ cat >expect <<-\EOF &&
+ /dev/null
+ foo~
+ EOF
(
sane_unset HOME &&
test_must_fail git config --get --path path.home \
@@ -1107,17 +1084,35 @@ test_expect_success 'get --type=color' '
rm .git/config &&
git config ${mode_set} foo.color "red" &&
git config --get --type=color foo.color >actual.raw &&
+ git config get --type=color foo.color >actual-subcommand.raw &&
+ test_cmp actual.raw actual-subcommand.raw &&
+ test_decode_color <actual.raw >actual &&
+ echo "<RED>" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'get --type=color with default value only' '
+ git config --get-color "" "red" >actual.raw &&
+ test_decode_color <actual.raw >actual &&
+ echo "<RED>" >expect &&
+ test_cmp expect actual &&
+ git config get --type=color --default="red" "" >actual-subcommand.raw &&
+ test_cmp actual.raw actual-subcommand.raw
+'
+
+test_expect_success TTY 'get --type=color does not use a pager' '
+ test_config core.pager "echo foobar" &&
+ test_terminal git config get --type=color --default="red" "" >actual.raw &&
test_decode_color <actual.raw >actual &&
echo "<RED>" >expect &&
test_cmp expect actual
'
-cat >expect << EOF
+test_expect_success 'set --type=color' '
+ cat >expect <<\EOF &&
[foo]
color = red
EOF
-
-test_expect_success 'set --type=color' '
rm .git/config &&
git config --type=color foo.color "red" &&
test_cmp expect .git/config
@@ -1133,14 +1128,14 @@ test_expect_success 'set --type=color barfs on non-color' '
test_grep "cannot parse color" error
'
-cat > expect << EOF
+test_expect_success 'quoting' '
+ cat >expect <<\EOF &&
[quote]
leading = " test"
ending = "test "
semicolon = "test;test"
hash = "test#test"
EOF
-test_expect_success 'quoting' '
rm -f .git/config &&
git config ${mode_set} quote.leading " test" &&
git config ${mode_set} quote.ending "test " &&
@@ -1151,10 +1146,13 @@ test_expect_success 'quoting' '
test_expect_success 'key with newline' '
test_must_fail git config ${mode_get} "key.with
-newline" 123'
+newline" 123
+'
-test_expect_success 'value with newline' 'git config ${mode_set} key.sub value.with\\\
-newline'
+test_expect_success 'value with newline' '
+ git config ${mode_set} key.sub value.with\\\
+newline
+'
cat > .git/config <<\EOF
[section]
@@ -1166,13 +1164,12 @@ inued
inued"
EOF
-cat > expect <<\EOF
-section.continued=continued
-section.noncont=not continued
-section.quotecont=cont;inued
-EOF
-
test_expect_success 'value continued on next line' '
+ cat >expect <<-\EOF &&
+ section.continued=continued
+ section.noncont=not continued
+ section.quotecont=cont;inued
+ EOF
git config ${mode_prefix}list > result &&
test_cmp expect result
'
@@ -1365,7 +1362,6 @@ test_expect_success 'multiple git -c appends config' '
'
test_expect_success 'last one wins: two level vars' '
-
# sec.var and sec.VAR are the same variable, as the first
# and the last level of a configuration variable name is
# case insensitive.
@@ -1384,7 +1380,6 @@ test_expect_success 'last one wins: two level vars' '
'
test_expect_success 'last one wins: three level vars' '
-
# v.a.r and v.A.r are not the same variable, as the middle
# level of a three-level configuration variable name is
# case sensitive.
diff --git a/t/t1421-reflog-write.sh b/t/t1421-reflog-write.sh
index 46df64c176..603ec3f6ed 100755
--- a/t/t1421-reflog-write.sh
+++ b/t/t1421-reflog-write.sh
@@ -108,6 +108,42 @@ test_expect_success 'simple writes' '
)
'
+test_expect_success 'uses user.name and user.email config' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ test_commit initial &&
+ COMMIT_OID=$(git rev-parse HEAD) &&
+
+ sane_unset GIT_COMMITTER_NAME &&
+ sane_unset GIT_COMMITTER_EMAIL &&
+ git config --local user.name "Author" &&
+ git config --local user.email "a@uth.or" &&
+ git reflog write refs/heads/something $ZERO_OID $COMMIT_OID first &&
+ test_reflog_matches . refs/heads/something <<-EOF
+ $ZERO_OID $COMMIT_OID Author <a@uth.or> $GIT_COMMITTER_DATE first
+ EOF
+ )
+'
+
+test_expect_success 'environment variables take precedence over config' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ test_commit initial &&
+ COMMIT_OID=$(git rev-parse HEAD) &&
+
+ git config --local user.name "Author" &&
+ git config --local user.email "a@uth.or" &&
+ git reflog write refs/heads/something $ZERO_OID $COMMIT_OID first &&
+ test_reflog_matches . refs/heads/something <<-EOF
+ $ZERO_OID $COMMIT_OID $SIGNATURE first
+ EOF
+ )
+'
+
test_expect_success 'can write to root ref' '
test_when_finished "rm -rf repo" &&
git init repo &&
diff --git a/t/t1463-refs-optimize.sh b/t/t1463-refs-optimize.sh
new file mode 100755
index 0000000000..c11c905d79
--- /dev/null
+++ b/t/t1463-refs-optimize.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+test_description='git refs optimize should not change the branch semantic
+
+This test runs git refs optimize and git show-ref and checks that the branch
+semantic is still the same.
+'
+
+GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
+export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
+GIT_TEST_DEFAULT_REF_FORMAT=files
+export GIT_TEST_DEFAULT_REF_FORMAT
+
+. ./test-lib.sh
+
+pack_refs='refs optimize'
+. "$TEST_DIRECTORY"/pack-refs-tests.sh
diff --git a/t/t3206-range-diff.sh b/t/t3206-range-diff.sh
index e091df6d01..1e812df806 100755
--- a/t/t3206-range-diff.sh
+++ b/t/t3206-range-diff.sh
@@ -707,7 +707,7 @@ test_expect_success 'format-patch --range-diff does not compare notes by default
! grep "note" 0000-*
'
-test_expect_success 'format-patch --notes=custom --range-diff only compares custom notes' '
+test_expect_success 'format-patch --notes=custom --range-diff --cover-letter only compares custom notes' '
test_when_finished "git notes remove topic unmodified || :" &&
git notes add -m "topic note" topic &&
git notes add -m "unmodified note" unmodified &&
@@ -721,6 +721,20 @@ test_expect_success 'format-patch --notes=custom --range-diff only compares cust
! grep "## Notes ##" 0000-*
'
+# --range-diff on a single commit requires --no-cover-letter
+test_expect_success 'format-patch --notes=custom --range-diff on single commit only compares custom notes' '
+ test_when_finished "git notes remove HEAD unmodified || :" &&
+ git notes add -m "topic note" HEAD &&
+ test_when_finished "git notes --ref=custom remove HEAD unmodified || :" &&
+ git notes add -m "unmodified note" unmodified &&
+ git notes --ref=custom add -m "topic note (custom)" HEAD &&
+ git notes --ref=custom add -m "unmodified note (custom)" unmodified &&
+ git format-patch --notes=custom --range-diff=$prev \
+ -1 --stdout >actual &&
+ test_grep "## Notes (custom) ##" actual &&
+ test_grep ! "## Notes ##" actual
+'
+
test_expect_success 'format-patch --range-diff with --no-notes' '
test_when_finished "git notes remove topic unmodified || :" &&
git notes add -m "topic note" topic &&
diff --git a/t/t3701-add-interactive.sh b/t/t3701-add-interactive.sh
index d9fe289a7a..b3cc152cc4 100755
--- a/t/t3701-add-interactive.sh
+++ b/t/t3701-add-interactive.sh
@@ -1354,4 +1354,35 @@ do
'
done
+test_expect_success 'splitting previous hunk marks split hunks as undecided' '
+ test_write_lines a " " b c d e f g h i j k >file &&
+ git add file &&
+ test_write_lines x " " b y d e f g h i j x >file &&
+ test_write_lines n K s n y q | git add -p file &&
+ git cat-file blob :file >actual &&
+ test_write_lines a " " b y d e f g h i j k >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'splitting edited hunk' '
+ # Before the first hunk is edited it can be split into two
+ # hunks, after editing it can be split into three hunks.
+
+ write_script fake-editor.sh <<-\EOF &&
+ sed "s/^ c/-c/" "$1" >"$1.tmp" &&
+ mv "$1.tmp" "$1"
+ EOF
+
+ test_write_lines a b c d e f g h i j k l m n >file &&
+ git add file &&
+ test_write_lines A b c d E f g h i j k l M n >file &&
+ (
+ test_set_editor "$(pwd)/fake-editor.sh" &&
+ test_write_lines e K s j y n y q | git add -p file
+ ) &&
+ git cat-file blob :file >actual &&
+ test_write_lines a b d e f g h i j k l M n >expect &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t6302-for-each-ref-filter.sh b/t/t6302-for-each-ref-filter.sh
index 9b80ea1e3b..7f060d97bf 100755
--- a/t/t6302-for-each-ref-filter.sh
+++ b/t/t6302-for-each-ref-filter.sh
@@ -754,4 +754,69 @@ test_expect_success 'start after used with custom sort order' '
test_cmp expect actual
'
+test_expect_success 'start after with packed refs' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ test_commit default &&
+
+ git update-ref --stdin <<-\EOF &&
+ create refs/heads/branch @
+ create refs/heads/side @
+ create refs/odd/spot @
+ create refs/tags/one @
+ create refs/tags/two @
+ commit
+ EOF
+
+ cat >expect <<-\EOF &&
+ refs/tags/default
+ refs/tags/one
+ refs/tags/two
+ EOF
+
+ git pack-refs --all &&
+ git for-each-ref --format="%(refname)" --start-after=refs/odd/spot >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'start after with packed refs and some loose refs' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ test_commit default &&
+
+ git update-ref --stdin <<-\EOF &&
+ create refs/heads/branch @
+ create refs/heads/side @
+ create refs/odd/spot @
+ create refs/tags/one @
+ create refs/tags/two @
+ commit
+ EOF
+
+ git pack-refs --all &&
+
+ git update-ref --stdin <<-\EOF &&
+ create refs/heads/foo @
+ create refs/odd/tee @
+ commit
+ EOF
+
+ cat >expect <<-\EOF &&
+ refs/odd/tee
+ refs/tags/default
+ refs/tags/one
+ refs/tags/two
+ EOF
+
+ git for-each-ref --format="%(refname)" --start-after=refs/odd/spot >actual &&
+ test_cmp expect actual
+ )
+'
+
test_done
diff --git a/t/t7500-commit-template-squash-signoff.sh b/t/t7500-commit-template-squash-signoff.sh
index 4dca8d97a7..1935171d68 100755
--- a/t/t7500-commit-template-squash-signoff.sh
+++ b/t/t7500-commit-template-squash-signoff.sh
@@ -31,52 +31,70 @@ test_expect_success 'nonexistent template file should return error' '
echo changes >> foo &&
git add foo &&
(
- GIT_EDITOR="echo hello >\"\$1\"" &&
+ GIT_EDITOR="echo hello >" &&
export GIT_EDITOR &&
test_must_fail git commit --template "$PWD"/notexist
)
'
+test_expect_success 'nonexistent optional template file on command line' '
+ echo changes >> foo &&
+ git add foo &&
+ (
+ GIT_EDITOR="echo hello >\"\$1\"" &&
+ export GIT_EDITOR &&
+ git commit --template ":(optional)$PWD/notexist"
+ )
+'
+
test_expect_success 'nonexistent template file in config should return error' '
test_config commit.template "$PWD"/notexist &&
(
- GIT_EDITOR="echo hello >\"\$1\"" &&
+ GIT_EDITOR="echo hello >" &&
export GIT_EDITOR &&
- test_must_fail git commit
+ test_must_fail git commit --allow-empty
)
'
+test_expect_success 'nonexistent optional template file in config' '
+ test_config commit.template ":(optional)$PWD"/notexist &&
+ GIT_EDITOR="echo hello >" git commit --allow-empty &&
+ git cat-file commit HEAD | sed -e "1,/^$/d" >actual &&
+ echo hello >expect &&
+ test_cmp expect actual
+'
+
# From now on we'll use a template file that exists.
TEMPLATE="$PWD"/template
test_expect_success 'unedited template should not commit' '
- echo "template line" > "$TEMPLATE" &&
- test_must_fail git commit --template "$TEMPLATE"
+ echo "template line" >"$TEMPLATE" &&
+ test_must_fail git commit --allow-empty --template "$TEMPLATE"
'
test_expect_success 'unedited template with comments should not commit' '
- echo "# comment in template" >> "$TEMPLATE" &&
- test_must_fail git commit --template "$TEMPLATE"
+ echo "# comment in template" >>"$TEMPLATE" &&
+ test_must_fail git commit --allow-empty --template "$TEMPLATE"
'
test_expect_success 'a Signed-off-by line by itself should not commit' '
(
test_set_editor "$TEST_DIRECTORY"/t7500/add-signed-off &&
- test_must_fail git commit --template "$TEMPLATE"
+ test_must_fail git commit --allow-empty --template "$TEMPLATE"
)
'
test_expect_success 'adding comments to a template should not commit' '
(
test_set_editor "$TEST_DIRECTORY"/t7500/add-comments &&
- test_must_fail git commit --template "$TEMPLATE"
+ test_must_fail git commit --allow-empty --template "$TEMPLATE"
)
'
test_expect_success 'adding real content to a template should commit' '
(
test_set_editor "$TEST_DIRECTORY"/t7500/add-content &&
- git commit --template "$TEMPLATE"
+ git commit --allow-empty --template "$TEMPLATE"
) &&
commit_msg_is "template linecommit message"
'
diff --git a/t/t8020-last-modified.sh b/t/t8020-last-modified.sh
index e13aad1439..61f00bc15c 100755
--- a/t/t8020-last-modified.sh
+++ b/t/t8020-last-modified.sh
@@ -33,7 +33,6 @@ check_last_modified() {
done &&
cat >expect &&
- test_when_finished "rm -f tmp.*" &&
git ${indir:+-C "$indir"} last-modified "$@" >tmp.1 &&
git name-rev --annotate-stdin --name-only --tags \
<tmp.1 >tmp.2 &&
@@ -128,20 +127,25 @@ test_expect_success 'only last-modified files in the current tree' '
EOF
'
-test_expect_success 'last-modified with subdir and criss-cross merge' '
- git checkout -b branch-k1 1 &&
- mkdir -p a k &&
- test_commit k1 a/file2 &&
- git checkout -b branch-k2 &&
- test_commit k2 k/file2 &&
- git checkout branch-k1 &&
- test_merge km2 branch-k2 &&
- test_merge km3 3 &&
- check_last_modified <<-\EOF
- km3 a
- k2 k
- 1 file
- EOF
+test_expect_success 'subdirectory modified via merge' '
+ test_when_finished rm -rf repo &&
+ git init repo &&
+ (
+ cd repo &&
+ test_commit base &&
+ git switch --create left &&
+ mkdir subdir &&
+ test_commit left subdir/left &&
+ git switch --create right base &&
+ mkdir subdir &&
+ test_commit right subdir/right &&
+ git switch - &&
+ test_merge merge right &&
+ check_last_modified <<-\EOF
+ merge subdir
+ base base.t
+ EOF
+ )
'
test_expect_success 'cross merge boundaries in blaming' '
diff --git a/t/t9305-fast-import-signatures.sh b/t/t9305-fast-import-signatures.sh
new file mode 100755
index 0000000000..c2b4271658
--- /dev/null
+++ b/t/t9305-fast-import-signatures.sh
@@ -0,0 +1,106 @@
+#!/bin/sh
+
+test_description='git fast-import --signed-commits=<mode>'
+
+GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
+
+. ./test-lib.sh
+. "$TEST_DIRECTORY/lib-gpg.sh"
+
+test_expect_success 'set up unsigned initial commit and import repo' '
+ test_commit first &&
+ git init new
+'
+
+test_expect_success GPG 'set up OpenPGP signed commit' '
+ git checkout -b openpgp-signing main &&
+ echo "Content for OpenPGP signing." >file-sign &&
+ git add file-sign &&
+ git commit -S -m "OpenPGP signed commit" &&
+ OPENPGP_SIGNING=$(git rev-parse --verify openpgp-signing)
+'
+
+test_expect_success GPG 'import OpenPGP signature with --signed-commits=verbatim' '
+ git fast-export --signed-commits=verbatim openpgp-signing >output &&
+ git -C new fast-import --quiet --signed-commits=verbatim <output >log 2>&1 &&
+ IMPORTED=$(git -C new rev-parse --verify refs/heads/openpgp-signing) &&
+ test $OPENPGP_SIGNING = $IMPORTED &&
+ test_must_be_empty log
+'
+
+test_expect_success GPGSM 'set up X.509 signed commit' '
+ git checkout -b x509-signing main &&
+ test_config gpg.format x509 &&
+ test_config user.signingkey $GIT_COMMITTER_EMAIL &&
+ echo "Content for X.509 signing." >file-sign &&
+ git add file-sign &&
+ git commit -S -m "X.509 signed commit" &&
+ X509_SIGNING=$(git rev-parse HEAD)
+'
+
+test_expect_success GPGSM 'import X.509 signature fails with --signed-commits=abort' '
+ git fast-export --signed-commits=verbatim x509-signing >output &&
+ test_must_fail git -C new fast-import --quiet --signed-commits=abort <output
+'
+
+test_expect_success GPGSM 'import X.509 signature with --signed-commits=warn-verbatim' '
+ git -C new fast-import --quiet --signed-commits=warn-verbatim <output >log 2>&1 &&
+ IMPORTED=$(git -C new rev-parse --verify refs/heads/x509-signing) &&
+ test $X509_SIGNING = $IMPORTED &&
+ test_grep "importing a commit signature" log
+'
+
+test_expect_success GPGSSH 'set up SSH signed commit' '
+ git checkout -b ssh-signing main &&
+ test_config gpg.format ssh &&
+ test_config user.signingkey "${GPGSSH_KEY_PRIMARY}" &&
+ echo "Content for SSH signing." >file-sign &&
+ git add file-sign &&
+ git commit -S -m "SSH signed commit" &&
+ SSH_SIGNING=$(git rev-parse HEAD)
+'
+
+test_expect_success GPGSSH 'strip SSH signature with --signed-commits=strip' '
+ git fast-export --signed-commits=verbatim ssh-signing >output &&
+ git -C new fast-import --quiet --signed-commits=strip <output >log 2>&1 &&
+ IMPORTED=$(git -C new rev-parse --verify refs/heads/ssh-signing) &&
+ test $SSH_SIGNING != $IMPORTED &&
+ git -C new cat-file commit "$IMPORTED" >actual &&
+ test_grep ! -E "^gpgsig" actual &&
+ test_must_be_empty log
+'
+
+test_expect_success GPG 'setup a commit with dual OpenPGP signatures on its SHA-1 and SHA-256 formats' '
+ # Create a signed SHA-256 commit
+ git init --object-format=sha256 explicit-sha256 &&
+ git -C explicit-sha256 config extensions.compatObjectFormat sha1 &&
+ git -C explicit-sha256 checkout -b dual-signed &&
+ test_commit -C explicit-sha256 A &&
+ echo B >explicit-sha256/B &&
+ git -C explicit-sha256 add B &&
+ test_tick &&
+ git -C explicit-sha256 commit -S -m "signed" B &&
+ SHA256_B=$(git -C explicit-sha256 rev-parse dual-signed) &&
+
+ # Create the corresponding SHA-1 commit
+ SHA1_B=$(git -C explicit-sha256 rev-parse --output-object-format=sha1 dual-signed) &&
+
+ # Check that the resulting SHA-1 commit has both signatures
+ git -C explicit-sha256 cat-file -p $SHA1_B >out &&
+ test_grep -E "^gpgsig " out &&
+ test_grep -E "^gpgsig-sha256 " out
+'
+
+test_expect_success GPG 'strip both OpenPGP signatures with --signed-commits=warn-strip' '
+ git -C explicit-sha256 fast-export --signed-commits=verbatim dual-signed >output &&
+ test_grep -E "^gpgsig sha1 openpgp" output &&
+ test_grep -E "^gpgsig sha256 openpgp" output &&
+ git -C new fast-import --quiet --signed-commits=warn-strip <output >log 2>&1 &&
+ git -C new cat-file commit refs/heads/dual-signed >actual &&
+ test_grep ! -E "^gpgsig " actual &&
+ test_grep ! -E "^gpgsig-sha256 " actual &&
+ test_grep "stripping a commit signature" log >out &&
+ test_line_count = 2 out
+'
+
+test_done
diff --git a/t/unit-tests/u-reftable-basics.c b/t/unit-tests/u-reftable-basics.c
index a0471083e7..73566ed0eb 100644
--- a/t/unit-tests/u-reftable-basics.c
+++ b/t/unit-tests/u-reftable-basics.c
@@ -9,6 +9,7 @@ https://developers.google.com/open-source/licenses/bsd
#include "unit-test.h"
#include "lib-reftable.h"
#include "reftable/basics.h"
+#include "reftable/reftable-error.h"
struct integer_needle_lesseq_args {
int needle;
@@ -79,14 +80,18 @@ void test_reftable_basics__names_equal(void)
void test_reftable_basics__parse_names(void)
{
char in1[] = "line\n";
- char in2[] = "a\nb\nc";
- char **out = parse_names(in1, strlen(in1));
+ char in2[] = "a\nb\nc\n";
+ char **out = NULL;
+ int err = parse_names(in1, strlen(in1), &out);
+ cl_assert(err == 0);
cl_assert(out != NULL);
cl_assert_equal_s(out[0], "line");
cl_assert(!out[1]);
free_names(out);
- out = parse_names(in2, strlen(in2));
+ out = NULL;
+ err = parse_names(in2, strlen(in2), &out);
+ cl_assert(err == 0);
cl_assert(out != NULL);
cl_assert_equal_s(out[0], "a");
cl_assert_equal_s(out[1], "b");
@@ -95,10 +100,21 @@ void test_reftable_basics__parse_names(void)
free_names(out);
}
+void test_reftable_basics__parse_names_missing_newline(void)
+{
+ char in1[] = "line\nline2";
+ char **out = NULL;
+ int err = parse_names(in1, strlen(in1), &out);
+ cl_assert(err == REFTABLE_FORMAT_ERROR);
+ cl_assert(out == NULL);
+}
+
void test_reftable_basics__parse_names_drop_empty_string(void)
{
char in[] = "a\n\nb\n";
- char **out = parse_names(in, strlen(in));
+ char **out = NULL;
+ int err = parse_names(in, strlen(in), &out);
+ cl_assert(err == 0);
cl_assert(out != NULL);
cl_assert_equal_s(out[0], "a");
/* simply '\n' should be dropped as empty string */
diff --git a/transport-helper.c b/transport-helper.c
index 0789e5bca5..4d95d84f9e 100644
--- a/transport-helper.c
+++ b/transport-helper.c
@@ -450,7 +450,7 @@ static int fetch_with_fetch(struct transport *transport,
}
strbuf_release(&buf);
- reprepare_packed_git(the_repository);
+ odb_reprepare(the_repository->objects);
return 0;
}
diff --git a/usage.c b/usage.c
index 4c245ba0cb..527edb1e79 100644
--- a/usage.c
+++ b/usage.c
@@ -7,6 +7,7 @@
#include "git-compat-util.h"
#include "gettext.h"
#include "trace2.h"
+#include "strbuf.h"
static void vfreportf(FILE *f, const char *prefix, const char *err, va_list params)
{
@@ -376,14 +377,32 @@ void bug_fl(const char *file, int line, const char *fmt, ...)
va_end(ap);
}
-NORETURN void you_still_use_that(const char *command_name)
+
+NORETURN void you_still_use_that(const char *command_name, const char *hint)
{
+ struct strbuf percent_encoded = STRBUF_INIT;
+ strbuf_add_percentencode(&percent_encoded,
+ command_name,
+ STRBUF_ENCODE_SLASH);
+
+ fprintf(stderr,
+ _("'%s' is nominated for removal.\n"), command_name);
+
+ if (hint)
+ fputs(hint, stderr);
+
fprintf(stderr,
- _("'%s' is nominated for removal.\n"
- "If you still use this command, please add an extra\n"
- "option, '--i-still-use-this', on the command line\n"
- "and let us know you still use it by sending an e-mail\n"
- "to <git@vger.kernel.org>. Thanks.\n"),
- command_name);
+ _("If you still use this command, here's what you can do:\n"
+ "\n"
+ "- read https://git-scm.com/docs/BreakingChanges.html\n"
+ "- check if anyone has discussed this on the mailing\n"
+ " list and if they came up with something that can\n"
+ " help you: https://lore.kernel.org/git/?q=%s\n"
+ "- send an email to <git@vger.kernel.org> to let us\n"
+ " know that you still use this command and were unable\n"
+ " to determine a suitable replacement\n"
+ "\n"),
+ percent_encoded.buf);
+ strbuf_release(&percent_encoded);
die(_("refusing to run without --i-still-use-this"));
}
diff --git a/varint.c b/varint.c
index 409c4977a1..03cd54416b 100644
--- a/varint.c
+++ b/varint.c
@@ -1,11 +1,11 @@
#include "git-compat-util.h"
#include "varint.h"
-uintmax_t decode_varint(const unsigned char **bufp)
+uint64_t decode_varint(const unsigned char **bufp)
{
const unsigned char *buf = *bufp;
unsigned char c = *buf++;
- uintmax_t val = c & 127;
+ uint64_t val = c & 127;
while (c & 128) {
val += 1;
if (!val || MSB(val, 7))
@@ -17,7 +17,7 @@ uintmax_t decode_varint(const unsigned char **bufp)
return val;
}
-int encode_varint(uintmax_t value, unsigned char *buf)
+uint8_t encode_varint(uint64_t value, unsigned char *buf)
{
unsigned char varint[16];
unsigned pos = sizeof(varint) - 1;
diff --git a/varint.h b/varint.h
index f78bb0ca52..eb401935bd 100644
--- a/varint.h
+++ b/varint.h
@@ -1,7 +1,7 @@
#ifndef VARINT_H
#define VARINT_H
-int encode_varint(uintmax_t, unsigned char *);
-uintmax_t decode_varint(const unsigned char **);
+uint8_t encode_varint(uint64_t, unsigned char *);
+uint64_t decode_varint(const unsigned char **);
#endif /* VARINT_H */
diff --git a/wrapper.c b/wrapper.c
index 2f00d2ac87..3d507d4204 100644
--- a/wrapper.c
+++ b/wrapper.c
@@ -721,6 +721,19 @@ int xgethostname(char *buf, size_t len)
return ret;
}
+int is_missing_file(const char *filename)
+{
+ struct stat st;
+
+ if (stat(filename, &st) < 0) {
+ if (errno == ENOENT)
+ return 1;
+ die_errno(_("could not stat %s"), filename);
+ }
+
+ return 0;
+}
+
int is_empty_or_missing_file(const char *filename)
{
struct stat st;
diff --git a/wrapper.h b/wrapper.h
index 7df824e34a..44a8597ac3 100644
--- a/wrapper.h
+++ b/wrapper.h
@@ -66,7 +66,9 @@ void write_file_buf(const char *path, const char *buf, size_t len);
__attribute__((format (printf, 2, 3)))
void write_file(const char *path, const char *fmt, ...);
-/* Return 1 if the file is empty or does not exists, 0 otherwise. */
+/* Return 1 if the file does not exist, 0 otherwise. */
+int is_missing_file(const char *filename);
+/* Return 1 if the file is empty or does not exist, 0 otherwise. */
int is_empty_or_missing_file(const char *filename);
enum fsync_action {
diff --git a/xdiff/xdiffi.c b/xdiff/xdiffi.c
index 5a96e36dfb..6f3998ee54 100644
--- a/xdiff/xdiffi.c
+++ b/xdiff/xdiffi.c
@@ -22,6 +22,11 @@
#include "xinclude.h"
+static unsigned long get_hash(xdfile_t *xdf, long index)
+{
+ return xdf->recs[xdf->rindex[index]].ha;
+}
+
#define XDL_MAX_COST_MIN 256
#define XDL_HEUR_MIN_COST 256
#define XDL_LINE_MAX (long)((1UL << (CHAR_BIT * sizeof(long) - 1)) - 1)
@@ -42,8 +47,8 @@ typedef struct s_xdpsplit {
* using this algorithm, so a little bit of heuristic is needed to cut the
* search and to return a suboptimal point.
*/
-static long xdl_split(unsigned long const *ha1, long off1, long lim1,
- unsigned long const *ha2, long off2, long lim2,
+static long xdl_split(xdfile_t *xdf1, long off1, long lim1,
+ xdfile_t *xdf2, long off2, long lim2,
long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl,
xdalgoenv_t *xenv) {
long dmin = off1 - lim2, dmax = lim1 - off2;
@@ -87,7 +92,7 @@ static long xdl_split(unsigned long const *ha1, long off1, long lim1,
i1 = kvdf[d + 1];
prev1 = i1;
i2 = i1 - d;
- for (; i1 < lim1 && i2 < lim2 && ha1[i1] == ha2[i2]; i1++, i2++);
+ for (; i1 < lim1 && i2 < lim2 && get_hash(xdf1, i1) == get_hash(xdf2, i2); i1++, i2++);
if (i1 - prev1 > xenv->snake_cnt)
got_snake = 1;
kvdf[d] = i1;
@@ -124,7 +129,7 @@ static long xdl_split(unsigned long const *ha1, long off1, long lim1,
i1 = kvdb[d + 1] - 1;
prev1 = i1;
i2 = i1 - d;
- for (; i1 > off1 && i2 > off2 && ha1[i1 - 1] == ha2[i2 - 1]; i1--, i2--);
+ for (; i1 > off1 && i2 > off2 && get_hash(xdf1, i1 - 1) == get_hash(xdf2, i2 - 1); i1--, i2--);
if (prev1 - i1 > xenv->snake_cnt)
got_snake = 1;
kvdb[d] = i1;
@@ -159,7 +164,7 @@ static long xdl_split(unsigned long const *ha1, long off1, long lim1,
if (v > XDL_K_HEUR * ec && v > best &&
off1 + xenv->snake_cnt <= i1 && i1 < lim1 &&
off2 + xenv->snake_cnt <= i2 && i2 < lim2) {
- for (k = 1; ha1[i1 - k] == ha2[i2 - k]; k++)
+ for (k = 1; get_hash(xdf1, i1 - k) == get_hash(xdf2, i2 - k); k++)
if (k == xenv->snake_cnt) {
best = v;
spl->i1 = i1;
@@ -183,7 +188,7 @@ static long xdl_split(unsigned long const *ha1, long off1, long lim1,
if (v > XDL_K_HEUR * ec && v > best &&
off1 < i1 && i1 <= lim1 - xenv->snake_cnt &&
off2 < i2 && i2 <= lim2 - xenv->snake_cnt) {
- for (k = 0; ha1[i1 + k] == ha2[i2 + k]; k++)
+ for (k = 0; get_hash(xdf1, i1 + k) == get_hash(xdf2, i2 + k); k++)
if (k == xenv->snake_cnt - 1) {
best = v;
spl->i1 = i1;
@@ -257,33 +262,26 @@ static long xdl_split(unsigned long const *ha1, long off1, long lim1,
* sub-boxes by calling the box splitting function. Note that the real job
* (marking changed lines) is done in the two boundary reaching checks.
*/
-int xdl_recs_cmp(diffdata_t *dd1, long off1, long lim1,
- diffdata_t *dd2, long off2, long lim2,
+int xdl_recs_cmp(xdfile_t *xdf1, long off1, long lim1,
+ xdfile_t *xdf2, long off2, long lim2,
long *kvdf, long *kvdb, int need_min, xdalgoenv_t *xenv) {
- unsigned long const *ha1 = dd1->ha, *ha2 = dd2->ha;
/*
* Shrink the box by walking through each diagonal snake (SW and NE).
*/
- for (; off1 < lim1 && off2 < lim2 && ha1[off1] == ha2[off2]; off1++, off2++);
- for (; off1 < lim1 && off2 < lim2 && ha1[lim1 - 1] == ha2[lim2 - 1]; lim1--, lim2--);
+ for (; off1 < lim1 && off2 < lim2 && get_hash(xdf1, off1) == get_hash(xdf2, off2); off1++, off2++);
+ for (; off1 < lim1 && off2 < lim2 && get_hash(xdf1, lim1 - 1) == get_hash(xdf2, lim2 - 1); lim1--, lim2--);
/*
* If one dimension is empty, then all records on the other one must
* be obviously changed.
*/
if (off1 == lim1) {
- char *rchg2 = dd2->rchg;
- long *rindex2 = dd2->rindex;
-
for (; off2 < lim2; off2++)
- rchg2[rindex2[off2]] = 1;
+ xdf2->changed[xdf2->rindex[off2]] = true;
} else if (off2 == lim2) {
- char *rchg1 = dd1->rchg;
- long *rindex1 = dd1->rindex;
-
for (; off1 < lim1; off1++)
- rchg1[rindex1[off1]] = 1;
+ xdf1->changed[xdf1->rindex[off1]] = true;
} else {
xdpsplit_t spl;
spl.i1 = spl.i2 = 0;
@@ -291,7 +289,7 @@ int xdl_recs_cmp(diffdata_t *dd1, long off1, long lim1,
/*
* Divide ...
*/
- if (xdl_split(ha1, off1, lim1, ha2, off2, lim2, kvdf, kvdb,
+ if (xdl_split(xdf1, off1, lim1, xdf2, off2, lim2, kvdf, kvdb,
need_min, &spl, xenv) < 0) {
return -1;
@@ -300,9 +298,9 @@ int xdl_recs_cmp(diffdata_t *dd1, long off1, long lim1,
/*
* ... et Impera.
*/
- if (xdl_recs_cmp(dd1, off1, spl.i1, dd2, off2, spl.i2,
+ if (xdl_recs_cmp(xdf1, off1, spl.i1, xdf2, off2, spl.i2,
kvdf, kvdb, spl.min_lo, xenv) < 0 ||
- xdl_recs_cmp(dd1, spl.i1, lim1, dd2, spl.i2, lim2,
+ xdl_recs_cmp(xdf1, spl.i1, lim1, xdf2, spl.i2, lim2,
kvdf, kvdb, spl.min_hi, xenv) < 0) {
return -1;
@@ -318,7 +316,6 @@ int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
long ndiags;
long *kvd, *kvdf, *kvdb;
xdalgoenv_t xenv;
- diffdata_t dd1, dd2;
int res;
if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0)
@@ -357,16 +354,7 @@ int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
xenv.snake_cnt = XDL_SNAKE_CNT;
xenv.heur_min = XDL_HEUR_MIN_COST;
- dd1.nrec = xe->xdf1.nreff;
- dd1.ha = xe->xdf1.ha;
- dd1.rchg = xe->xdf1.rchg;
- dd1.rindex = xe->xdf1.rindex;
- dd2.nrec = xe->xdf2.nreff;
- dd2.ha = xe->xdf2.ha;
- dd2.rchg = xe->xdf2.rchg;
- dd2.rindex = xe->xdf2.rindex;
-
- res = xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec,
+ res = xdl_recs_cmp(&xe->xdf1, 0, xe->xdf1.nreff, &xe->xdf2, 0, xe->xdf2.nreff,
kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0,
&xenv);
xdl_free(kvd);
@@ -501,13 +489,13 @@ static void measure_split(const xdfile_t *xdf, long split,
m->indent = -1;
} else {
m->end_of_file = 0;
- m->indent = get_indent(xdf->recs[split]);
+ m->indent = get_indent(&xdf->recs[split]);
}
m->pre_blank = 0;
m->pre_indent = -1;
for (i = split - 1; i >= 0; i--) {
- m->pre_indent = get_indent(xdf->recs[i]);
+ m->pre_indent = get_indent(&xdf->recs[i]);
if (m->pre_indent != -1)
break;
m->pre_blank += 1;
@@ -520,7 +508,7 @@ static void measure_split(const xdfile_t *xdf, long split,
m->post_blank = 0;
m->post_indent = -1;
for (i = split + 1; i < xdf->nrec; i++) {
- m->post_indent = get_indent(xdf->recs[i]);
+ m->post_indent = get_indent(&xdf->recs[i]);
if (m->post_indent != -1)
break;
m->post_blank += 1;
@@ -720,7 +708,7 @@ struct xdlgroup {
static void group_init(xdfile_t *xdf, struct xdlgroup *g)
{
g->start = g->end = 0;
- while (xdf->rchg[g->end])
+ while (xdf->changed[g->end])
g->end++;
}
@@ -734,7 +722,7 @@ static inline int group_next(xdfile_t *xdf, struct xdlgroup *g)
return -1;
g->start = g->end + 1;
- for (g->end = g->start; xdf->rchg[g->end]; g->end++)
+ for (g->end = g->start; xdf->changed[g->end]; g->end++)
;
return 0;
@@ -750,7 +738,7 @@ static inline int group_previous(xdfile_t *xdf, struct xdlgroup *g)
return -1;
g->end = g->start - 1;
- for (g->start = g->end; xdf->rchg[g->start - 1]; g->start--)
+ for (g->start = g->end; xdf->changed[g->start - 1]; g->start--)
;
return 0;
@@ -764,11 +752,11 @@ static inline int group_previous(xdfile_t *xdf, struct xdlgroup *g)
static int group_slide_down(xdfile_t *xdf, struct xdlgroup *g)
{
if (g->end < xdf->nrec &&
- recs_match(xdf->recs[g->start], xdf->recs[g->end])) {
- xdf->rchg[g->start++] = 0;
- xdf->rchg[g->end++] = 1;
+ recs_match(&xdf->recs[g->start], &xdf->recs[g->end])) {
+ xdf->changed[g->start++] = false;
+ xdf->changed[g->end++] = true;
- while (xdf->rchg[g->end])
+ while (xdf->changed[g->end])
g->end++;
return 0;
@@ -785,11 +773,11 @@ static int group_slide_down(xdfile_t *xdf, struct xdlgroup *g)
static int group_slide_up(xdfile_t *xdf, struct xdlgroup *g)
{
if (g->start > 0 &&
- recs_match(xdf->recs[g->start - 1], xdf->recs[g->end - 1])) {
- xdf->rchg[--g->start] = 1;
- xdf->rchg[--g->end] = 0;
+ recs_match(&xdf->recs[g->start - 1], &xdf->recs[g->end - 1])) {
+ xdf->changed[--g->start] = true;
+ xdf->changed[--g->end] = false;
- while (xdf->rchg[g->start - 1])
+ while (xdf->changed[g->start - 1])
g->start--;
return 0;
@@ -944,16 +932,16 @@ int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags) {
int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr) {
xdchange_t *cscr = NULL, *xch;
- char *rchg1 = xe->xdf1.rchg, *rchg2 = xe->xdf2.rchg;
+ bool *changed1 = xe->xdf1.changed, *changed2 = xe->xdf2.changed;
long i1, i2, l1, l2;
/*
* Trivial. Collects "groups" of changes and creates an edit script.
*/
for (i1 = xe->xdf1.nrec, i2 = xe->xdf2.nrec; i1 >= 0 || i2 >= 0; i1--, i2--)
- if (rchg1[i1 - 1] || rchg2[i2 - 1]) {
- for (l1 = i1; rchg1[i1 - 1]; i1--);
- for (l2 = i2; rchg2[i2 - 1]; i2--);
+ if (changed1[i1 - 1] || changed2[i2 - 1]) {
+ for (l1 = i1; changed1[i1 - 1]; i1--);
+ for (l2 = i2; changed2[i2 - 1]; i2--);
if (!(xch = xdl_add_change(cscr, i1, i2, l1 - i1, l2 - i2))) {
xdl_free_script(cscr);
@@ -1000,16 +988,16 @@ static void xdl_mark_ignorable_lines(xdchange_t *xscr, xdfenv_t *xe, long flags)
for (xch = xscr; xch; xch = xch->next) {
int ignore = 1;
- xrecord_t **rec;
+ xrecord_t *rec;
long i;
rec = &xe->xdf1.recs[xch->i1];
for (i = 0; i < xch->chg1 && ignore; i++)
- ignore = xdl_blankline(rec[i]->ptr, rec[i]->size, flags);
+ ignore = xdl_blankline(rec[i].ptr, rec[i].size, flags);
rec = &xe->xdf2.recs[xch->i2];
for (i = 0; i < xch->chg2 && ignore; i++)
- ignore = xdl_blankline(rec[i]->ptr, rec[i]->size, flags);
+ ignore = xdl_blankline(rec[i].ptr, rec[i].size, flags);
xch->ignore = ignore;
}
@@ -1033,7 +1021,7 @@ static void xdl_mark_ignorable_regex(xdchange_t *xscr, const xdfenv_t *xe,
xdchange_t *xch;
for (xch = xscr; xch; xch = xch->next) {
- xrecord_t **rec;
+ xrecord_t *rec;
int ignore = 1;
long i;
@@ -1045,11 +1033,11 @@ static void xdl_mark_ignorable_regex(xdchange_t *xscr, const xdfenv_t *xe,
rec = &xe->xdf1.recs[xch->i1];
for (i = 0; i < xch->chg1 && ignore; i++)
- ignore = record_matches_regex(rec[i], xpp);
+ ignore = record_matches_regex(&rec[i], xpp);
rec = &xe->xdf2.recs[xch->i2];
for (i = 0; i < xch->chg2 && ignore; i++)
- ignore = record_matches_regex(rec[i], xpp);
+ ignore = record_matches_regex(&rec[i], xpp);
xch->ignore = ignore;
}
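The pattern rec = &xe->xdf1.recs[xch->i1] followed by rec[i] works because the records are now a flat struct array; a minimal sketch of the same addressing (xrecord_t as defined in xtypes.h below, isspace() standing in for XDL_ISSPACE, function name hypothetical):

#include <ctype.h>
#include <stdbool.h>

static bool run_is_blank_sketch(const xrecord_t *recs, long i, long count)
{
	/* &recs[i] points into the array itself, so rec[k] is simply
	 * record i + k; there is no second level of pointers to chase */
	const xrecord_t *rec = &recs[i];

	for (long k = 0; k < count; k++) {
		long j = 0;

		while (j < rec[k].size && isspace((unsigned char)rec[k].ptr[j]))
			j++;
		if (j != rec[k].size)
			return false;
	}
	return true;
}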
diff --git a/xdiff/xdiffi.h b/xdiff/xdiffi.h
index 126c9d8ff4..49e52c67f9 100644
--- a/xdiff/xdiffi.h
+++ b/xdiff/xdiffi.h
@@ -24,13 +24,6 @@
#define XDIFFI_H
-typedef struct s_diffdata {
- long nrec;
- unsigned long const *ha;
- long *rindex;
- char *rchg;
-} diffdata_t;
-
typedef struct s_xdalgoenv {
long mxcost;
long snake_cnt;
@@ -46,8 +39,8 @@ typedef struct s_xdchange {
-int xdl_recs_cmp(diffdata_t *dd1, long off1, long lim1,
- diffdata_t *dd2, long off2, long lim2,
+int xdl_recs_cmp(xdfile_t *xdf1, long off1, long lim1,
+ xdfile_t *xdf2, long off2, long lim2,
long *kvdf, long *kvdb, int need_min, xdalgoenv_t *xenv);
int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
xdfenv_t *xe);
diff --git a/xdiff/xemit.c b/xdiff/xemit.c
index 1d40c9cb40..b2f1f30cd3 100644
--- a/xdiff/xemit.c
+++ b/xdiff/xemit.c
@@ -22,23 +22,13 @@
#include "xinclude.h"
-static long xdl_get_rec(xdfile_t *xdf, long ri, char const **rec) {
- *rec = xdf->recs[ri]->ptr;
-
- return xdf->recs[ri]->size;
-}
-
-
-static int xdl_emit_record(xdfile_t *xdf, long ri, char const *pre, xdemitcb_t *ecb) {
- long size, psize = strlen(pre);
- char const *rec;
-
- size = xdl_get_rec(xdf, ri, &rec);
- if (xdl_emit_diffrec(rec, size, pre, psize, ecb) < 0) {
+static int xdl_emit_record(xdfile_t *xdf, long ri, char const *pre, xdemitcb_t *ecb)
+{
+ xrecord_t *rec = &xdf->recs[ri];
+ if (xdl_emit_diffrec(rec->ptr, rec->size, pre, strlen(pre), ecb) < 0)
return -1;
- }
return 0;
}
@@ -120,11 +110,11 @@ static long def_ff(const char *rec, long len, char *buf, long sz)
static long match_func_rec(xdfile_t *xdf, xdemitconf_t const *xecfg, long ri,
char *buf, long sz)
{
- const char *rec;
- long len = xdl_get_rec(xdf, ri, &rec);
+ xrecord_t *rec = &xdf->recs[ri];
+
if (!xecfg->find_func)
- return def_ff(rec, len, buf, sz);
- return xecfg->find_func(rec, len, buf, sz, xecfg->find_func_priv);
+ return def_ff(rec->ptr, rec->size, buf, sz);
+ return xecfg->find_func(rec->ptr, rec->size, buf, sz, xecfg->find_func_priv);
}
static int is_func_rec(xdfile_t *xdf, xdemitconf_t const *xecfg, long ri)
@@ -160,14 +150,12 @@ static long get_func_line(xdfenv_t *xe, xdemitconf_t const *xecfg,
static int is_empty_rec(xdfile_t *xdf, long ri)
{
- const char *rec;
- long len = xdl_get_rec(xdf, ri, &rec);
+ xrecord_t *rec = &xdf->recs[ri];
+ long i = 0;
- while (len > 0 && XDL_ISSPACE(*rec)) {
- rec++;
- len--;
- }
- return !len;
+ for (; i < rec->size && XDL_ISSPACE(rec->ptr[i]); i++);
+
+ return i == rec->size;
}
int xdl_emit_diff(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
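For reference, a one-function sketch (types assumed from xtypes.h below, name hypothetical) of what the removed xdl_get_rec() helper reduces to once records live in a flat array:

static long get_rec_sketch(const xdfile_t *xdf, long ri, char const **line)
{
	/* both fields of the record are a single array access away */
	*line = xdf->recs[ri].ptr;
	return xdf->recs[ri].size;
}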
diff --git a/xdiff/xhistogram.c b/xdiff/xhistogram.c
index 040d81e0bc..6dc450b1fe 100644
--- a/xdiff/xhistogram.c
+++ b/xdiff/xhistogram.c
@@ -86,7 +86,7 @@ struct region {
((LINE_MAP(index, ptr))->cnt)
#define REC(env, s, l) \
- (env->xdf##s.recs[l - 1])
+ (&env->xdf##s.recs[l - 1])
static int cmp_recs(xrecord_t *r1, xrecord_t *r2)
{
@@ -318,11 +318,11 @@ redo:
if (!count1) {
while(count2--)
- env->xdf2.rchg[line2++ - 1] = 1;
+ env->xdf2.changed[line2++ - 1] = true;
return 0;
} else if (!count2) {
while(count1--)
- env->xdf1.rchg[line1++ - 1] = 1;
+ env->xdf1.changed[line1++ - 1] = true;
return 0;
}
@@ -335,9 +335,9 @@ redo:
else {
if (lcs.begin1 == 0 && lcs.begin2 == 0) {
while (count1--)
- env->xdf1.rchg[line1++ - 1] = 1;
+ env->xdf1.changed[line1++ - 1] = true;
while (count2--)
- env->xdf2.rchg[line2++ - 1] = 1;
+ env->xdf2.changed[line2++ - 1] = true;
result = 0;
} else {
result = histogram_diff(xpp, env,
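Since REC() now yields the address of an array element, a short sketch of what a comparison through it amounts to; the ->ha comparison mirrors match() in xpatience.c below, and the function name is hypothetical:

/* REC(env, 1, l) expands to &env->xdf1.recs[l - 1]: 1-based line
 * numbers index the flat, 0-based record array. */
static int lines_match_sketch(const xdfenv_t *env, long l1, long l2)
{
	const xrecord_t *r1 = &env->xdf1.recs[l1 - 1];
	const xrecord_t *r2 = &env->xdf2.recs[l2 - 1];

	return r1->ha == r2->ha;
}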
diff --git a/xdiff/xmerge.c b/xdiff/xmerge.c
index af40c88a5b..fd600cbb5d 100644
--- a/xdiff/xmerge.c
+++ b/xdiff/xmerge.c
@@ -97,12 +97,12 @@ static int xdl_merge_cmp_lines(xdfenv_t *xe1, int i1, xdfenv_t *xe2, int i2,
int line_count, long flags)
{
int i;
- xrecord_t **rec1 = xe1->xdf2.recs + i1;
- xrecord_t **rec2 = xe2->xdf2.recs + i2;
+ xrecord_t *rec1 = xe1->xdf2.recs + i1;
+ xrecord_t *rec2 = xe2->xdf2.recs + i2;
for (i = 0; i < line_count; i++) {
- int result = xdl_recmatch(rec1[i]->ptr, rec1[i]->size,
- rec2[i]->ptr, rec2[i]->size, flags);
+ int result = xdl_recmatch(rec1[i].ptr, rec1[i].size,
+ rec2[i].ptr, rec2[i].size, flags);
if (!result)
return -1;
}
@@ -111,7 +111,7 @@ static int xdl_merge_cmp_lines(xdfenv_t *xe1, int i1, xdfenv_t *xe2, int i2,
static int xdl_recs_copy_0(int use_orig, xdfenv_t *xe, int i, int count, int needs_cr, int add_nl, char *dest)
{
- xrecord_t **recs;
+ xrecord_t *recs;
int size = 0;
recs = (use_orig ? xe->xdf1.recs : xe->xdf2.recs) + i;
@@ -119,12 +119,12 @@ static int xdl_recs_copy_0(int use_orig, xdfenv_t *xe, int i, int count, int nee
if (count < 1)
return 0;
- for (i = 0; i < count; size += recs[i++]->size)
+ for (i = 0; i < count; size += recs[i++].size)
if (dest)
- memcpy(dest + size, recs[i]->ptr, recs[i]->size);
+ memcpy(dest + size, recs[i].ptr, recs[i].size);
if (add_nl) {
- i = recs[count - 1]->size;
- if (i == 0 || recs[count - 1]->ptr[i - 1] != '\n') {
+ i = recs[count - 1].size;
+ if (i == 0 || recs[count - 1].ptr[i - 1] != '\n') {
if (needs_cr) {
if (dest)
dest[size] = '\r';
@@ -160,22 +160,22 @@ static int is_eol_crlf(xdfile_t *file, int i)
if (i < file->nrec - 1)
/* All lines before the last *must* end in LF */
- return (size = file->recs[i]->size) > 1 &&
- file->recs[i]->ptr[size - 2] == '\r';
+ return (size = file->recs[i].size) > 1 &&
+ file->recs[i].ptr[size - 2] == '\r';
if (!file->nrec)
/* Cannot determine eol style from empty file */
return -1;
- if ((size = file->recs[i]->size) &&
- file->recs[i]->ptr[size - 1] == '\n')
+ if ((size = file->recs[i].size) &&
+ file->recs[i].ptr[size - 1] == '\n')
/* Last line; ends in LF; Is it CR/LF? */
return size > 1 &&
- file->recs[i]->ptr[size - 2] == '\r';
+ file->recs[i].ptr[size - 2] == '\r';
if (!i)
/* The only line has no eol */
return -1;
/* Determine eol from second-to-last line */
- return (size = file->recs[i - 1]->size) > 1 &&
- file->recs[i - 1]->ptr[size - 2] == '\r';
+ return (size = file->recs[i - 1].size) > 1 &&
+ file->recs[i - 1].ptr[size - 2] == '\r';
}
static int is_cr_needed(xdfenv_t *xe1, xdfenv_t *xe2, xdmerge_t *m)
@@ -334,22 +334,22 @@ static int recmatch(xrecord_t *rec1, xrecord_t *rec2, unsigned long flags)
static void xdl_refine_zdiff3_conflicts(xdfenv_t *xe1, xdfenv_t *xe2, xdmerge_t *m,
xpparam_t const *xpp)
{
- xrecord_t **rec1 = xe1->xdf2.recs, **rec2 = xe2->xdf2.recs;
+ xrecord_t *rec1 = xe1->xdf2.recs, *rec2 = xe2->xdf2.recs;
for (; m; m = m->next) {
/* let's handle just the conflicts */
if (m->mode)
continue;
while(m->chg1 && m->chg2 &&
- recmatch(rec1[m->i1], rec2[m->i2], xpp->flags)) {
+ recmatch(&rec1[m->i1], &rec2[m->i2], xpp->flags)) {
m->chg1--;
m->chg2--;
m->i1++;
m->i2++;
}
while (m->chg1 && m->chg2 &&
- recmatch(rec1[m->i1 + m->chg1 - 1],
- rec2[m->i2 + m->chg2 - 1], xpp->flags)) {
+ recmatch(&rec1[m->i1 + m->chg1 - 1],
+ &rec2[m->i2 + m->chg2 - 1], xpp->flags)) {
m->chg1--;
m->chg2--;
}
@@ -381,12 +381,12 @@ static int xdl_refine_conflicts(xdfenv_t *xe1, xdfenv_t *xe2, xdmerge_t *m,
* This probably does not work outside git, since
* we have a very simple mmfile structure.
*/
- t1.ptr = (char *)xe1->xdf2.recs[m->i1]->ptr;
- t1.size = xe1->xdf2.recs[m->i1 + m->chg1 - 1]->ptr
- + xe1->xdf2.recs[m->i1 + m->chg1 - 1]->size - t1.ptr;
- t2.ptr = (char *)xe2->xdf2.recs[m->i2]->ptr;
- t2.size = xe2->xdf2.recs[m->i2 + m->chg2 - 1]->ptr
- + xe2->xdf2.recs[m->i2 + m->chg2 - 1]->size - t2.ptr;
+ t1.ptr = (char *)xe1->xdf2.recs[m->i1].ptr;
+ t1.size = xe1->xdf2.recs[m->i1 + m->chg1 - 1].ptr
+ + xe1->xdf2.recs[m->i1 + m->chg1 - 1].size - t1.ptr;
+ t2.ptr = (char *)xe2->xdf2.recs[m->i2].ptr;
+ t2.size = xe2->xdf2.recs[m->i2 + m->chg2 - 1].ptr
+ + xe2->xdf2.recs[m->i2 + m->chg2 - 1].size - t2.ptr;
if (xdl_do_diff(&t1, &t2, xpp, &xe) < 0)
return -1;
if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 ||
@@ -440,8 +440,8 @@ static int line_contains_alnum(const char *ptr, long size)
static int lines_contain_alnum(xdfenv_t *xe, int i, int chg)
{
for (; chg; chg--, i++)
- if (line_contains_alnum(xe->xdf2.recs[i]->ptr,
- xe->xdf2.recs[i]->size))
+ if (line_contains_alnum(xe->xdf2.recs[i].ptr,
+ xe->xdf2.recs[i].size))
return 1;
return 0;
}
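A minimal sketch of the size accounting that xdl_recs_copy_0() performs over the flat record array (function name hypothetical, types from xtypes.h below):

/* total byte size of `count` consecutive records starting at index i */
static long run_size_sketch(const xrecord_t *recs, long i, long count)
{
	long size = 0;

	for (long k = 0; k < count; k++)
		size += recs[i + k].size;
	return size;
}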
diff --git a/xdiff/xpatience.c b/xdiff/xpatience.c
index 77dc411d19..669b653580 100644
--- a/xdiff/xpatience.c
+++ b/xdiff/xpatience.c
@@ -88,9 +88,9 @@ static int is_anchor(xpparam_t const *xpp, const char *line)
static void insert_record(xpparam_t const *xpp, int line, struct hashmap *map,
int pass)
{
- xrecord_t **records = pass == 1 ?
+ xrecord_t *records = pass == 1 ?
map->env->xdf1.recs : map->env->xdf2.recs;
- xrecord_t *record = records[line - 1];
+ xrecord_t *record = &records[line - 1];
/*
* After xdl_prepare_env() (or more precisely, due to
* xdl_classify_record()), the "ha" member of the records (AKA lines)
@@ -121,7 +121,7 @@ static void insert_record(xpparam_t const *xpp, int line, struct hashmap *map,
return;
map->entries[index].line1 = line;
map->entries[index].hash = record->ha;
- map->entries[index].anchor = is_anchor(xpp, map->env->xdf1.recs[line - 1]->ptr);
+ map->entries[index].anchor = is_anchor(xpp, map->env->xdf1.recs[line - 1].ptr);
if (!map->first)
map->first = map->entries + index;
if (map->last) {
@@ -246,8 +246,8 @@ static int find_longest_common_sequence(struct hashmap *map, struct entry **res)
static int match(struct hashmap *map, int line1, int line2)
{
- xrecord_t *record1 = map->env->xdf1.recs[line1 - 1];
- xrecord_t *record2 = map->env->xdf2.recs[line2 - 1];
+ xrecord_t *record1 = &map->env->xdf1.recs[line1 - 1];
+ xrecord_t *record2 = &map->env->xdf2.recs[line2 - 1];
return record1->ha == record2->ha;
}
@@ -331,11 +331,11 @@ static int patience_diff(xpparam_t const *xpp, xdfenv_t *env,
/* trivial case: one side is empty */
if (!count1) {
while(count2--)
- env->xdf2.rchg[line2++ - 1] = 1;
+ env->xdf2.changed[line2++ - 1] = true;
return 0;
} else if (!count2) {
while(count1--)
- env->xdf1.rchg[line1++ - 1] = 1;
+ env->xdf1.changed[line1++ - 1] = true;
return 0;
}
@@ -347,9 +347,9 @@ static int patience_diff(xpparam_t const *xpp, xdfenv_t *env,
/* are there any matching lines at all? */
if (!map.has_matches) {
while(count1--)
- env->xdf1.rchg[line1++ - 1] = 1;
+ env->xdf1.changed[line1++ - 1] = true;
while(count2--)
- env->xdf2.rchg[line2++ - 1] = 1;
+ env->xdf2.changed[line2++ - 1] = true;
xdl_free(map.entries);
return 0;
}
diff --git a/xdiff/xprepare.c b/xdiff/xprepare.c
index e1d4017b2d..192334f1b7 100644
--- a/xdiff/xprepare.c
+++ b/xdiff/xprepare.c
@@ -29,12 +29,13 @@
#define XDL_GUESS_NLINES1 256
#define XDL_GUESS_NLINES2 20
+#define DISCARD 0
+#define KEEP 1
+#define INVESTIGATE 2
typedef struct s_xdlclass {
struct s_xdlclass *next;
- unsigned long ha;
- char const *line;
- long size;
+ xrecord_t rec;
long idx;
long len1, len2;
} xdlclass_t;
@@ -53,21 +54,6 @@ typedef struct s_xdlclassifier {
-static int xdl_init_classifier(xdlclassifier_t *cf, long size, long flags);
-static void xdl_free_classifier(xdlclassifier_t *cf);
-static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t **rhash,
- unsigned int hbits, xrecord_t *rec);
-static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_t const *xpp,
- xdlclassifier_t *cf, xdfile_t *xdf);
-static void xdl_free_ctx(xdfile_t *xdf);
-static int xdl_clean_mmatch(char const *dis, long i, long s, long e);
-static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2);
-static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2);
-static int xdl_optimize_ctxs(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2);
-
-
-
-
static int xdl_init_classifier(xdlclassifier_t *cf, long size, long flags) {
cf->flags = flags;
@@ -106,17 +92,14 @@ static void xdl_free_classifier(xdlclassifier_t *cf) {
}
-static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t **rhash,
- unsigned int hbits, xrecord_t *rec) {
+static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t *rec) {
long hi;
- char const *line;
xdlclass_t *rcrec;
- line = rec->ptr;
hi = (long) XDL_HASHLONG(rec->ha, cf->hbits);
for (rcrec = cf->rchash[hi]; rcrec; rcrec = rcrec->next)
- if (rcrec->ha == rec->ha &&
- xdl_recmatch(rcrec->line, rcrec->size,
+ if (rcrec->rec.ha == rec->ha &&
+ xdl_recmatch(rcrec->rec.ptr, rcrec->rec.size,
rec->ptr, rec->size, cf->flags))
break;
@@ -129,9 +112,7 @@ static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t
if (XDL_ALLOC_GROW(cf->rcrecs, cf->count, cf->alloc))
return -1;
cf->rcrecs[rcrec->idx] = rcrec;
- rcrec->line = line;
- rcrec->size = rec->size;
- rcrec->ha = rec->ha;
+ rcrec->rec = *rec;
rcrec->len1 = rcrec->len2 = 0;
rcrec->next = cf->rchash[hi];
cf->rchash[hi] = rcrec;
@@ -141,158 +122,70 @@ static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t
rec->ha = (unsigned long) rcrec->idx;
- hi = (long) XDL_HASHLONG(rec->ha, hbits);
- rec->next = rhash[hi];
- rhash[hi] = rec;
-
return 0;
}
+static void xdl_free_ctx(xdfile_t *xdf)
+{
+ xdl_free(xdf->rindex);
+ xdl_free(xdf->changed - 1);
+ xdl_free(xdf->recs);
+}
+
+
static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_t const *xpp,
xdlclassifier_t *cf, xdfile_t *xdf) {
- unsigned int hbits;
- long nrec, hsize, bsize;
+ long bsize;
unsigned long hav;
char const *blk, *cur, *top, *prev;
xrecord_t *crec;
- xrecord_t **recs;
- xrecord_t **rhash;
- unsigned long *ha;
- char *rchg;
- long *rindex;
-
- ha = NULL;
- rindex = NULL;
- rchg = NULL;
- rhash = NULL;
- recs = NULL;
-
- if (xdl_cha_init(&xdf->rcha, sizeof(xrecord_t), narec / 4 + 1) < 0)
- goto abort;
- if (!XDL_ALLOC_ARRAY(recs, narec))
- goto abort;
- hbits = xdl_hashbits((unsigned int) narec);
- hsize = 1 << hbits;
- if (!XDL_CALLOC_ARRAY(rhash, hsize))
+ xdf->rindex = NULL;
+ xdf->changed = NULL;
+ xdf->recs = NULL;
+
+ if (!XDL_ALLOC_ARRAY(xdf->recs, narec))
goto abort;
- nrec = 0;
+ xdf->nrec = 0;
if ((cur = blk = xdl_mmfile_first(mf, &bsize))) {
for (top = blk + bsize; cur < top; ) {
prev = cur;
hav = xdl_hash_record(&cur, top, xpp->flags);
- if (XDL_ALLOC_GROW(recs, nrec + 1, narec))
- goto abort;
- if (!(crec = xdl_cha_alloc(&xdf->rcha)))
+ if (XDL_ALLOC_GROW(xdf->recs, xdf->nrec + 1, narec))
goto abort;
+ crec = &xdf->recs[xdf->nrec++];
crec->ptr = prev;
crec->size = (long) (cur - prev);
crec->ha = hav;
- recs[nrec++] = crec;
- if (xdl_classify_record(pass, cf, rhash, hbits, crec) < 0)
+ if (xdl_classify_record(pass, cf, crec) < 0)
goto abort;
}
}
- if (!XDL_CALLOC_ARRAY(rchg, nrec + 2))
+ if (!XDL_CALLOC_ARRAY(xdf->changed, xdf->nrec + 2))
goto abort;
if ((XDF_DIFF_ALG(xpp->flags) != XDF_PATIENCE_DIFF) &&
(XDF_DIFF_ALG(xpp->flags) != XDF_HISTOGRAM_DIFF)) {
- if (!XDL_ALLOC_ARRAY(rindex, nrec + 1))
- goto abort;
- if (!XDL_ALLOC_ARRAY(ha, nrec + 1))
+ if (!XDL_ALLOC_ARRAY(xdf->rindex, xdf->nrec + 1))
goto abort;
}
- xdf->nrec = nrec;
- xdf->recs = recs;
- xdf->hbits = hbits;
- xdf->rhash = rhash;
- xdf->rchg = rchg + 1;
- xdf->rindex = rindex;
+ xdf->changed += 1;
xdf->nreff = 0;
- xdf->ha = ha;
xdf->dstart = 0;
- xdf->dend = nrec - 1;
+ xdf->dend = xdf->nrec - 1;
return 0;
abort:
- xdl_free(ha);
- xdl_free(rindex);
- xdl_free(rchg);
- xdl_free(rhash);
- xdl_free(recs);
- xdl_cha_free(&xdf->rcha);
+ xdl_free_ctx(xdf);
return -1;
}
-static void xdl_free_ctx(xdfile_t *xdf) {
-
- xdl_free(xdf->rhash);
- xdl_free(xdf->rindex);
- xdl_free(xdf->rchg - 1);
- xdl_free(xdf->ha);
- xdl_free(xdf->recs);
- xdl_cha_free(&xdf->rcha);
-}
-
-
-int xdl_prepare_env(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
- xdfenv_t *xe) {
- long enl1, enl2, sample;
- xdlclassifier_t cf;
-
- memset(&cf, 0, sizeof(cf));
-
- /*
- * For histogram diff, we can afford a smaller sample size and
- * thus a poorer estimate of the number of lines, as the hash
- * table (rhash) won't be filled up/grown. The number of lines
- * (nrecs) will be updated correctly anyway by
- * xdl_prepare_ctx().
- */
- sample = (XDF_DIFF_ALG(xpp->flags) == XDF_HISTOGRAM_DIFF
- ? XDL_GUESS_NLINES2 : XDL_GUESS_NLINES1);
-
- enl1 = xdl_guess_lines(mf1, sample) + 1;
- enl2 = xdl_guess_lines(mf2, sample) + 1;
-
- if (xdl_init_classifier(&cf, enl1 + enl2 + 1, xpp->flags) < 0)
- return -1;
-
- if (xdl_prepare_ctx(1, mf1, enl1, xpp, &cf, &xe->xdf1) < 0) {
-
- xdl_free_classifier(&cf);
- return -1;
- }
- if (xdl_prepare_ctx(2, mf2, enl2, xpp, &cf, &xe->xdf2) < 0) {
-
- xdl_free_ctx(&xe->xdf1);
- xdl_free_classifier(&cf);
- return -1;
- }
-
- if ((XDF_DIFF_ALG(xpp->flags) != XDF_PATIENCE_DIFF) &&
- (XDF_DIFF_ALG(xpp->flags) != XDF_HISTOGRAM_DIFF) &&
- xdl_optimize_ctxs(&cf, &xe->xdf1, &xe->xdf2) < 0) {
-
- xdl_free_ctx(&xe->xdf2);
- xdl_free_ctx(&xe->xdf1);
- xdl_free_classifier(&cf);
- return -1;
- }
-
- xdl_free_classifier(&cf);
-
- return 0;
-}
-
-
void xdl_free_env(xdfenv_t *xe) {
xdl_free_ctx(&xe->xdf2);
@@ -300,15 +193,15 @@ void xdl_free_env(xdfenv_t *xe) {
}
-static int xdl_clean_mmatch(char const *dis, long i, long s, long e) {
+static bool xdl_clean_mmatch(uint8_t const *action, long i, long s, long e) {
long r, rdis0, rpdis0, rdis1, rpdis1;
/*
- * Limits the window the is examined during the similar-lines
- * scan. The loops below stops when dis[i - r] == 1 (line that
- * has no match), but there are corner cases where the loop
- * proceed all the way to the extremities by causing huge
- * performance penalties in case of big files.
+	 * Limits the window that is examined during the similar-lines
+	 * scan. The loops below stop when action[i - r] == KEEP (a line
+	 * with an ordinary match), but there are corner cases where the
+	 * loop proceeds all the way to the extremities, causing huge
+	 * performance penalties in the case of big files.
*/
if (i - s > XDL_SIMSCAN_WINDOW)
s = i - XDL_SIMSCAN_WINDOW;
@@ -317,40 +210,47 @@ static int xdl_clean_mmatch(char const *dis, long i, long s, long e) {
/*
* Scans the lines before 'i' to find a run of lines that either
- * have no match (dis[j] == 0) or have multiple matches (dis[j] > 1).
- * Note that we always call this function with dis[i] > 1, so the
- * current line (i) is already a multimatch line.
+ * have no match (action[j] == DISCARD) or have multiple matches
+ * (action[j] == INVESTIGATE). Note that we always call this
+ * function with action[i] == INVESTIGATE, so the current line
+ * (i) is already a multimatch line.
*/
for (r = 1, rdis0 = 0, rpdis0 = 1; (i - r) >= s; r++) {
- if (!dis[i - r])
+ if (action[i - r] == DISCARD)
rdis0++;
- else if (dis[i - r] == 2)
+ else if (action[i - r] == INVESTIGATE)
rpdis0++;
- else
+ else if (action[i - r] == KEEP)
break;
+ else
+ BUG("Illegal value for action[i - r]");
}
/*
- * If the run before the line 'i' found only multimatch lines, we
- * return 0 and hence we don't make the current line (i) discarded.
- * We want to discard multimatch lines only when they appear in the
- * middle of runs with nomatch lines (dis[j] == 0).
+	 * If the run before the line 'i' found only multimatch lines,
+	 * we return false and hence do not discard the current line
+	 * (i). We want to discard multimatch lines only when they
+	 * appear in the middle of runs of nomatch lines
+	 * (action[j] == DISCARD).
*/
if (rdis0 == 0)
return 0;
for (r = 1, rdis1 = 0, rpdis1 = 1; (i + r) <= e; r++) {
- if (!dis[i + r])
+ if (action[i + r] == DISCARD)
rdis1++;
- else if (dis[i + r] == 2)
+ else if (action[i + r] == INVESTIGATE)
rpdis1++;
- else
+ else if (action[i + r] == KEEP)
break;
+ else
+ BUG("Illegal value for action[i + r]");
}
/*
- * If the run after the line 'i' found only multimatch lines, we
- * return 0 and hence we don't make the current line (i) discarded.
+	 * If the run after the line 'i' found only multimatch lines,
+	 * we return false and hence do not discard the current line
+	 * (i).
*/
if (rdis1 == 0)
- return 0;
+ return false;
rdis1 += rdis0;
rpdis1 += rpdis0;
@@ -361,63 +261,81 @@ static int xdl_clean_mmatch(char const *dis, long i, long s, long e) {
/*
* Try to reduce the problem complexity, discard records that have no
* matches on the other file. Also, lines that have multiple matches
- * might be potentially discarded if they happear in a run of discardable.
+ * might be discarded if they appear in a run of discardable lines.
*/
static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2) {
long i, nm, nreff, mlim;
- xrecord_t **recs;
+ xrecord_t *recs;
xdlclass_t *rcrec;
- char *dis, *dis1, *dis2;
- int need_min = !!(cf->flags & XDF_NEED_MINIMAL);
+ uint8_t *action1 = NULL, *action2 = NULL;
+ bool need_min = !!(cf->flags & XDF_NEED_MINIMAL);
+ int ret = 0;
- if (!XDL_CALLOC_ARRAY(dis, xdf1->nrec + xdf2->nrec + 2))
- return -1;
- dis1 = dis;
- dis2 = dis1 + xdf1->nrec + 1;
+ /*
+ * Create temporary arrays that will help us decide if
+ * changed[i] should remain false, or become true.
+ */
+ if (!XDL_CALLOC_ARRAY(action1, xdf1->nrec + 1)) {
+ ret = -1;
+ goto cleanup;
+ }
+ if (!XDL_CALLOC_ARRAY(action2, xdf2->nrec + 1)) {
+ ret = -1;
+ goto cleanup;
+ }
+ /*
+ * Initialize temporary arrays with DISCARD, KEEP, or INVESTIGATE.
+ */
if ((mlim = xdl_bogosqrt(xdf1->nrec)) > XDL_MAX_EQLIMIT)
mlim = XDL_MAX_EQLIMIT;
for (i = xdf1->dstart, recs = &xdf1->recs[xdf1->dstart]; i <= xdf1->dend; i++, recs++) {
- rcrec = cf->rcrecs[(*recs)->ha];
+ rcrec = cf->rcrecs[recs->ha];
nm = rcrec ? rcrec->len2 : 0;
- dis1[i] = (nm == 0) ? 0: (nm >= mlim && !need_min) ? 2: 1;
+ action1[i] = (nm == 0) ? DISCARD: (nm >= mlim && !need_min) ? INVESTIGATE: KEEP;
}
if ((mlim = xdl_bogosqrt(xdf2->nrec)) > XDL_MAX_EQLIMIT)
mlim = XDL_MAX_EQLIMIT;
for (i = xdf2->dstart, recs = &xdf2->recs[xdf2->dstart]; i <= xdf2->dend; i++, recs++) {
- rcrec = cf->rcrecs[(*recs)->ha];
+ rcrec = cf->rcrecs[recs->ha];
nm = rcrec ? rcrec->len1 : 0;
- dis2[i] = (nm == 0) ? 0: (nm >= mlim && !need_min) ? 2: 1;
+ action2[i] = (nm == 0) ? DISCARD: (nm >= mlim && !need_min) ? INVESTIGATE: KEEP;
}
+ /*
+ * Use temporary arrays to decide if changed[i] should remain
+ * false, or become true.
+ */
for (nreff = 0, i = xdf1->dstart, recs = &xdf1->recs[xdf1->dstart];
i <= xdf1->dend; i++, recs++) {
- if (dis1[i] == 1 ||
- (dis1[i] == 2 && !xdl_clean_mmatch(dis1, i, xdf1->dstart, xdf1->dend))) {
- xdf1->rindex[nreff] = i;
- xdf1->ha[nreff] = (*recs)->ha;
- nreff++;
+ if (action1[i] == KEEP ||
+ (action1[i] == INVESTIGATE && !xdl_clean_mmatch(action1, i, xdf1->dstart, xdf1->dend))) {
+ xdf1->rindex[nreff++] = i;
+ /* changed[i] remains false, i.e. keep */
} else
- xdf1->rchg[i] = 1;
+ xdf1->changed[i] = true;
+ /* i.e. discard */
}
xdf1->nreff = nreff;
for (nreff = 0, i = xdf2->dstart, recs = &xdf2->recs[xdf2->dstart];
i <= xdf2->dend; i++, recs++) {
- if (dis2[i] == 1 ||
- (dis2[i] == 2 && !xdl_clean_mmatch(dis2, i, xdf2->dstart, xdf2->dend))) {
- xdf2->rindex[nreff] = i;
- xdf2->ha[nreff] = (*recs)->ha;
- nreff++;
+ if (action2[i] == KEEP ||
+ (action2[i] == INVESTIGATE && !xdl_clean_mmatch(action2, i, xdf2->dstart, xdf2->dend))) {
+ xdf2->rindex[nreff++] = i;
+ /* changed[i] remains false, i.e. keep */
} else
- xdf2->rchg[i] = 1;
+ xdf2->changed[i] = true;
+ /* i.e. discard */
}
xdf2->nreff = nreff;
- xdl_free(dis);
+cleanup:
+ xdl_free(action1);
+ xdl_free(action2);
- return 0;
+ return ret;
}
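The three action values map onto the per-line decision as follows; a minimal sketch (function name hypothetical, nm/mlim/need_min as in xdl_cleanup_records() above):

#include <stdbool.h>
#include <stdint.h>

static uint8_t classify_line_sketch(long nm, long mlim, bool need_min)
{
	if (nm == 0)
		return DISCARD;		/* no counterpart on the other file */
	if (nm >= mlim && !need_min)
		return INVESTIGATE;	/* suspiciously many counterparts */
	return KEEP;			/* ordinary line, keep for the diff */
}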
@@ -426,13 +344,13 @@ static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xd
*/
static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2) {
long i, lim;
- xrecord_t **recs1, **recs2;
+ xrecord_t *recs1, *recs2;
recs1 = xdf1->recs;
recs2 = xdf2->recs;
for (i = 0, lim = XDL_MIN(xdf1->nrec, xdf2->nrec); i < lim;
i++, recs1++, recs2++)
- if ((*recs1)->ha != (*recs2)->ha)
+ if (recs1->ha != recs2->ha)
break;
xdf1->dstart = xdf2->dstart = i;
@@ -440,7 +358,7 @@ static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2) {
recs1 = xdf1->recs + xdf1->nrec - 1;
recs2 = xdf2->recs + xdf2->nrec - 1;
for (lim -= i, i = 0; i < lim; i++, recs1--, recs2--)
- if ((*recs1)->ha != (*recs2)->ha)
+ if (recs1->ha != recs2->ha)
break;
xdf1->dend = xdf1->nrec - i - 1;
@@ -460,3 +378,53 @@ static int xdl_optimize_ctxs(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2
return 0;
}
+
+int xdl_prepare_env(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
+ xdfenv_t *xe) {
+ long enl1, enl2, sample;
+ xdlclassifier_t cf;
+
+ memset(&cf, 0, sizeof(cf));
+
+ /*
+ * For histogram diff, we can afford a smaller sample size and
+ * thus a poorer estimate of the number of lines, as the hash
+ * table (rhash) won't be filled up/grown. The number of lines
+ * (nrecs) will be updated correctly anyway by
+ * xdl_prepare_ctx().
+ */
+ sample = (XDF_DIFF_ALG(xpp->flags) == XDF_HISTOGRAM_DIFF
+ ? XDL_GUESS_NLINES2 : XDL_GUESS_NLINES1);
+
+ enl1 = xdl_guess_lines(mf1, sample) + 1;
+ enl2 = xdl_guess_lines(mf2, sample) + 1;
+
+ if (xdl_init_classifier(&cf, enl1 + enl2 + 1, xpp->flags) < 0)
+ return -1;
+
+ if (xdl_prepare_ctx(1, mf1, enl1, xpp, &cf, &xe->xdf1) < 0) {
+
+ xdl_free_classifier(&cf);
+ return -1;
+ }
+ if (xdl_prepare_ctx(2, mf2, enl2, xpp, &cf, &xe->xdf2) < 0) {
+
+ xdl_free_ctx(&xe->xdf1);
+ xdl_free_classifier(&cf);
+ return -1;
+ }
+
+ if ((XDF_DIFF_ALG(xpp->flags) != XDF_PATIENCE_DIFF) &&
+ (XDF_DIFF_ALG(xpp->flags) != XDF_HISTOGRAM_DIFF) &&
+ xdl_optimize_ctxs(&cf, &xe->xdf1, &xe->xdf2) < 0) {
+
+ xdl_free_ctx(&xe->xdf2);
+ xdl_free_ctx(&xe->xdf1);
+ xdl_free_classifier(&cf);
+ return -1;
+ }
+
+ xdl_free_classifier(&cf);
+
+ return 0;
+}
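For orientation, a hypothetical driver showing the call order that xdl_prepare_env() participates in; every function named here appears elsewhere in this patch, but the wrapper itself is only a sketch:

static int diff_sketch(mmfile_t *a, mmfile_t *b, xpparam_t const *xpp)
{
	xdfenv_t xe;
	xdchange_t *xscr = NULL;

	if (xdl_do_diff(a, b, xpp, &xe) < 0)	/* calls xdl_prepare_env() */
		return -1;
	if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 ||
	    xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 ||
	    xdl_build_script(&xe, &xscr) < 0) {
		xdl_free_env(&xe);
		return -1;
	}
	/* ... walk the xscr change list ... */
	xdl_free_script(xscr);
	xdl_free_env(&xe);
	return 0;
}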
diff --git a/xdiff/xtypes.h b/xdiff/xtypes.h
index 8442bd436e..f145abba3e 100644
--- a/xdiff/xtypes.h
+++ b/xdiff/xtypes.h
@@ -39,23 +39,18 @@ typedef struct s_chastore {
} chastore_t;
typedef struct s_xrecord {
- struct s_xrecord *next;
char const *ptr;
long size;
unsigned long ha;
} xrecord_t;
typedef struct s_xdfile {
- chastore_t rcha;
+ xrecord_t *recs;
long nrec;
- unsigned int hbits;
- xrecord_t **rhash;
long dstart, dend;
- xrecord_t **recs;
- char *rchg;
+ bool *changed;
long *rindex;
long nreff;
- unsigned long *ha;
} xdfile_t;
typedef struct s_xdfenv {
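With the slimmed-down xdfile_t, walking one side's per-line state needs only the two flat arrays; a minimal sketch (function name hypothetical, the sentinel entries around changed[] are not touched):

#include <stdio.h>

static void dump_side_sketch(const xdfile_t *xdf)
{
	for (long i = 0; i < xdf->nrec; i++)
		fprintf(stderr, "%c %.*s", xdf->changed[i] ? 'C' : ' ',
			(int)xdf->recs[i].size, xdf->recs[i].ptr);
}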
diff --git a/xdiff/xutils.c b/xdiff/xutils.c
index 78d1cf74b1..447e66c719 100644
--- a/xdiff/xutils.c
+++ b/xdiff/xutils.c
@@ -464,17 +464,17 @@ int xdl_fall_back_diff(xdfenv_t *diff_env, xpparam_t const *xpp,
mmfile_t subfile1, subfile2;
xdfenv_t env;
- subfile1.ptr = (char *)diff_env->xdf1.recs[line1 - 1]->ptr;
- subfile1.size = diff_env->xdf1.recs[line1 + count1 - 2]->ptr +
- diff_env->xdf1.recs[line1 + count1 - 2]->size - subfile1.ptr;
- subfile2.ptr = (char *)diff_env->xdf2.recs[line2 - 1]->ptr;
- subfile2.size = diff_env->xdf2.recs[line2 + count2 - 2]->ptr +
- diff_env->xdf2.recs[line2 + count2 - 2]->size - subfile2.ptr;
+ subfile1.ptr = (char *)diff_env->xdf1.recs[line1 - 1].ptr;
+ subfile1.size = diff_env->xdf1.recs[line1 + count1 - 2].ptr +
+ diff_env->xdf1.recs[line1 + count1 - 2].size - subfile1.ptr;
+ subfile2.ptr = (char *)diff_env->xdf2.recs[line2 - 1].ptr;
+ subfile2.size = diff_env->xdf2.recs[line2 + count2 - 2].ptr +
+ diff_env->xdf2.recs[line2 + count2 - 2].size - subfile2.ptr;
if (xdl_do_diff(&subfile1, &subfile2, xpp, &env) < 0)
return -1;
- memcpy(diff_env->xdf1.rchg + line1 - 1, env.xdf1.rchg, count1);
- memcpy(diff_env->xdf2.rchg + line2 - 1, env.xdf2.rchg, count2);
+ memcpy(diff_env->xdf1.changed + line1 - 1, env.xdf1.changed, count1);
+ memcpy(diff_env->xdf2.changed + line2 - 1, env.xdf2.changed, count2);
xdl_free_env(&env);
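The sub-file handed to the recursive diff above is just a byte span over consecutive records; a minimal sketch of that span computation (function name hypothetical, mmfile_t/xdfile_t as used in this patch):

static mmfile_t subfile_sketch(const xdfile_t *xdf, long line, long count)
{
	mmfile_t mf;
	const xrecord_t *first = &xdf->recs[line - 1];
	const xrecord_t *last = &xdf->recs[line + count - 2];

	mf.ptr = (char *)first->ptr;
	mf.size = last->ptr + last->size - first->ptr;
	return mf;
}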