-rw-r--r--  .github/workflows/main.yml | 28
-rw-r--r--  Documentation/MyFirstContribution.adoc | 5
-rw-r--r--  Documentation/RelNotes/2.52.0.adoc | 48
-rw-r--r--  Documentation/SubmittingPatches | 28
-rw-r--r--  Documentation/config/maintenance.adoc | 49
-rw-r--r--  Documentation/git-bisect.adoc | 43
-rw-r--r--  Documentation/git-checkout.adoc | 4
-rw-r--r--  Documentation/git-patch-id.adoc | 22
-rw-r--r--  Documentation/git-repo.adoc | 30
-rw-r--r--  Documentation/gitcli.adoc | 2
-rw-r--r--  Documentation/gitignore.adoc | 5
-rwxr-xr-x  GIT-VERSION-GEN | 2
-rw-r--r--  add-patch.c | 13
-rw-r--r--  builtin/bisect.c | 21
-rw-r--r--  builtin/fast-export.c | 79
-rw-r--r--  builtin/fast-import.c | 284
-rw-r--r--  builtin/gc.c | 313
-rw-r--r--  builtin/pack-objects.c | 37
-rw-r--r--  builtin/repo.c | 380
-rw-r--r--  commit-reach.c | 14
-rw-r--r--  config.c | 2
-rw-r--r--  contrib/credential/libsecret/Makefile | 7
-rw-r--r--  contrib/credential/osxkeychain/Makefile | 7
-rw-r--r--  diff.c | 57
-rw-r--r--  diff.h | 2
-rw-r--r--  dir.c | 17
-rw-r--r--  gpg-interface.c | 4
-rw-r--r--  gpg-interface.h | 6
-rw-r--r--  http-push.c | 6
-rw-r--r--  http-walker.c | 26
-rw-r--r--  http.c | 21
-rw-r--r--  http.h | 5
-rw-r--r--  midx.c | 2
-rw-r--r--  packfile.c | 224
-rw-r--r--  packfile.h | 70
-rw-r--r--  parse-options.c | 8
-rw-r--r--  ref-filter.c | 6
-rw-r--r--  ref-filter.h | 2
-rw-r--r--  refs/debug.c | 9
-rw-r--r--  refs/files-backend.c | 2
-rw-r--r--  refs/reftable-backend.c | 2
-rw-r--r--  t/helper/test-delete-gpgsig.c | 7
-rw-r--r--  t/lib-gpg.sh | 1
-rw-r--r--  t/meson.build | 1
-rwxr-xr-x  t/perf/p6010-merge-base.sh | 101
-rwxr-xr-x  t/t0008-ignores.sh | 11
-rw-r--r--  t/t0450/adoc-help-mismatches | 1
-rwxr-xr-x  t/t1016-compatObjectFormat.sh | 6
-rwxr-xr-x  t/t1016/gpg | 2
-rwxr-xr-x  t/t1901-repo-structure.sh | 129
-rwxr-xr-x  t/t3070-wildmatch.sh | 2
-rwxr-xr-x  t/t3701-add-interactive.sh | 11
-rwxr-xr-x  t/t4020-diff-external.sh | 10
-rwxr-xr-x  t/t7900-maintenance.sh | 245
-rwxr-xr-x  t/t9300-fast-import.sh | 20
55 files changed, 1875 insertions, 564 deletions
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index cc54824c38..816d5a34c4 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -123,7 +123,7 @@ jobs:
- name: zip up tracked files
run: git archive -o artifacts/tracked.tar.gz HEAD
- name: upload tracked files and build artifacts
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
with:
name: windows-artifacts
path: artifacts
@@ -140,7 +140,7 @@ jobs:
cancel-in-progress: ${{ needs.ci-config.outputs.skip_concurrent == 'yes' }}
steps:
- name: download tracked files and build artifacts
- uses: actions/download-artifact@v5
+ uses: actions/download-artifact@v6
with:
name: windows-artifacts
path: ${{github.workspace}}
@@ -157,7 +157,7 @@ jobs:
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
with:
name: failed-tests-windows-${{ matrix.nr }}
path: ${{env.FAILED_TEST_ARTIFACTS}}
@@ -208,7 +208,7 @@ jobs:
- name: zip up tracked files
run: git archive -o artifacts/tracked.tar.gz HEAD
- name: upload tracked files and build artifacts
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
with:
name: vs-artifacts
path: artifacts
@@ -226,7 +226,7 @@ jobs:
steps:
- uses: git-for-windows/setup-git-for-windows-sdk@v1
- name: download tracked files and build artifacts
- uses: actions/download-artifact@v5
+ uses: actions/download-artifact@v6
with:
name: vs-artifacts
path: ${{github.workspace}}
@@ -244,7 +244,7 @@ jobs:
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
with:
name: failed-tests-windows-vs-${{ matrix.nr }}
path: ${{env.FAILED_TEST_ARTIFACTS}}
@@ -270,7 +270,7 @@ jobs:
shell: pwsh
run: meson compile -C build
- name: Upload build artifacts
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
with:
name: windows-meson-artifacts
path: build
@@ -292,7 +292,7 @@ jobs:
shell: pwsh
run: pip install meson ninja
- name: Download build artifacts
- uses: actions/download-artifact@v5
+ uses: actions/download-artifact@v6
with:
name: windows-meson-artifacts
path: build
@@ -313,16 +313,16 @@ jobs:
vector:
- jobname: osx-clang
cc: clang
- pool: macos-13
+ pool: macos-14
- jobname: osx-reftable
cc: clang
- pool: macos-13
+ pool: macos-14
- jobname: osx-gcc
cc: gcc-13
- pool: macos-13
+ pool: macos-14
- jobname: osx-meson
cc: clang
- pool: macos-13
+ pool: macos-14
env:
CC: ${{matrix.vector.cc}}
CC_PACKAGE: ${{matrix.vector.cc_package}}
@@ -339,7 +339,7 @@ jobs:
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
with:
name: failed-tests-${{matrix.vector.jobname}}
path: ${{env.FAILED_TEST_ARTIFACTS}}
@@ -439,7 +439,7 @@ jobs:
run: sudo --preserve-env --set-home --user=builder ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
with:
name: failed-tests-${{matrix.vector.jobname}}
path: ${{env.FAILED_TEST_ARTIFACTS}}
diff --git a/Documentation/MyFirstContribution.adoc b/Documentation/MyFirstContribution.adoc
index 02ba8ba5f6..f186dfbc89 100644
--- a/Documentation/MyFirstContribution.adoc
+++ b/Documentation/MyFirstContribution.adoc
@@ -1153,6 +1153,11 @@ NOTE: When you are sending a real patch, it will go to git@vger.kernel.org - but
please don't send your patchset from the tutorial to the real mailing list! For
now, you can send it to yourself, to make sure you understand how it will look.
+NOTE: After sending your patches, you can confirm that they reached the mailing
+list by visiting https://lore.kernel.org/git/. Use the search bar to find your
+name or the subject of your patch. If it appears, your email was successfully
+delivered.
+
After you run the command above, you will be presented with an interactive
prompt for each patch that's about to go out. This gives you one last chance to
edit or quit sending something (but again, don't edit code this way). Once you
diff --git a/Documentation/RelNotes/2.52.0.adoc b/Documentation/RelNotes/2.52.0.adoc
index a86e2c09e0..6c0e7d05c0 100644
--- a/Documentation/RelNotes/2.52.0.adoc
+++ b/Documentation/RelNotes/2.52.0.adoc
@@ -70,6 +70,15 @@ UI, Workflows & Features
* "Symlink symref" has been added to the list of things that will
disappear at Git 3.0 boundary.
+ * "git maintenance" command learns the "geometric" strategy where it
+ avoids doing maintenance tasks that rebuilds everything from
+ scratch.
+
+ * "git repo structure", a new command.
+
+ * The help text and manual page of the "git bisect" command have
+   been made consistent with each other.
+
Performance, Internal Implementation, Development Support etc.
--------------------------------------------------------------
@@ -158,6 +167,21 @@ Performance, Internal Implementation, Development Support etc.
* Two slightly different ways to get at "all the packfiles" in API
has been cleaned up.
+ * The code to walk the revision graph to compute merge bases has
+   been optimized.
+
+ * AI guidelines have been added to our documentation set.
+
+ * Contributed credential helpers (obviously in contrib/) now have a
+   "cd $there && make install" target.
+
+ * The "MyFirstContribution" tutorial tells the reader how to send out
+ their patches; the section gained a hint to verify the message
+ reached the mailing list.
+
+ * The "debug" ref-backend was missing a method implementation, which
+ has been corrected.
+
Fixes since v2.51
-----------------
@@ -382,6 +406,27 @@ including security updates, are included in this release.
and "git bisect unknown", which has been corrected.
(merge 2bb3a012f3 rz/bisect-help-unknown later to maint).
+ * The 'q'(uit) command in "git add -p" has been improved to quit
+ without doing any meaningless work before leaving, and giving EOF
+ (typically control-D) to the prompt is made to behave the same way.
+
+ * The wildmatch code had a corner-case bug that mistakenly made
+   "foo**/bar" match "foobar", which has been corrected.
+ (merge 1940a02dc1 jk/match-pathname-fix later to maint).
+
+ * Tests did not set up GNUPGHOME correctly, which has been fixed;
+   this exposed some flaky tests in t1016 that need to be addressed
+   before this topic can move forward.
+ (merge 6cd8369ef3 tz/test-prepare-gnupghome later to maint).
+
+ * The patterns used in the .gitignore files use backslash in the way
+ documented for fnmatch(3); document as such to reduce confusion.
+ (merge 8a6d158a1d jk/doc-backslash-in-exclude later to maint).
+
+ * The macOS image used in GitHub CI has been updated to macos-14,
+   as the macos-13 image we have been using has been deprecated.
+ (merge 73b9cdb7c4 jc/ci-use-macos-14 later to maint).
+
* Other code cleanup, docfix, build fix, etc.
(merge 529a60a885 ua/t1517-short-help-tests later to maint).
(merge 22d421fed9 ac/deglobal-fmt-merge-log-config later to maint).
@@ -393,3 +438,6 @@ including security updates, are included in this release.
(merge a66fc22bf9 rs/get-oid-with-flags-cleanup later to maint).
(merge 15b8abde07 js/mingw-includes-cleanup later to maint).
(merge 2cebca0582 tb/cat-file-objectmode-update later to maint).
+ (merge 8f487db07a kh/doc-patch-id-1 later to maint).
+ (merge f711f37b05 eb/t1016-hash-transition-fix later to maint).
+ (merge 85333aa1af jk/test-delete-gpgsig-leakfix later to maint).
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index d620bd93bd..e270ccbe85 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -446,6 +446,34 @@ highlighted above.
Only capitalize the very first letter of the trailer, i.e. favor
"Signed-off-by" over "Signed-Off-By" and "Acked-by:" over "Acked-By".
+[[ai]]
+=== Use of Artificial Intelligence (AI)
+
+The Developer's Certificate of Origin requires contributors to certify
+that they know the origin of their contributions to the project and
+that they have the right to submit them under the project's license.
+It's not yet clear that this can be legally satisfied when submitting
+a significant amount of content that has been generated by AI tools.
+
+Another issue with AI generated content is that AIs still often
+hallucinate or just produce bad code, commit messages, documentation
+or output, even when you point out their mistakes.
+
+To avoid these issues, we will reject anything that looks AI
+generated, that sounds overly formal or bloated, that looks like AI
+slop, that looks good on the surface but makes no sense, or that
+senders don’t understand or cannot explain.
+
+We strongly recommend using AI tools carefully and responsibly.
+
+Contributors would often benefit more from AI by using it to guide and
+help them step by step towards producing a solution by themselves
+rather than by asking for a full solution that they would then mostly
+copy-paste. They can also use AI to help with debugging, or with
+checking for obvious mistakes, things that can be improved, things
+that don’t match our style, guidelines or our feedback, before sending
+it to us.
+
[[git-tools]]
=== Generate your patch using Git tools out of your commits.
diff --git a/Documentation/config/maintenance.adoc b/Documentation/config/maintenance.adoc
index 2f71934218..d0c38f03fa 100644
--- a/Documentation/config/maintenance.adoc
+++ b/Documentation/config/maintenance.adoc
@@ -16,19 +16,36 @@ detach.
maintenance.strategy::
This string config option provides a way to specify one of a few
- recommended schedules for background maintenance. This only affects
- which tasks are run during `git maintenance run --schedule=X`
- commands, provided no `--task=<task>` arguments are provided.
- Further, if a `maintenance.<task>.schedule` config value is set,
- then that value is used instead of the one provided by
- `maintenance.strategy`. The possible strategy strings are:
+ recommended strategies for repository maintenance. This affects
+ which tasks are run during `git maintenance run`, provided no
+ `--task=<task>` arguments are provided. This setting impacts manual
+ maintenance, auto-maintenance as well as scheduled maintenance. The
+ tasks that run may be different depending on the maintenance type.
+
-* `none`: This default setting implies no tasks are run at any schedule.
+The maintenance strategy can be further tweaked by setting
+`maintenance.<task>.enabled` and `maintenance.<task>.schedule`. If set, these
+values are used instead of the defaults provided by `maintenance.strategy`.
++
+The possible strategies are:
++
+* `none`: This strategy implies no tasks are run at all. This is the default
+ strategy for scheduled maintenance.
+* `gc`: This strategy runs the `gc` task. This is the default strategy for
+ manual maintenance.
+* `geometric`: This strategy performs geometric repacking of packfiles and
+ keeps auxiliary data structures up-to-date. The strategy expires data in the
+ reflog and removes worktrees that cannot be located anymore. When the
+ geometric repacking strategy would decide to do an all-into-one repack, it
+ generates a cruft pack for all unreachable objects. Objects that
+ are already part of a cruft pack will be expired.
++
+This repacking strategy is a full replacement for the `gc` strategy and is
+recommended for large repositories.
* `incremental`: This setting optimizes for performing small maintenance
activities that do not delete any data. This does not schedule the `gc`
task, but runs the `prefetch` and `commit-graph` tasks hourly, the
`loose-objects` and `incremental-repack` tasks daily, and the `pack-refs`
- task weekly.
+ task weekly. Manual repository maintenance uses the `gc` task.
maintenance.<task>.enabled::
This boolean config option controls whether the maintenance task
@@ -75,6 +92,22 @@ maintenance.incremental-repack.auto::
number of pack-files not in the multi-pack-index is at least the value
of `maintenance.incremental-repack.auto`. The default value is 10.
+maintenance.geometric-repack.auto::
+ This integer config option controls how often the `geometric-repack`
+ task should be run as part of `git maintenance run --auto`. If zero,
+ then the `geometric-repack` task will not run with the `--auto`
+ option. A negative value will force the task to run every time.
+ Otherwise, a positive value implies the command should run either when
+ there are packfiles that need to be merged together to retain the
+ geometric progression, or when there are at least this many loose
+ objects that would be written into a new packfile. The default value is
+ 100.
+
+maintenance.geometric-repack.splitFactor::
+ This integer config option controls the factor used for the geometric
+ sequence. See the `--geometric=` option in linkgit:git-repack[1] for
+ more details. Defaults to `2`.
+
maintenance.reflog-expire.auto::
This integer config option controls how often the `reflog-expire` task
should be run as part of `git maintenance run --auto`. If zero, then
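To make the configuration described in this hunk concrete, here is a minimal
sketch of how a repository might opt in, assuming the `geometric` strategy and
the `maintenance.geometric-repack.*` keys land exactly as documented above:

------------
# Opt the repository into the geometric maintenance strategy.
$ git config maintenance.strategy geometric

# Optionally tune the split factor fed to `git repack --geometric=<n>`
# (defaults to 2 per the documentation above).
$ git config maintenance.geometric-repack.splitFactor 4

# Auto maintenance then triggers the geometric-repack task once the
# thresholds governed by maintenance.geometric-repack.auto are met.
$ git maintenance run --auto
------------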
diff --git a/Documentation/git-bisect.adoc b/Documentation/git-bisect.adoc
index 58dbb74a15..b0078dda0e 100644
--- a/Documentation/git-bisect.adoc
+++ b/Documentation/git-bisect.adoc
@@ -9,26 +9,22 @@ git-bisect - Use binary search to find the commit that introduced a bug
SYNOPSIS
--------
[verse]
-'git bisect' <subcommand> <options>
+'git bisect' start [--term-(bad|new)=<term-new> --term-(good|old)=<term-old>]
+ [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<pathspec>...]
+'git bisect' (bad|new|<term-new>) [<rev>]
+'git bisect' (good|old|<term-old>) [<rev>...]
+'git bisect' terms [--term-(good|old) | --term-(bad|new)]
+'git bisect' skip [(<rev>|<range>)...]
+'git bisect' next
+'git bisect' reset [<commit>]
+'git bisect' (visualize|view)
+'git bisect' replay <logfile>
+'git bisect' log
+'git bisect' run <cmd> [<arg>...]
+'git bisect' help
DESCRIPTION
-----------
-The command takes various subcommands, and different options depending
-on the subcommand:
-
- git bisect start [--term-(bad|new)=<term-new> --term-(good|old)=<term-old>]
- [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<pathspec>...]
- git bisect (bad|new|<term-new>) [<rev>]
- git bisect (good|old|<term-old>) [<rev>...]
- git bisect terms [--term-(good|old) | --term-(bad|new)]
- git bisect skip [(<rev>|<range>)...]
- git bisect reset [<commit>]
- git bisect (visualize|view)
- git bisect replay <logfile>
- git bisect log
- git bisect run <cmd> [<arg>...]
- git bisect help
-
This command uses a binary search algorithm to find which commit in
your project's history introduced a bug. You use it by first telling
it a "bad" commit that is known to contain the bug, and a "good"
@@ -295,6 +291,19 @@ $ git bisect skip v2.5 v2.5..v2.6
This tells the bisect process that the commits between `v2.5` and
`v2.6` (inclusive) should be skipped.
+Bisect next
+~~~~~~~~~~~
+
+Normally, after marking a revision as good or bad, Git automatically
+computes and checks out the next revision to test. However, if you need to
+explicitly request the next bisection step, you can use:
+
+------------
+$ git bisect next
+------------
+
+You might use this to resume the bisection process after interrupting it
+by checking out a different revision.
Cutting down bisection by giving more parameters to bisect start
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
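To put the reorganized synopsis above into context, a typical bisection
session looks roughly like this (a sketch only; the revisions and the test
command are placeholders):

------------
$ git bisect start HEAD v2.6      # bad revision first, then a good one
$ git bisect run make test        # let a command drive the search
$ git bisect reset                # return to the original checkout
------------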
diff --git a/Documentation/git-checkout.adoc b/Documentation/git-checkout.adoc
index 431185ca0b..6f281b298e 100644
--- a/Documentation/git-checkout.adoc
+++ b/Documentation/git-checkout.adoc
@@ -61,7 +61,7 @@ uncommitted changes.
`git checkout -B <branch> [<start-point>]`::
The same as `-b`, except that if the branch already exists it
- resets `_<branch>_` to the start point instead of failing.
+ resets _<branch>_ to the start point instead of failing.
`git checkout --detach [<branch>]`::
`git checkout [--detach] <commit>`::
@@ -155,7 +155,7 @@ of it").
`-B <new-branch>`::
The same as `-b`, except that if the branch already exists it
- resets `_<branch>_` to the start point instead of failing.
+ resets _<branch>_ to the start point instead of failing.
`-t`::
`--track[=(direct|inherit)]`::
diff --git a/Documentation/git-patch-id.adoc b/Documentation/git-patch-id.adoc
index 45da0f27ac..92a1af36a2 100644
--- a/Documentation/git-patch-id.adoc
+++ b/Documentation/git-patch-id.adoc
@@ -7,8 +7,8 @@ git-patch-id - Compute unique ID for a patch
SYNOPSIS
--------
-[verse]
-'git patch-id' [--stable | --unstable | --verbatim]
+[synopsis]
+git patch-id [--stable | --unstable | --verbatim]
DESCRIPTION
-----------
@@ -21,7 +21,7 @@ the same time also reasonably unique, i.e., two patches that have the same
The main usecase for this command is to look for likely duplicate commits.
-When dealing with 'git diff-tree' output, it takes advantage of
+When dealing with `git diff-tree` output, it takes advantage of
the fact that the patch is prefixed with the object name of the
commit, and outputs two 40-byte hexadecimal strings. The first
string is the patch ID, and the second string is the commit ID.
@@ -30,35 +30,35 @@ This can be used to make a mapping from patch ID to commit ID.
OPTIONS
-------
---verbatim::
+`--verbatim`::
Calculate the patch-id of the input as it is given, do not strip
any whitespace.
+
-This is the default if patchid.verbatim is true.
+This is the default if `patchid.verbatim` is `true`.
---stable::
+`--stable`::
Use a "stable" sum of hashes as the patch ID. With this option:
+
--
- Reordering file diffs that make up a patch does not affect the ID.
In particular, two patches produced by comparing the same two trees
- with two different settings for "-O<orderfile>" result in the same
+ with two different settings for `-O<orderfile>` result in the same
patch ID signature, thereby allowing the computed result to be used
as a key to index some meta-information about the change between
the two trees;
- Result is different from the value produced by git 1.9 and older
- or produced when an "unstable" hash (see --unstable below) is
+ or produced when an "unstable" hash (see `--unstable` below) is
configured - even when used on a diff output taken without any use
- of "-O<orderfile>", thereby making existing databases storing such
+ of `-O<orderfile>`, thereby making existing databases storing such
"unstable" or historical patch-ids unusable.
- All whitespace within the patch is ignored and does not affect the id.
--
+
-This is the default if patchid.stable is set to true.
+This is the default if `patchid.stable` is set to `true`.
---unstable::
+`--unstable`::
Use an "unstable" hash as the patch ID. With this option,
the result produced is compatible with the patch-id value produced
by git 1.9 and older and whitespace is ignored. Users with pre-existing
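As a concrete illustration of the `git diff-tree` workflow mentioned in the
description above (a sketch; any commit can be substituted for HEAD):

------------
$ git diff-tree -p HEAD | git patch-id --stable
------------

Because the `git diff-tree` output is prefixed with the commit's object name,
the command prints two hexadecimal strings: the patch ID followed by the
commit ID.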
diff --git a/Documentation/git-repo.adoc b/Documentation/git-repo.adoc
index 209afd1b61..ce43cb19c8 100644
--- a/Documentation/git-repo.adoc
+++ b/Documentation/git-repo.adoc
@@ -9,6 +9,7 @@ SYNOPSIS
--------
[synopsis]
git repo info [--format=(keyvalue|nul)] [-z] [<key>...]
+git repo structure [--format=(table|keyvalue|nul)]
DESCRIPTION
-----------
@@ -43,6 +44,35 @@ supported:
+
`-z` is an alias for `--format=nul`.
+`structure [--format=(table|keyvalue|nul)]`::
+ Retrieve statistics about the current repository structure. The
+ following kinds of information are reported:
++
+* Reference counts categorized by type
+* Reachable object counts categorized by type
+
++
+The output format can be chosen through the flag `--format`. Three formats are
+supported:
++
+`table`:::
+ Outputs repository stats in a human-friendly table. This format may
+ change and is not intended for machine parsing. This is the default
+ format.
+
+`keyvalue`:::
+ Each line of output contains a key-value pair for a repository stat.
+ The '=' character is used to delimit between the key and the value.
+ Values containing "unusual" characters are quoted as explained for the
+ configuration variable `core.quotePath` (see linkgit:git-config[1]).
+
+`nul`:::
+ Similar to `keyvalue`, but uses a NUL character to delimit between
+ key-value pairs instead of a newline. Also uses a newline character as
+ the delimiter between the key and value instead of '='. Unlike the
+ `keyvalue` format, values containing "unusual" characters are never
+ quoted.
+
INFO KEYS
---------
In order to obtain a set of values from `git repo info`, you should provide
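A rough usage sketch of the new subcommand, based solely on the synopsis
above (the reported keys depend on the repository and are not shown here):

------------
$ git repo structure                      # human-friendly table
$ git repo structure --format=keyvalue    # one key=value pair per line
$ git repo structure --format=nul         # NUL-delimited, script-friendly
------------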
diff --git a/Documentation/gitcli.adoc b/Documentation/gitcli.adoc
index ef2a0a399d..6815d6bfb7 100644
--- a/Documentation/gitcli.adoc
+++ b/Documentation/gitcli.adoc
@@ -223,7 +223,7 @@ Options that take a filename allow a prefix `:(optional)`. For example:
----------------------------
git commit -F :(optional)COMMIT_EDITMSG
-# if COMMIT_EDITMSG does not exist, equivalent to
+# if COMMIT_EDITMSG does not exist, the above is equivalent to
git commit
----------------------------
diff --git a/Documentation/gitignore.adoc b/Documentation/gitignore.adoc
index 5e0964ef41..9fccab4ae8 100644
--- a/Documentation/gitignore.adoc
+++ b/Documentation/gitignore.adoc
@@ -111,6 +111,11 @@ PATTERN FORMAT
one of the characters in a range. See fnmatch(3) and the
FNM_PATHNAME flag for a more detailed description.
+ - A backslash ("`\`") can be used to escape any character. E.g., "`\*`"
+ matches a literal asterisk (and "`\a`" matches "`a`", even though
+ there is no need for escaping there). As with fnmatch(3), a backslash
+ at the end of a pattern is an invalid pattern that never matches.
+
Two consecutive asterisks ("`**`") in patterns matched against
full pathname may have special meaning:
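A small illustration of the backslash rule documented in this hunk (a sketch;
the patterns are made up):

------------
# Ignore only a file literally named "*.txt" ...
\*.txt
# ... whereas the unescaped pattern ignores every file ending in ".txt".
*.txt
------------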
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index b16db85e77..4929570f2c 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,6 +1,6 @@
#!/bin/sh
-DEF_VER=v2.51.GIT
+DEF_VER=v2.52.0-rc1
LF='
'
diff --git a/add-patch.c b/add-patch.c
index ae9a20d8f2..173a53241e 100644
--- a/add-patch.c
+++ b/add-patch.c
@@ -1569,8 +1569,10 @@ static int patch_update_file(struct add_p_state *s,
if (*s->s.reset_color_interactive)
fputs(s->s.reset_color_interactive, stdout);
fflush(stdout);
- if (read_single_character(s) == EOF)
+ if (read_single_character(s) == EOF) {
+ quit = 1;
break;
+ }
if (!s->answer.len)
continue;
@@ -1601,7 +1603,7 @@ soft_increment:
} else if (hunk->use == UNDECIDED_HUNK) {
hunk->use = USE_HUNK;
}
- } else if (ch == 'd' || ch == 'q') {
+ } else if (ch == 'd') {
if (file_diff->hunk_nr) {
for (; hunk_index < file_diff->hunk_nr; hunk_index++) {
hunk = file_diff->hunk + hunk_index;
@@ -1613,10 +1615,9 @@ soft_increment:
} else if (hunk->use == UNDECIDED_HUNK) {
hunk->use = SKIP_HUNK;
}
- if (ch == 'q') {
- quit = 1;
- break;
- }
+ } else if (ch == 'q') {
+ quit = 1;
+ break;
} else if (s->answer.buf[0] == 'K') {
if (permitted & ALLOW_GOTO_PREVIOUS_HUNK)
hunk_index = dec_mod(hunk_index,
diff --git a/builtin/bisect.c b/builtin/bisect.c
index 993caf545d..ccff4e1a1b 100644
--- a/builtin/bisect.c
+++ b/builtin/bisect.c
@@ -27,13 +27,14 @@ static GIT_PATH_FUNC(git_path_bisect_first_parent, "BISECT_FIRST_PARENT")
static GIT_PATH_FUNC(git_path_bisect_run, "BISECT_RUN")
#define BUILTIN_GIT_BISECT_START_USAGE \
- N_("git bisect start [--term-(new|bad)=<term> --term-(old|good)=<term>]" \
- " [--no-checkout] [--first-parent] [<bad> [<good>...]] [--]" \
- " [<pathspec>...]")
-#define BUILTIN_GIT_BISECT_STATE_USAGE \
- N_("git bisect (good|bad) [<rev>...]")
+ N_("git bisect start [--term-(bad|new)=<term-new> --term-(good|old)=<term-old>]\n" \
+ " [--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<pathspec>...]")
+#define BUILTIN_GIT_BISECT_BAD_USAGE \
+ N_("git bisect (bad|new|<term-new>) [<rev>]")
+#define BUILTIN_GIT_BISECT_GOOD_USAGE \
+ N_("git bisect (good|old|<term-old>) [<rev>...]")
#define BUILTIN_GIT_BISECT_TERMS_USAGE \
- "git bisect terms [--term-good | --term-bad]"
+ "git bisect terms [--term-(good|old) | --term-(bad|new)]"
#define BUILTIN_GIT_BISECT_SKIP_USAGE \
N_("git bisect skip [(<rev>|<range>)...]")
#define BUILTIN_GIT_BISECT_NEXT_USAGE \
@@ -41,17 +42,20 @@ static GIT_PATH_FUNC(git_path_bisect_run, "BISECT_RUN")
#define BUILTIN_GIT_BISECT_RESET_USAGE \
N_("git bisect reset [<commit>]")
#define BUILTIN_GIT_BISECT_VISUALIZE_USAGE \
- "git bisect visualize"
+ "git bisect (visualize|view)"
#define BUILTIN_GIT_BISECT_REPLAY_USAGE \
N_("git bisect replay <logfile>")
#define BUILTIN_GIT_BISECT_LOG_USAGE \
"git bisect log"
#define BUILTIN_GIT_BISECT_RUN_USAGE \
N_("git bisect run <cmd> [<arg>...]")
+#define BUILTIN_GIT_BISECT_HELP_USAGE \
+ "git bisect help"
static const char * const git_bisect_usage[] = {
BUILTIN_GIT_BISECT_START_USAGE,
- BUILTIN_GIT_BISECT_STATE_USAGE,
+ BUILTIN_GIT_BISECT_BAD_USAGE,
+ BUILTIN_GIT_BISECT_GOOD_USAGE,
BUILTIN_GIT_BISECT_TERMS_USAGE,
BUILTIN_GIT_BISECT_SKIP_USAGE,
BUILTIN_GIT_BISECT_NEXT_USAGE,
@@ -60,6 +64,7 @@ static const char * const git_bisect_usage[] = {
BUILTIN_GIT_BISECT_REPLAY_USAGE,
BUILTIN_GIT_BISECT_LOG_USAGE,
BUILTIN_GIT_BISECT_RUN_USAGE,
+ BUILTIN_GIT_BISECT_HELP_USAGE,
NULL
};
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index 7adbc55f0d..0421360ab7 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -65,7 +65,7 @@ static int parse_opt_sign_mode(const struct option *opt,
return 0;
if (parse_sign_mode(arg, val))
- return error("Unknown %s mode: %s", opt->long_name, arg);
+ return error(_("unknown %s mode: %s"), opt->long_name, arg);
return 0;
}
@@ -82,7 +82,7 @@ static int parse_opt_tag_of_filtered_mode(const struct option *opt,
else if (!strcmp(arg, "rewrite"))
*val = REWRITE;
else
- return error("Unknown tag-of-filtered mode: %s", arg);
+ return error(_("unknown tag-of-filtered mode: %s"), arg);
return 0;
}
@@ -107,7 +107,7 @@ static int parse_opt_reencode_mode(const struct option *opt,
if (!strcasecmp(arg, "abort"))
*val = REENCODE_ABORT;
else
- return error("Unknown reencoding mode: %s", arg);
+ return error(_("unknown reencoding mode: %s"), arg);
}
return 0;
@@ -318,16 +318,16 @@ static void export_blob(const struct object_id *oid)
} else {
buf = odb_read_object(the_repository->objects, oid, &type, &size);
if (!buf)
- die("could not read blob %s", oid_to_hex(oid));
+ die(_("could not read blob %s"), oid_to_hex(oid));
if (check_object_signature(the_repository, oid, buf, size,
type) < 0)
- die("oid mismatch in blob %s", oid_to_hex(oid));
+ die(_("oid mismatch in blob %s"), oid_to_hex(oid));
object = parse_object_buffer(the_repository, oid, type,
size, buf, &eaten);
}
if (!object)
- die("Could not read blob %s", oid_to_hex(oid));
+ die(_("could not read blob %s"), oid_to_hex(oid));
mark_next_object(object);
@@ -336,7 +336,7 @@ static void export_blob(const struct object_id *oid)
printf("original-oid %s\n", oid_to_hex(oid));
printf("data %"PRIuMAX"\n", (uintmax_t)size);
if (size && fwrite(buf, size, 1, stdout) != 1)
- die_errno("could not write blob '%s'", oid_to_hex(oid));
+ die_errno(_("could not write blob '%s'"), oid_to_hex(oid));
printf("\n");
show_progress();
@@ -499,10 +499,10 @@ static void show_filemodify(struct diff_queue_struct *q,
break;
default:
- die("Unexpected comparison status '%c' for %s, %s",
- q->queue[i]->status,
- ospec->path ? ospec->path : "none",
- spec->path ? spec->path : "none");
+ die(_("unexpected comparison status '%c' for %s, %s"),
+ q->queue[i]->status,
+ ospec->path ? ospec->path : _("none"),
+ spec->path ? spec->path : _("none"));
}
}
}
@@ -699,14 +699,14 @@ static void handle_commit(struct commit *commit, struct rev_info *rev,
author = strstr(commit_buffer_cursor, "\nauthor ");
if (!author)
- die("could not find author in commit %s",
+ die(_("could not find author in commit %s"),
oid_to_hex(&commit->object.oid));
author++;
commit_buffer_cursor = author_end = strchrnul(author, '\n');
committer = strstr(commit_buffer_cursor, "\ncommitter ");
if (!committer)
- die("could not find committer in commit %s",
+ die(_("could not find committer in commit %s"),
oid_to_hex(&commit->object.oid));
committer++;
commit_buffer_cursor = committer_end = strchrnul(committer, '\n');
@@ -781,8 +781,8 @@ static void handle_commit(struct commit *commit, struct rev_info *rev,
case REENCODE_NO:
break;
case REENCODE_ABORT:
- die("Encountered commit-specific encoding %.*s in commit "
- "%s; use --reencode=[yes|no] to handle it",
+ die(_("encountered commit-specific encoding %.*s in commit "
+ "%s; use --reencode=[yes|no] to handle it"),
(int)encoding_len, encoding,
oid_to_hex(&commit->object.oid));
}
@@ -798,11 +798,11 @@ static void handle_commit(struct commit *commit, struct rev_info *rev,
if (signatures.nr) {
switch (signed_commit_mode) {
case SIGN_ABORT:
- die("encountered signed commit %s; use "
- "--signed-commits=<mode> to handle it",
+ die(_("encountered signed commit %s; use "
+ "--signed-commits=<mode> to handle it"),
oid_to_hex(&commit->object.oid));
case SIGN_WARN_VERBATIM:
- warning("exporting %"PRIuMAX" signature(s) for commit %s",
+ warning(_("exporting %"PRIuMAX" signature(s) for commit %s"),
(uintmax_t)signatures.nr, oid_to_hex(&commit->object.oid));
/* fallthru */
case SIGN_VERBATIM:
@@ -812,7 +812,7 @@ static void handle_commit(struct commit *commit, struct rev_info *rev,
}
break;
case SIGN_WARN_STRIP:
- warning("stripping signature(s) from commit %s",
+ warning(_("stripping signature(s) from commit %s"),
oid_to_hex(&commit->object.oid));
/* fallthru */
case SIGN_STRIP:
@@ -890,7 +890,8 @@ static void handle_tag(const char *name, struct tag *tag)
tagged = ((struct tag *)tagged)->tagged;
}
if (tagged->type == OBJ_TREE) {
- warning("Omitting tag %s,\nsince tags of trees (or tags of tags of trees, etc.) are not supported.",
+ warning(_("omitting tag %s,\nsince tags of trees (or tags "
+ "of tags of trees, etc.) are not supported."),
oid_to_hex(&tag->object.oid));
return;
}
@@ -898,7 +899,7 @@ static void handle_tag(const char *name, struct tag *tag)
buf = odb_read_object(the_repository->objects, &tag->object.oid,
&type, &size);
if (!buf)
- die("could not read tag %s", oid_to_hex(&tag->object.oid));
+ die(_("could not read tag %s"), oid_to_hex(&tag->object.oid));
message = memmem(buf, size, "\n\n", 2);
if (message) {
message += 2;
@@ -935,17 +936,17 @@ static void handle_tag(const char *name, struct tag *tag)
if (sig_offset < message_size)
switch (signed_tag_mode) {
case SIGN_ABORT:
- die("encountered signed tag %s; use "
- "--signed-tags=<mode> to handle it",
+ die(_("encountered signed tag %s; use "
+ "--signed-tags=<mode> to handle it"),
oid_to_hex(&tag->object.oid));
case SIGN_WARN_VERBATIM:
- warning("exporting signed tag %s",
+ warning(_("exporting signed tag %s"),
oid_to_hex(&tag->object.oid));
/* fallthru */
case SIGN_VERBATIM:
break;
case SIGN_WARN_STRIP:
- warning("stripping signature from tag %s",
+ warning(_("stripping signature from tag %s"),
oid_to_hex(&tag->object.oid));
/* fallthru */
case SIGN_STRIP:
@@ -960,8 +961,8 @@ static void handle_tag(const char *name, struct tag *tag)
if (!tagged_mark) {
switch (tag_of_filtered_mode) {
case TAG_FILTERING_ABORT:
- die("tag %s tags unexported object; use "
- "--tag-of-filtered-object=<mode> to handle it",
+ die(_("tag %s tags unexported object; use "
+ "--tag-of-filtered-object=<mode> to handle it"),
oid_to_hex(&tag->object.oid));
case DROP:
/* Ignore this tag altogether */
@@ -969,7 +970,7 @@ static void handle_tag(const char *name, struct tag *tag)
return;
case REWRITE:
if (tagged->type == OBJ_TAG && !mark_tags) {
- die(_("Error: Cannot export nested tags unless --mark-tags is specified."));
+ die(_("cannot export nested tags unless --mark-tags is specified."));
} else if (tagged->type == OBJ_COMMIT) {
p = rewrite_commit((struct commit *)tagged);
if (!p) {
@@ -1025,7 +1026,7 @@ static struct commit *get_commit(struct rev_cmdline_entry *e, const char *full_n
tag = (struct tag *)tag->tagged;
}
if (!tag)
- die("Tag %s points nowhere?", e->name);
+ die(_("tag %s points nowhere?"), e->name);
return (struct commit *)tag;
}
default:
@@ -1063,7 +1064,7 @@ static void get_tags_and_duplicates(struct rev_cmdline_info *info)
commit = get_commit(e, full_name);
if (!commit) {
- warning("%s: Unexpected object of type %s, skipping.",
+ warning(_("%s: unexpected object of type %s, skipping."),
e->name,
type_name(e->item->type));
free(full_name);
@@ -1078,7 +1079,7 @@ static void get_tags_and_duplicates(struct rev_cmdline_info *info)
free(full_name);
continue;
default: /* OBJ_TAG (nested tags) is already handled */
- warning("Tag points to object of unexpected type %s, skipping.",
+ warning(_("tag points to object of unexpected type %s, skipping."),
type_name(commit->object.type));
free(full_name);
continue;
@@ -1174,7 +1175,7 @@ static void export_marks(char *file)
f = fopen_for_writing(file);
if (!f)
- die_errno("Unable to open marks file %s for writing.", file);
+ die_errno(_("unable to open marks file %s for writing."), file);
for (i = 0; i < idnums.size; i++) {
if (deco->base && deco->base->type == 1) {
@@ -1191,7 +1192,7 @@ static void export_marks(char *file)
e |= ferror(f);
e |= fclose(f);
if (e)
- error("Unable to write marks file %s.", file);
+ error(_("unable to write marks file %s."), file);
}
static void import_marks(char *input_file, int check_exists)
@@ -1214,20 +1215,20 @@ static void import_marks(char *input_file, int check_exists)
line_end = strchr(line, '\n');
if (line[0] != ':' || !line_end)
- die("corrupt mark line: %s", line);
+ die(_("corrupt mark line: %s"), line);
*line_end = '\0';
mark = strtoumax(line + 1, &mark_end, 10);
if (!mark || mark_end == line + 1
|| *mark_end != ' ' || get_oid_hex(mark_end + 1, &oid))
- die("corrupt mark line: %s", line);
+ die(_("corrupt mark line: %s"), line);
if (last_idnum < mark)
last_idnum = mark;
type = odb_read_object_info(the_repository->objects, &oid, NULL);
if (type < 0)
- die("object not found: %s", oid_to_hex(&oid));
+ die(_("object not found: %s"), oid_to_hex(&oid));
if (type != OBJ_COMMIT)
/* only commits */
@@ -1235,12 +1236,12 @@ static void import_marks(char *input_file, int check_exists)
commit = lookup_commit(the_repository, &oid);
if (!commit)
- die("not a commit? can't happen: %s", oid_to_hex(&oid));
+ die(_("not a commit? can't happen: %s"), oid_to_hex(&oid));
object = &commit->object;
if (object->flags & SHOWN)
- error("Object %s already has a mark", oid_to_hex(&oid));
+ error(_("object %s already has a mark"), oid_to_hex(&oid));
mark_object(object, mark);
@@ -1394,7 +1395,7 @@ int cmd_fast_export(int argc,
get_tags_and_duplicates(&revs.cmdline);
if (prepare_revision_walk(&revs))
- die("revision walk setup failed");
+ die(_("revision walk setup failed"));
revs.reverse = 1;
revs.diffopt.format_callback = show_filemodify;
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
index 54d3e592c6..4cd0b079b6 100644
--- a/builtin/fast-import.c
+++ b/builtin/fast-import.c
@@ -339,12 +339,12 @@ static void write_crash_report(const char *err)
struct recent_command *rc;
if (!rpt) {
- error_errno("can't write crash report %s", loc);
+ error_errno(_("can't write crash report %s"), loc);
free(loc);
return;
}
- fprintf(stderr, "fast-import: dumping crash report to %s\n", loc);
+ fprintf(stderr, _("fast-import: dumping crash report to %s\n"), loc);
fprintf(rpt, "fast-import crash report:\n");
fprintf(rpt, " fast-import process: %"PRIuMAX"\n", (uintmax_t) getpid());
@@ -588,7 +588,7 @@ static void *find_mark(struct mark_set *s, uintmax_t idnum)
oe = s->data.marked[idnum];
}
if (!oe)
- die("mark :%" PRIuMAX " not declared", orig_idnum);
+ die(_("mark :%" PRIuMAX " not declared"), orig_idnum);
return oe;
}
@@ -628,9 +628,9 @@ static struct branch *new_branch(const char *name)
struct branch *b = lookup_branch(name);
if (b)
- die("Invalid attempt to create duplicate branch: %s", name);
+ die(_("invalid attempt to create duplicate branch: %s"), name);
if (check_refname_format(name, REFNAME_ALLOW_ONELEVEL))
- die("Branch name doesn't conform to GIT standards: %s", name);
+ die(_("branch name doesn't conform to Git standards: %s"), name);
b = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct branch));
b->name = mem_pool_strdup(&fi_mem_pool, name);
@@ -801,7 +801,7 @@ static const char *create_index(void)
*c++ = &e->idx;
last = idx + object_count;
if (c != last)
- die("internal consistency error creating the index");
+ die(_("internal consistency error creating the index"));
tmpfile = write_idx_file(the_repository, NULL, idx, object_count,
&pack_idx_opts, pack_data->hash);
@@ -819,18 +819,18 @@ static char *keep_pack(const char *curr_index_name)
keep_fd = safe_create_file_with_leading_directories(pack_data->repo,
name.buf);
if (keep_fd < 0)
- die_errno("cannot create keep file");
+ die_errno(_("cannot create keep file"));
write_or_die(keep_fd, keep_msg, strlen(keep_msg));
if (close(keep_fd))
- die_errno("failed to write keep file");
+ die_errno(_("failed to write keep file"));
odb_pack_name(pack_data->repo, &name, pack_data->hash, "pack");
if (finalize_object_file(pack_data->repo, pack_data->pack_name, name.buf))
- die("cannot store pack file");
+ die(_("cannot store pack file"));
odb_pack_name(pack_data->repo, &name, pack_data->hash, "idx");
if (finalize_object_file(pack_data->repo, curr_index_name, name.buf))
- die("cannot store index file");
+ die(_("cannot store index file"));
free((void *)curr_index_name);
return strbuf_detach(&name, NULL);
}
@@ -853,7 +853,7 @@ static int loosen_small_pack(const struct packed_git *p)
struct child_process unpack = CHILD_PROCESS_INIT;
if (lseek(p->pack_fd, 0, SEEK_SET) < 0)
- die_errno("Failed seeking to start of '%s'", p->pack_name);
+ die_errno(_("failed seeking to start of '%s'"), p->pack_name);
unpack.in = p->pack_fd;
unpack.git_cmd = 1;
@@ -903,7 +903,7 @@ static void end_packfile(void)
new_p = packfile_store_load_pack(pack_data->repo->objects->packfiles,
idx_name, 1);
if (!new_p)
- die("core git rejected index %s", idx_name);
+ die(_("core Git rejected index %s"), idx_name);
all_packs[pack_id] = new_p;
free(idx_name);
@@ -979,7 +979,7 @@ static int store_object(
if (e->idx.offset) {
duplicate_count_by_type[type]++;
return 1;
- } else if (find_oid_pack(&oid, packfile_store_get_packs(packs))) {
+ } else if (packfile_list_find_oid(packfile_store_get_packs(packs), &oid)) {
e->type = type;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
@@ -1090,7 +1090,7 @@ static int store_object(
static void truncate_pack(struct hashfile_checkpoint *checkpoint)
{
if (hashfile_truncate(pack_file, checkpoint))
- die_errno("cannot truncate pack to skip duplicate");
+ die_errno(_("cannot truncate pack to skip duplicate"));
pack_size = checkpoint->offset;
}
@@ -1138,7 +1138,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
size_t cnt = in_sz < len ? in_sz : (size_t)len;
size_t n = fread(in_buf, 1, cnt, stdin);
if (!n && feof(stdin))
- die("EOF in data (%" PRIuMAX " bytes remaining)", len);
+ die(_("EOF in data (%" PRIuMAX " bytes remaining)"), len);
git_hash_update(&c, in_buf, n);
s.next_in = in_buf;
@@ -1162,7 +1162,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
case Z_STREAM_END:
continue;
default:
- die("unexpected deflate failure: %d", status);
+ die(_("unexpected deflate failure: %d"), status);
}
}
git_deflate_end(&s);
@@ -1180,7 +1180,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
duplicate_count_by_type[OBJ_BLOB]++;
truncate_pack(&checkpoint);
- } else if (find_oid_pack(&oid, packfile_store_get_packs(packs))) {
+ } else if (packfile_list_find_oid(packfile_store_get_packs(packs), &oid)) {
e->type = OBJ_BLOB;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
@@ -1264,16 +1264,16 @@ static void load_tree(struct tree_entry *root)
myoe = find_object(oid);
if (myoe && myoe->pack_id != MAX_PACK_ID) {
if (myoe->type != OBJ_TREE)
- die("Not a tree: %s", oid_to_hex(oid));
+ die(_("not a tree: %s"), oid_to_hex(oid));
t->delta_depth = myoe->depth;
buf = gfi_unpack_entry(myoe, &size);
if (!buf)
- die("Can't load tree %s", oid_to_hex(oid));
+ die(_("can't load tree %s"), oid_to_hex(oid));
} else {
enum object_type type;
buf = odb_read_object(the_repository->objects, oid, &type, &size);
if (!buf || type != OBJ_TREE)
- die("Can't load tree %s", oid_to_hex(oid));
+ die(_("can't load tree %s"), oid_to_hex(oid));
}
c = buf;
@@ -1287,7 +1287,7 @@ static void load_tree(struct tree_entry *root)
e->tree = NULL;
c = parse_mode(c, &e->versions[1].mode);
if (!c)
- die("Corrupt mode in %s", oid_to_hex(oid));
+ die(_("corrupt mode in %s"), oid_to_hex(oid));
e->versions[0].mode = e->versions[1].mode;
e->name = to_atom(c, strlen(c));
c += e->name->str_len + 1;
@@ -1399,7 +1399,7 @@ static void tree_content_replace(
struct tree_content *newtree)
{
if (!S_ISDIR(mode))
- die("Root cannot be a non-directory");
+ die(_("root cannot be a non-directory"));
oidclr(&root->versions[0].oid, the_repository->hash_algo);
oidcpy(&root->versions[1].oid, oid);
if (root->tree)
@@ -1422,9 +1422,9 @@ static int tree_content_set(
slash1 = strchrnul(p, '/');
n = slash1 - p;
if (!n)
- die("Empty path component found in input");
+ die(_("empty path component found in input"));
if (!*slash1 && !S_ISDIR(mode) && subtree)
- die("Non-directories cannot have subtrees");
+ die(_("non-directories cannot have subtrees"));
if (!root->tree)
load_tree(root);
@@ -1576,7 +1576,7 @@ static int tree_content_get(
slash1 = strchrnul(p, '/');
n = slash1 - p;
if (!n && !allow_root)
- die("Empty path component found in input");
+ die(_("empty path component found in input"));
if (!root->tree)
load_tree(root);
@@ -1622,8 +1622,8 @@ static int update_branch(struct branch *b)
!strcmp(b->name + strlen(replace_prefix),
oid_to_hex(&b->oid))) {
if (!quiet)
- warning("Dropping %s since it would point to "
- "itself (i.e. to %s)",
+ warning(_("dropping %s since it would point to "
+ "itself (i.e. to %s)"),
b->name, oid_to_hex(&b->oid));
refs_delete_ref(get_main_ref_store(the_repository),
NULL, b->name, NULL, 0);
@@ -1646,14 +1646,14 @@ static int update_branch(struct branch *b)
new_cmit = lookup_commit_reference_gently(the_repository,
&b->oid, 0);
if (!old_cmit || !new_cmit)
- return error("Branch %s is missing commits.", b->name);
+ return error(_("branch %s is missing commits."), b->name);
ret = repo_in_merge_bases(the_repository, old_cmit, new_cmit);
if (ret < 0)
exit(128);
if (!ret) {
- warning("Not updating %s"
- " (new tip %s does not contain %s)",
+ warning(_("not updating %s"
+ " (new tip %s does not contain %s)"),
b->name, oid_to_hex(&b->oid),
oid_to_hex(&old_oid));
return -1;
@@ -1729,13 +1729,13 @@ static void dump_marks(void)
return;
if (safe_create_leading_directories_const(the_repository, export_marks_file)) {
- failure |= error_errno("unable to create leading directories of %s",
+ failure |= error_errno(_("unable to create leading directories of %s"),
export_marks_file);
return;
}
if (hold_lock_file_for_update(&mark_lock, export_marks_file, 0) < 0) {
- failure |= error_errno("Unable to write marks file %s",
+ failure |= error_errno(_("unable to write marks file %s"),
export_marks_file);
return;
}
@@ -1744,14 +1744,14 @@ static void dump_marks(void)
if (!f) {
int saved_errno = errno;
rollback_lock_file(&mark_lock);
- failure |= error("Unable to write marks file %s: %s",
+ failure |= error(_("unable to write marks file %s: %s"),
export_marks_file, strerror(saved_errno));
return;
}
for_each_mark(marks, 0, dump_marks_fn, f);
if (commit_lock_file(&mark_lock)) {
- failure |= error_errno("Unable to write file %s",
+ failure |= error_errno(_("unable to write file %s"),
export_marks_file);
return;
}
@@ -1765,7 +1765,7 @@ static void insert_object_entry(struct mark_set **s, struct object_id *oid, uint
enum object_type type = odb_read_object_info(the_repository->objects,
oid, NULL);
if (type < 0)
- die("object not found: %s", oid_to_hex(oid));
+ die(_("object not found: %s"), oid_to_hex(oid));
e = insert_object(oid);
e->type = type;
e->pack_id = MAX_PACK_ID;
@@ -1792,13 +1792,13 @@ static void read_mark_file(struct mark_set **s, FILE *f, mark_set_inserter_t ins
end = strchr(line, '\n');
if (line[0] != ':' || !end)
- die("corrupt mark line: %s", line);
+ die(_("corrupt mark line: %s"), line);
*end = 0;
mark = strtoumax(line + 1, &end, 10);
if (!mark || end == line + 1
|| *end != ' '
|| get_oid_hex_any(end + 1, &oid) == GIT_HASH_UNKNOWN)
- die("corrupt mark line: %s", line);
+ die(_("corrupt mark line: %s"), line);
inserter(s, &oid, mark);
}
}
@@ -1811,7 +1811,7 @@ static void read_marks(void)
else if (import_marks_file_ignore_missing && errno == ENOENT)
goto done; /* Marks file does not exist */
else
- die_errno("cannot read '%s'", import_marks_file);
+ die_errno(_("cannot read '%s'"), import_marks_file);
read_mark_file(&marks, f, insert_object_entry);
fclose(f);
done:
@@ -1897,7 +1897,7 @@ static int parse_data(struct strbuf *sb, uintmax_t limit, uintmax_t *len_res)
strbuf_reset(sb);
if (!skip_prefix(command_buf.buf, "data ", &data))
- die("Expected 'data n' command, found: %s", command_buf.buf);
+ die(_("expected 'data n' command, found: %s"), command_buf.buf);
if (skip_prefix(data, "<<", &data)) {
char *term = xstrdup(data);
@@ -1905,7 +1905,7 @@ static int parse_data(struct strbuf *sb, uintmax_t limit, uintmax_t *len_res)
for (;;) {
if (strbuf_getline_lf(&command_buf, stdin) == EOF)
- die("EOF in data (terminator '%s' not found)", term);
+ die(_("EOF in data (terminator '%s' not found)"), term);
if (term_len == command_buf.len
&& !strcmp(term, command_buf.buf))
break;
@@ -1923,12 +1923,12 @@ static int parse_data(struct strbuf *sb, uintmax_t limit, uintmax_t *len_res)
return 0;
}
if (length < len)
- die("data is too large to use in this context");
+ die(_("data is too large to use in this context"));
while (n < length) {
size_t s = strbuf_fread(sb, length - n, stdin);
if (!s && feof(stdin))
- die("EOF in data (%lu bytes remaining)",
+ die(_("EOF in data (%lu bytes remaining)"),
(unsigned long)(length - n));
n += s;
}
@@ -1985,15 +1985,15 @@ static char *parse_ident(const char *buf)
ltgt = buf + strcspn(buf, "<>");
if (*ltgt != '<')
- die("Missing < in ident string: %s", buf);
+ die(_("missing < in ident string: %s"), buf);
if (ltgt != buf && ltgt[-1] != ' ')
- die("Missing space before < in ident string: %s", buf);
+ die(_("missing space before < in ident string: %s"), buf);
ltgt = ltgt + 1 + strcspn(ltgt + 1, "<>");
if (*ltgt != '>')
- die("Missing > in ident string: %s", buf);
+ die(_("missing > in ident string: %s"), buf);
ltgt++;
if (*ltgt != ' ')
- die("Missing space after > in ident string: %s", buf);
+ die(_("missing space after > in ident string: %s"), buf);
ltgt++;
name_len = ltgt - buf;
strbuf_add(&ident, buf, name_len);
@@ -2001,19 +2001,19 @@ static char *parse_ident(const char *buf)
switch (whenspec) {
case WHENSPEC_RAW:
if (validate_raw_date(ltgt, &ident, 1) < 0)
- die("Invalid raw date \"%s\" in ident: %s", ltgt, buf);
+ die(_("invalid raw date \"%s\" in ident: %s"), ltgt, buf);
break;
case WHENSPEC_RAW_PERMISSIVE:
if (validate_raw_date(ltgt, &ident, 0) < 0)
- die("Invalid raw date \"%s\" in ident: %s", ltgt, buf);
+ die(_("invalid raw date \"%s\" in ident: %s"), ltgt, buf);
break;
case WHENSPEC_RFC2822:
if (parse_date(ltgt, &ident) < 0)
- die("Invalid rfc2822 date \"%s\" in ident: %s", ltgt, buf);
+ die(_("invalid rfc2822 date \"%s\" in ident: %s"), ltgt, buf);
break;
case WHENSPEC_NOW:
if (strcmp("now", ltgt))
- die("Date in ident must be 'now': %s", buf);
+ die(_("date in ident must be 'now': %s"), buf);
datestamp(&ident);
break;
}
@@ -2107,7 +2107,7 @@ static void construct_path_with_fanout(const char *hex_sha1,
{
unsigned int i = 0, j = 0;
if (fanout >= the_hash_algo->rawsz)
- die("Too large fanout (%u)", fanout);
+ die(_("too large fanout (%u)"), fanout);
while (fanout) {
path[i++] = hex_sha1[j++];
path[i++] = hex_sha1[j++];
@@ -2181,7 +2181,7 @@ static uintmax_t do_change_note_fanout(
/* Rename fullpath to realpath */
if (!tree_content_remove(orig_root, fullpath, &leaf, 0))
- die("Failed to remove path %s", fullpath);
+ die(_("failed to remove path %s"), fullpath);
tree_content_set(orig_root, realpath,
&leaf.versions[1].oid,
leaf.versions[1].mode,
@@ -2254,7 +2254,7 @@ static uintmax_t parse_mark_ref(const char *p, char **endptr)
p++;
mark = strtoumax(p, endptr, 10);
if (*endptr == p)
- die("No value after ':' in mark: %s", command_buf.buf);
+ die(_("no value after ':' in mark: %s"), command_buf.buf);
return mark;
}
@@ -2269,7 +2269,7 @@ static uintmax_t parse_mark_ref_eol(const char *p)
mark = parse_mark_ref(p, &end);
if (*end != '\0')
- die("Garbage after mark: %s", command_buf.buf);
+ die(_("garbage after mark: %s"), command_buf.buf);
return mark;
}
@@ -2284,7 +2284,7 @@ static uintmax_t parse_mark_ref_space(const char **p)
mark = parse_mark_ref(*p, &end);
if (*end++ != ' ')
- die("Missing space after mark: %s", command_buf.buf);
+ die(_("missing space after mark: %s"), command_buf.buf);
*p = end;
return mark;
}
@@ -2300,9 +2300,9 @@ static void parse_path(struct strbuf *sb, const char *p, const char **endp,
{
if (*p == '"') {
if (unquote_c_style(sb, p, endp))
- die("Invalid %s: %s", field, command_buf.buf);
+ die(_("invalid %s: %s"), field, command_buf.buf);
if (strlen(sb->buf) != sb->len)
- die("NUL in %s: %s", field, command_buf.buf);
+ die(_("NUL in %s: %s"), field, command_buf.buf);
} else {
/*
* Unless we are parsing the last field of a line,
@@ -2325,7 +2325,7 @@ static void parse_path_eol(struct strbuf *sb, const char *p, const char *field)
parse_path(sb, p, &end, 1, field);
if (*end)
- die("Garbage after %s: %s", field, command_buf.buf);
+ die(_("garbage after %s: %s"), field, command_buf.buf);
}
/*
@@ -2338,7 +2338,7 @@ static void parse_path_space(struct strbuf *sb, const char *p,
{
parse_path(sb, p, endp, 0, field);
if (**endp != ' ')
- die("Missing space after %s: %s", field, command_buf.buf);
+ die(_("missing space after %s: %s"), field, command_buf.buf);
(*endp)++;
}
@@ -2351,7 +2351,7 @@ static void file_change_m(const char *p, struct branch *b)
p = parse_mode(p, &mode);
if (!p)
- die("Corrupt mode: %s", command_buf.buf);
+ die(_("corrupt mode: %s"), command_buf.buf);
switch (mode) {
case 0644:
case 0755:
@@ -2364,7 +2364,7 @@ static void file_change_m(const char *p, struct branch *b)
/* ok */
break;
default:
- die("Corrupt mode: %s", command_buf.buf);
+ die(_("corrupt mode: %s"), command_buf.buf);
}
if (*p == ':') {
@@ -2375,10 +2375,10 @@ static void file_change_m(const char *p, struct branch *b)
oe = NULL; /* not used with inline_data, but makes gcc happy */
} else {
if (parse_mapped_oid_hex(p, &oid, &p))
- die("Invalid dataref: %s", command_buf.buf);
+ die(_("invalid dataref: %s"), command_buf.buf);
oe = find_object(&oid);
if (*p++ != ' ')
- die("Missing space after SHA1: %s", command_buf.buf);
+ die(_("missing space after SHA1: %s"), command_buf.buf);
}
strbuf_reset(&path);
@@ -2394,11 +2394,11 @@ static void file_change_m(const char *p, struct branch *b)
if (S_ISGITLINK(mode)) {
if (inline_data)
- die("Git links cannot be specified 'inline': %s",
+ die(_("Git links cannot be specified 'inline': %s"),
command_buf.buf);
else if (oe) {
if (oe->type != OBJ_COMMIT)
- die("Not a commit (actually a %s): %s",
+ die(_("not a commit (actually a %s): %s"),
type_name(oe->type), command_buf.buf);
}
/*
@@ -2407,7 +2407,7 @@ static void file_change_m(const char *p, struct branch *b)
*/
} else if (inline_data) {
if (S_ISDIR(mode))
- die("Directories cannot be specified 'inline': %s",
+ die(_("directories cannot be specified 'inline': %s"),
command_buf.buf);
while (read_next_command() != EOF) {
const char *v;
@@ -2425,11 +2425,11 @@ static void file_change_m(const char *p, struct branch *b)
odb_read_object_info(the_repository->objects,
&oid, NULL);
if (type < 0)
- die("%s not found: %s",
- S_ISDIR(mode) ? "Tree" : "Blob",
- command_buf.buf);
+ die(_("%s not found: %s"),
+ S_ISDIR(mode) ? _("tree") : _("blob"),
+ command_buf.buf);
if (type != expected)
- die("Not a %s (actually a %s): %s",
+ die(_("not a %s (actually a %s): %s"),
type_name(expected), type_name(type),
command_buf.buf);
}
@@ -2440,7 +2440,7 @@ static void file_change_m(const char *p, struct branch *b)
}
if (!verify_path(path.buf, mode))
- die("invalid path '%s'", path.buf);
+ die(_("invalid path '%s'"), path.buf);
tree_content_set(&b->branch_tree, path.buf, &oid, mode, NULL);
}
@@ -2470,7 +2470,7 @@ static void file_change_cr(const char *p, struct branch *b, int rename)
else
tree_content_get(&b->branch_tree, source.buf, &leaf, 1);
if (!leaf.versions[1].mode)
- die("Path %s not in branch", source.buf);
+ die(_("path %s not in branch"), source.buf);
if (!*dest.buf) { /* C "path/to/subdir" "" */
tree_content_replace(&b->branch_tree,
&leaf.versions[1].oid,
@@ -2479,7 +2479,7 @@ static void file_change_cr(const char *p, struct branch *b, int rename)
return;
}
if (!verify_path(dest.buf, leaf.versions[1].mode))
- die("invalid path '%s'", dest.buf);
+ die(_("invalid path '%s'"), dest.buf);
tree_content_set(&b->branch_tree, dest.buf,
&leaf.versions[1].oid,
leaf.versions[1].mode,
@@ -2521,23 +2521,23 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
oe = NULL; /* not used with inline_data, but makes gcc happy */
} else {
if (parse_mapped_oid_hex(p, &oid, &p))
- die("Invalid dataref: %s", command_buf.buf);
+ die(_("invalid dataref: %s"), command_buf.buf);
oe = find_object(&oid);
if (*p++ != ' ')
- die("Missing space after SHA1: %s", command_buf.buf);
+ die(_("missing space after SHA1: %s"), command_buf.buf);
}
/* <commit-ish> */
s = lookup_branch(p);
if (s) {
if (is_null_oid(&s->oid))
- die("Can't add a note on empty branch.");
+ die(_("can't add a note on empty branch."));
oidcpy(&commit_oid, &s->oid);
} else if (*p == ':') {
uintmax_t commit_mark = parse_mark_ref_eol(p);
struct object_entry *commit_oe = find_mark(marks, commit_mark);
if (commit_oe->type != OBJ_COMMIT)
- die("Mark :%" PRIuMAX " not a commit", commit_mark);
+ die(_("mark :%" PRIuMAX " not a commit"), commit_mark);
oidcpy(&commit_oid, &commit_oe->idx.oid);
} else if (!repo_get_oid(the_repository, p, &commit_oid)) {
unsigned long size;
@@ -2545,25 +2545,25 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
&commit_oid, OBJ_COMMIT, &size,
&commit_oid);
if (!buf || size < the_hash_algo->hexsz + 6)
- die("Not a valid commit: %s", p);
+ die(_("not a valid commit: %s"), p);
free(buf);
} else
- die("Invalid ref name or SHA1 expression: %s", p);
+ die(_("invalid ref name or SHA1 expression: %s"), p);
if (inline_data) {
read_next_command();
parse_and_store_blob(&last_blob, &oid, 0);
} else if (oe) {
if (oe->type != OBJ_BLOB)
- die("Not a blob (actually a %s): %s",
+ die(_("not a blob (actually a %s): %s"),
type_name(oe->type), command_buf.buf);
} else if (!is_null_oid(&oid)) {
enum object_type type = odb_read_object_info(the_repository->objects, &oid,
NULL);
if (type < 0)
- die("Blob not found: %s", command_buf.buf);
+ die(_("blob not found: %s"), command_buf.buf);
if (type != OBJ_BLOB)
- die("Not a blob (actually a %s): %s",
+ die(_("not a blob (actually a %s): %s"),
type_name(type), command_buf.buf);
}
@@ -2592,10 +2592,10 @@ static void file_change_deleteall(struct branch *b)
static void parse_from_commit(struct branch *b, char *buf, unsigned long size)
{
if (!buf || size < the_hash_algo->hexsz + 6)
- die("Not a valid commit: %s", oid_to_hex(&b->oid));
+ die(_("not a valid commit: %s"), oid_to_hex(&b->oid));
if (memcmp("tree ", buf, 5)
|| get_oid_hex(buf + 5, &b->branch_tree.versions[1].oid))
- die("The commit %s is corrupt", oid_to_hex(&b->oid));
+ die(_("the commit %s is corrupt"), oid_to_hex(&b->oid));
oidcpy(&b->branch_tree.versions[0].oid,
&b->branch_tree.versions[1].oid);
}
@@ -2625,7 +2625,7 @@ static int parse_objectish(struct branch *b, const char *objectish)
s = lookup_branch(objectish);
if (b == s)
- die("Can't create a branch from itself: %s", b->name);
+ die(_("can't create a branch from itself: %s"), b->name);
else if (s) {
struct object_id *t = &s->branch_tree.versions[1].oid;
oidcpy(&b->oid, &s->oid);
@@ -2635,7 +2635,7 @@ static int parse_objectish(struct branch *b, const char *objectish)
uintmax_t idnum = parse_mark_ref_eol(objectish);
struct object_entry *oe = find_mark(marks, idnum);
if (oe->type != OBJ_COMMIT)
- die("Mark :%" PRIuMAX " not a commit", idnum);
+ die(_("mark :%" PRIuMAX " not a commit"), idnum);
if (!oideq(&b->oid, &oe->idx.oid)) {
oidcpy(&b->oid, &oe->idx.oid);
if (oe->pack_id != MAX_PACK_ID) {
@@ -2652,7 +2652,7 @@ static int parse_objectish(struct branch *b, const char *objectish)
b->delete = 1;
}
else
- die("Invalid ref name or SHA1 expression: %s", objectish);
+ die(_("invalid ref name or SHA1 expression: %s"), objectish);
if (b->branch_tree.tree && !oideq(&oid, &b->branch_tree.versions[1].oid)) {
release_tree_content_recursive(b->branch_tree.tree);
@@ -2699,7 +2699,7 @@ static struct hash_list *parse_merge(unsigned int *count)
uintmax_t idnum = parse_mark_ref_eol(from);
struct object_entry *oe = find_mark(marks, idnum);
if (oe->type != OBJ_COMMIT)
- die("Mark :%" PRIuMAX " not a commit", idnum);
+ die(_("mark :%" PRIuMAX " not a commit"), idnum);
oidcpy(&n->oid, &oe->idx.oid);
} else if (!repo_get_oid(the_repository, from, &n->oid)) {
unsigned long size;
@@ -2707,10 +2707,10 @@ static struct hash_list *parse_merge(unsigned int *count)
&n->oid, OBJ_COMMIT,
&size, &n->oid);
if (!buf || size < the_hash_algo->hexsz + 6)
- die("Not a valid commit: %s", from);
+ die(_("not a valid commit: %s"), from);
free(buf);
} else
- die("Invalid ref name or SHA1 expression: %s", from);
+ die(_("invalid ref name or SHA1 expression: %s"), from);
n->next = NULL;
*tail = n;
@@ -2734,8 +2734,8 @@ static void parse_one_signature(struct signature_data *sig, const char *v)
char *space = strchr(args, ' ');
if (!space)
- die("Expected gpgsig format: 'gpgsig <hash-algo> <signature-format>', "
- "got 'gpgsig %s'", args);
+ die(_("expected gpgsig format: 'gpgsig <hash-algo> <signature-format>', "
+ "got 'gpgsig %s'"), args);
*space = '\0';
sig->hash_algo = args;
@@ -2744,13 +2744,13 @@ static void parse_one_signature(struct signature_data *sig, const char *v)
/* Validate hash algorithm */
if (strcmp(sig->hash_algo, "sha1") &&
strcmp(sig->hash_algo, "sha256"))
- die("Unknown git hash algorithm in gpgsig: '%s'", sig->hash_algo);
+ die(_("unknown git hash algorithm in gpgsig: '%s'"), sig->hash_algo);
/* Validate signature format */
if (!valid_signature_format(sig->sig_format))
- die("Invalid signature format in gpgsig: '%s'", sig->sig_format);
+ die(_("invalid signature format in gpgsig: '%s'"), sig->sig_format);
if (!strcmp(sig->sig_format, "unknown"))
- warning("'unknown' signature format in gpgsig");
+ warning(_("'unknown' signature format in gpgsig"));
/* Read signature data */
read_next_command();
@@ -2789,8 +2789,8 @@ static void store_signature(struct signature_data *stored_sig,
const char *hash_type)
{
if (stored_sig->hash_algo) {
- warning("multiple %s signatures found, "
- "ignoring additional signature",
+ warning(_("multiple %s signatures found, "
+ "ignoring additional signature"),
hash_type);
strbuf_release(&new_sig->data);
free(new_sig->hash_algo);
@@ -2845,15 +2845,15 @@ static void parse_new_commit(const char *arg)
read_next_command();
}
if (!committer)
- die("Expected committer but didn't get one");
+ die(_("expected committer but didn't get one"));
while (skip_prefix(command_buf.buf, "gpgsig ", &v)) {
switch (signed_commit_mode) {
/* First, modes that don't need the signature to be parsed */
case SIGN_ABORT:
- die("encountered signed commit; use "
- "--signed-commits=<mode> to handle it");
+ die(_("encountered signed commit; use "
+ "--signed-commits=<mode> to handle it"));
case SIGN_WARN_STRIP:
warning(_("stripping a commit signature"));
/* fallthru */
@@ -3025,11 +3025,11 @@ static void parse_new_tag(const char *arg)
/* from ... */
if (!skip_prefix(command_buf.buf, "from ", &from))
- die("Expected from command, got %s", command_buf.buf);
+ die(_("expected 'from' command, got '%s'"), command_buf.buf);
s = lookup_branch(from);
if (s) {
if (is_null_oid(&s->oid))
- die("Can't tag an empty branch.");
+ die(_("can't tag an empty branch."));
oidcpy(&oid, &s->oid);
type = OBJ_COMMIT;
} else if (*from == ':') {
@@ -3044,11 +3044,11 @@ static void parse_new_tag(const char *arg)
type = odb_read_object_info(the_repository->objects,
&oid, NULL);
if (type < 0)
- die("Not a valid object: %s", from);
+ die(_("not a valid object: %s"), from);
} else
type = oe->type;
} else
- die("Invalid ref name or SHA1 expression: %s", from);
+ die(_("invalid ref name or SHA1 expression: %s"), from);
read_next_command();
/* original-oid ... */
@@ -3139,7 +3139,7 @@ static void parse_reset_branch(const char *arg)
static void cat_blob_write(const char *buf, unsigned long size)
{
if (write_in_full(cat_blob_fd, buf, size) < 0)
- die_errno("Write to frontend failed");
+ die_errno(_("write to frontend failed"));
}
static void cat_blob(struct object_entry *oe, struct object_id *oid)
@@ -3168,9 +3168,9 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid)
return;
}
if (!buf)
- die("Can't read object %s", oid_to_hex(oid));
+ die(_("can't read object %s"), oid_to_hex(oid));
if (type != OBJ_BLOB)
- die("Object %s is a %s but a blob was expected.",
+ die(_("object %s is a %s but a blob was expected."),
oid_to_hex(oid), type_name(type));
strbuf_reset(&line);
strbuf_addf(&line, "%s %s %"PRIuMAX"\n", oid_to_hex(oid),
@@ -3194,11 +3194,11 @@ static void parse_get_mark(const char *p)
/* get-mark SP <object> LF */
if (*p != ':')
- die("Not a mark: %s", p);
+ die(_("not a mark: %s"), p);
oe = find_mark(marks, parse_mark_ref_eol(p));
if (!oe)
- die("Unknown mark: %s", command_buf.buf);
+ die(_("unknown mark: %s"), command_buf.buf);
xsnprintf(output, sizeof(output), "%s\n", oid_to_hex(&oe->idx.oid));
cat_blob_write(output, the_hash_algo->hexsz + 1);
@@ -3213,13 +3213,13 @@ static void parse_cat_blob(const char *p)
if (*p == ':') {
oe = find_mark(marks, parse_mark_ref_eol(p));
if (!oe)
- die("Unknown mark: %s", command_buf.buf);
+ die(_("unknown mark: %s"), command_buf.buf);
oidcpy(&oid, &oe->idx.oid);
} else {
if (parse_mapped_oid_hex(p, &oid, &p))
- die("Invalid dataref: %s", command_buf.buf);
+ die(_("invalid dataref: %s"), command_buf.buf);
if (*p)
- die("Garbage after SHA1: %s", command_buf.buf);
+ die(_("garbage after SHA1: %s"), command_buf.buf);
oe = find_object(&oid);
}
@@ -3237,7 +3237,7 @@ static struct object_entry *dereference(struct object_entry *oe,
enum object_type type = odb_read_object_info(the_repository->objects,
oid, NULL);
if (type < 0)
- die("object not found: %s", oid_to_hex(oid));
+ die(_("object not found: %s"), oid_to_hex(oid));
/* cache it! */
oe = insert_object(oid);
oe->type = type;
@@ -3251,7 +3251,7 @@ static struct object_entry *dereference(struct object_entry *oe,
case OBJ_TAG:
break;
default:
- die("Not a tree-ish: %s", command_buf.buf);
+ die(_("not a tree-ish: %s"), command_buf.buf);
}
if (oe->pack_id != MAX_PACK_ID) { /* in a pack being written */
@@ -3262,19 +3262,19 @@ static struct object_entry *dereference(struct object_entry *oe,
&unused, &size);
}
if (!buf)
- die("Can't load object %s", oid_to_hex(oid));
+ die(_("can't load object %s"), oid_to_hex(oid));
/* Peel one layer. */
switch (oe->type) {
case OBJ_TAG:
if (size < hexsz + strlen("object ") ||
get_oid_hex(buf + strlen("object "), oid))
- die("Invalid SHA1 in tag: %s", command_buf.buf);
+ die(_("invalid SHA1 in tag: %s"), command_buf.buf);
break;
case OBJ_COMMIT:
if (size < hexsz + strlen("tree ") ||
get_oid_hex(buf + strlen("tree "), oid))
- die("Invalid SHA1 in commit: %s", command_buf.buf);
+ die(_("invalid SHA1 in commit: %s"), command_buf.buf);
}
free(buf);
@@ -3309,9 +3309,9 @@ static void build_mark_map(struct string_list *from, struct string_list *to)
for_each_string_list_item(fromp, from) {
top = string_list_lookup(to, fromp->string);
if (!fromp->util) {
- die(_("Missing from marks for submodule '%s'"), fromp->string);
+ die(_("missing from marks for submodule '%s'"), fromp->string);
} else if (!top || !top->util) {
- die(_("Missing to marks for submodule '%s'"), fromp->string);
+ die(_("missing to marks for submodule '%s'"), fromp->string);
}
build_mark_map_one(fromp->util, top->util);
}
@@ -3325,14 +3325,14 @@ static struct object_entry *parse_treeish_dataref(const char **p)
if (**p == ':') { /* <mark> */
e = find_mark(marks, parse_mark_ref_space(p));
if (!e)
- die("Unknown mark: %s", command_buf.buf);
+ die(_("unknown mark: %s"), command_buf.buf);
oidcpy(&oid, &e->idx.oid);
} else { /* <sha1> */
if (parse_mapped_oid_hex(*p, &oid, p))
- die("Invalid dataref: %s", command_buf.buf);
+ die(_("invalid dataref: %s"), command_buf.buf);
e = find_object(&oid);
if (*(*p)++ != ' ')
- die("Missing space after tree-ish: %s", command_buf.buf);
+ die(_("missing space after tree-ish: %s"), command_buf.buf);
}
while (!e || e->type != OBJ_TREE)
@@ -3376,7 +3376,7 @@ static void parse_ls(const char *p, struct branch *b)
/* ls SP (<tree-ish> SP)? <path> */
if (*p == '"') {
if (!b)
- die("Not in a commit: %s", command_buf.buf);
+ die(_("not in a commit: %s"), command_buf.buf);
root = &b->branch_tree;
} else {
struct object_entry *e = parse_treeish_dataref(&p);
@@ -3439,12 +3439,12 @@ static void parse_alias(void)
/* mark ... */
parse_mark();
if (!next_mark)
- die(_("Expected 'mark' command, got %s"), command_buf.buf);
+ die(_("expected 'mark' command, got %s"), command_buf.buf);
/* to ... */
memset(&b, 0, sizeof(b));
if (!parse_objectish_with_prefix(&b, "to "))
- die(_("Expected 'to' command, got %s"), command_buf.buf);
+ die(_("expected 'to' command, got %s"), command_buf.buf);
e = find_object(&b.oid);
assert(e);
insert_mark(&marks, next_mark, e);
@@ -3462,7 +3462,7 @@ static void option_import_marks(const char *marks,
{
if (import_marks_file) {
if (from_stream)
- die("Only one import-marks command allowed per stream");
+ die(_("only one import-marks command allowed per stream"));
/* read previous mark file */
if(!import_marks_file_from_stream)
@@ -3486,7 +3486,7 @@ static void option_date_format(const char *fmt)
else if (!strcmp(fmt, "now"))
whenspec = WHENSPEC_NOW;
else
- die("unknown --date-format argument %s", fmt);
+ die(_("unknown --date-format argument %s"), fmt);
}
static unsigned long ulong_arg(const char *option, const char *arg)
@@ -3494,7 +3494,7 @@ static unsigned long ulong_arg(const char *option, const char *arg)
char *endptr;
unsigned long rv = strtoul(arg, &endptr, 0);
if (strchr(arg, '-') || endptr == arg || *endptr)
- die("%s: argument must be a non-negative integer", option);
+ die(_("%s: argument must be a non-negative integer"), option);
return rv;
}
@@ -3502,7 +3502,7 @@ static void option_depth(const char *depth)
{
max_depth = ulong_arg("--depth", depth);
if (max_depth > MAX_DEPTH)
- die("--depth cannot exceed %u", MAX_DEPTH);
+ die(_("--depth cannot exceed %u"), MAX_DEPTH);
}
static void option_active_branches(const char *branches)
@@ -3520,7 +3520,7 @@ static void option_cat_blob_fd(const char *fd)
{
unsigned long n = ulong_arg("--cat-blob-fd", fd);
if (n > (unsigned long) INT_MAX)
- die("--cat-blob-fd cannot exceed %d", INT_MAX);
+ die(_("--cat-blob-fd cannot exceed %d"), INT_MAX);
cat_blob_fd = (int) n;
}
@@ -3540,7 +3540,7 @@ static void option_rewrite_submodules(const char *arg, struct string_list *list)
char *s = xstrdup(arg);
char *f = strchr(s, ':');
if (!f)
- die(_("Expected format name:filename for submodule rewrite option"));
+ die(_("expected format name:filename for submodule rewrite option"));
*f = '\0';
f++;
CALLOC_ARRAY(ms, 1);
@@ -3548,7 +3548,7 @@ static void option_rewrite_submodules(const char *arg, struct string_list *list)
f = prefix_filename(global_prefix, f);
fp = fopen(f, "r");
if (!fp)
- die_errno("cannot read '%s'", f);
+ die_errno(_("cannot read '%s'"), f);
read_mark_file(&ms, fp, insert_oid_entry);
fclose(fp);
free(f);
@@ -3565,10 +3565,10 @@ static int parse_one_option(const char *option)
if (!git_parse_ulong(option, &v))
return 0;
if (v < 8192) {
- warning("max-pack-size is now in bytes, assuming --max-pack-size=%lum", v);
+ warning(_("max-pack-size is now in bytes, assuming --max-pack-size=%lum"), v);
v *= 1024 * 1024;
} else if (v < 1024 * 1024) {
- warning("minimum max-pack-size is 1 MiB");
+ warning(_("minimum max-pack-size is 1 MiB"));
v = 1024 * 1024;
}
max_packsize = v;
@@ -3655,23 +3655,23 @@ static int parse_one_feature(const char *feature, int from_stream)
static void parse_feature(const char *feature)
{
if (seen_data_command)
- die("Got feature command '%s' after data command", feature);
+ die(_("got feature command '%s' after data command"), feature);
if (parse_one_feature(feature, 1))
return;
- die("This version of fast-import does not support feature %s.", feature);
+ die(_("this version of fast-import does not support feature %s."), feature);
}
static void parse_option(const char *option)
{
if (seen_data_command)
- die("Got option command '%s' after data command", option);
+ die(_("got option command '%s' after data command"), option);
if (parse_one_option(option))
return;
- die("This version of fast-import does not support option: %s", option);
+ die(_("this version of fast-import does not support option: %s"), option);
}
static void git_pack_config(void)
@@ -3715,7 +3715,7 @@ static void parse_argv(void)
break;
if (!skip_prefix(a, "--", &a))
- die("unknown option %s", a);
+ die(_("unknown option %s"), a);
if (parse_one_option(a))
continue;
@@ -3728,7 +3728,7 @@ static void parse_argv(void)
continue;
}
- die("unknown option --%s", a);
+ die(_("unknown option --%s"), a);
}
if (i != global_argc)
usage(fast_import_usage);
@@ -3817,7 +3817,7 @@ int cmd_fast_import(int argc,
else if (starts_with(command_buf.buf, "option "))
/* ignore non-git options*/;
else
- die("Unsupported command: %s", command_buf.buf);
+ die(_("unsupported command: %s"), command_buf.buf);
if (checkpoint_requested)
checkpoint();
@@ -3828,7 +3828,7 @@ int cmd_fast_import(int argc,
parse_argv();
if (require_explicit_termination && feof(stdin))
- die("stream ends early");
+ die(_("stream ends early"));
end_packfile();
diff --git a/builtin/gc.c b/builtin/gc.c
index 541d7471f1..d212cbb9b8 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -34,6 +34,7 @@
#include "pack-objects.h"
#include "path.h"
#include "reflog.h"
+#include "repack.h"
#include "rerere.h"
#include "blob.h"
#include "tree.h"
@@ -55,7 +56,6 @@ static const char * const builtin_gc_usage[] = {
};
static timestamp_t gc_log_expire_time;
-static struct strvec repack = STRVEC_INIT;
static struct tempfile *pidfile;
static struct lock_file log_lock;
static struct string_list pack_garbage = STRING_LIST_INIT_DUP;
@@ -255,6 +255,7 @@ enum maintenance_task_label {
TASK_PREFETCH,
TASK_LOOSE_OBJECTS,
TASK_INCREMENTAL_REPACK,
+ TASK_GEOMETRIC_REPACK,
TASK_GC,
TASK_COMMIT_GRAPH,
TASK_PACK_REFS,
@@ -448,7 +449,7 @@ out:
return should_gc;
}
-static int too_many_loose_objects(struct gc_config *cfg)
+static int too_many_loose_objects(int limit)
{
/*
* Quickly check if a "gc" is needed, by estimating how
@@ -470,7 +471,7 @@ static int too_many_loose_objects(struct gc_config *cfg)
if (!dir)
return 0;
- auto_threshold = DIV_ROUND_UP(cfg->gc_auto_threshold, 256);
+ auto_threshold = DIV_ROUND_UP(limit, 256);
while ((ent = readdir(dir)) != NULL) {
if (strspn(ent->d_name, "0123456789abcdef") != hexsz_loose ||
ent->d_name[hexsz_loose] != '\0')
@@ -616,48 +617,50 @@ static uint64_t estimate_repack_memory(struct gc_config *cfg,
return os_cache + heap;
}
-static int keep_one_pack(struct string_list_item *item, void *data UNUSED)
+static int keep_one_pack(struct string_list_item *item, void *data)
{
- strvec_pushf(&repack, "--keep-pack=%s", basename(item->string));
+ struct strvec *args = data;
+ strvec_pushf(args, "--keep-pack=%s", basename(item->string));
return 0;
}
static void add_repack_all_option(struct gc_config *cfg,
- struct string_list *keep_pack)
+ struct string_list *keep_pack,
+ struct strvec *args)
{
if (cfg->prune_expire && !strcmp(cfg->prune_expire, "now")
&& !(cfg->cruft_packs && cfg->repack_expire_to))
- strvec_push(&repack, "-a");
+ strvec_push(args, "-a");
else if (cfg->cruft_packs) {
- strvec_push(&repack, "--cruft");
+ strvec_push(args, "--cruft");
if (cfg->prune_expire)
- strvec_pushf(&repack, "--cruft-expiration=%s", cfg->prune_expire);
+ strvec_pushf(args, "--cruft-expiration=%s", cfg->prune_expire);
if (cfg->max_cruft_size)
- strvec_pushf(&repack, "--max-cruft-size=%lu",
+ strvec_pushf(args, "--max-cruft-size=%lu",
cfg->max_cruft_size);
if (cfg->repack_expire_to)
- strvec_pushf(&repack, "--expire-to=%s", cfg->repack_expire_to);
+ strvec_pushf(args, "--expire-to=%s", cfg->repack_expire_to);
} else {
- strvec_push(&repack, "-A");
+ strvec_push(args, "-A");
if (cfg->prune_expire)
- strvec_pushf(&repack, "--unpack-unreachable=%s", cfg->prune_expire);
+ strvec_pushf(args, "--unpack-unreachable=%s", cfg->prune_expire);
}
if (keep_pack)
- for_each_string_list(keep_pack, keep_one_pack, NULL);
+ for_each_string_list(keep_pack, keep_one_pack, args);
if (cfg->repack_filter && *cfg->repack_filter)
- strvec_pushf(&repack, "--filter=%s", cfg->repack_filter);
+ strvec_pushf(args, "--filter=%s", cfg->repack_filter);
if (cfg->repack_filter_to && *cfg->repack_filter_to)
- strvec_pushf(&repack, "--filter-to=%s", cfg->repack_filter_to);
+ strvec_pushf(args, "--filter-to=%s", cfg->repack_filter_to);
}
-static void add_repack_incremental_option(void)
+static void add_repack_incremental_option(struct strvec *args)
{
- strvec_push(&repack, "--no-write-bitmap-index");
+ strvec_push(args, "--no-write-bitmap-index");
}
-static int need_to_gc(struct gc_config *cfg)
+static int need_to_gc(struct gc_config *cfg, struct strvec *repack_args)
{
/*
* Setting gc.auto to 0 or negative can disable the
@@ -698,10 +701,10 @@ static int need_to_gc(struct gc_config *cfg)
string_list_clear(&keep_pack, 0);
}
- add_repack_all_option(cfg, &keep_pack);
+ add_repack_all_option(cfg, &keep_pack, repack_args);
string_list_clear(&keep_pack, 0);
- } else if (too_many_loose_objects(cfg))
- add_repack_incremental_option();
+ } else if (too_many_loose_objects(cfg->gc_auto_threshold))
+ add_repack_incremental_option(repack_args);
else
return 0;
@@ -850,6 +853,7 @@ int cmd_gc(int argc,
int keep_largest_pack = -1;
int skip_foreground_tasks = 0;
timestamp_t dummy;
+ struct strvec repack_args = STRVEC_INIT;
struct maintenance_run_opts opts = MAINTENANCE_RUN_OPTS_INIT;
struct gc_config cfg = GC_CONFIG_INIT;
const char *prune_expire_sentinel = "sentinel";
@@ -889,7 +893,7 @@ int cmd_gc(int argc,
show_usage_with_options_if_asked(argc, argv,
builtin_gc_usage, builtin_gc_options);
- strvec_pushl(&repack, "repack", "-d", "-l", NULL);
+ strvec_pushl(&repack_args, "repack", "-d", "-l", NULL);
gc_config(&cfg);
@@ -912,14 +916,14 @@ int cmd_gc(int argc,
die(_("failed to parse prune expiry value %s"), cfg.prune_expire);
if (aggressive) {
- strvec_push(&repack, "-f");
+ strvec_push(&repack_args, "-f");
if (cfg.aggressive_depth > 0)
- strvec_pushf(&repack, "--depth=%d", cfg.aggressive_depth);
+ strvec_pushf(&repack_args, "--depth=%d", cfg.aggressive_depth);
if (cfg.aggressive_window > 0)
- strvec_pushf(&repack, "--window=%d", cfg.aggressive_window);
+ strvec_pushf(&repack_args, "--window=%d", cfg.aggressive_window);
}
if (opts.quiet)
- strvec_push(&repack, "-q");
+ strvec_push(&repack_args, "-q");
if (opts.auto_flag) {
if (cfg.detach_auto && opts.detach < 0)
@@ -928,7 +932,7 @@ int cmd_gc(int argc,
/*
* Auto-gc should be least intrusive as possible.
*/
- if (!need_to_gc(&cfg)) {
+ if (!need_to_gc(&cfg, &repack_args)) {
ret = 0;
goto out;
}
@@ -950,7 +954,7 @@ int cmd_gc(int argc,
find_base_packs(&keep_pack, cfg.big_pack_threshold);
}
- add_repack_all_option(&cfg, &keep_pack);
+ add_repack_all_option(&cfg, &keep_pack, &repack_args);
string_list_clear(&keep_pack, 0);
}
@@ -1012,9 +1016,9 @@ int cmd_gc(int argc,
repack_cmd.git_cmd = 1;
repack_cmd.close_object_store = 1;
- strvec_pushv(&repack_cmd.args, repack.v);
+ strvec_pushv(&repack_cmd.args, repack_args.v);
if (run_command(&repack_cmd))
- die(FAILED_RUN, repack.v[0]);
+ die(FAILED_RUN, repack_args.v[0]);
if (cfg.prune_expire) {
struct child_process prune_cmd = CHILD_PROCESS_INIT;
@@ -1053,7 +1057,7 @@ int cmd_gc(int argc,
!opts.quiet && !daemonized ? COMMIT_GRAPH_WRITE_PROGRESS : 0,
NULL);
- if (opts.auto_flag && too_many_loose_objects(&cfg))
+ if (opts.auto_flag && too_many_loose_objects(cfg.gc_auto_threshold))
warning(_("There are too many unreachable loose objects; "
"run 'git prune' to remove them."));
@@ -1065,6 +1069,7 @@ int cmd_gc(int argc,
out:
maintenance_run_opts_release(&opts);
+ strvec_clear(&repack_args);
gc_config_release(&cfg);
return 0;
}
@@ -1267,6 +1272,19 @@ static int maintenance_task_gc_background(struct maintenance_run_opts *opts,
return run_command(&child);
}
+static int gc_condition(struct gc_config *cfg)
+{
+ /*
+ * Note that it's fine to drop the repack arguments here: we execute
+ * git-gc(1) as a separate child process anyway, so it will compute
+ * these arguments again on its own.
+ */
+ struct strvec repack_args = STRVEC_INIT;
+ int ret = need_to_gc(cfg, &repack_args);
+ strvec_clear(&repack_args);
+ return ret;
+}
+
static int prune_packed(struct maintenance_run_opts *opts)
{
struct child_process child = CHILD_PROCESS_INIT;
@@ -1548,6 +1566,108 @@ static int maintenance_task_incremental_repack(struct maintenance_run_opts *opts
return 0;
}
+static int maintenance_task_geometric_repack(struct maintenance_run_opts *opts,
+ struct gc_config *cfg)
+{
+ struct pack_geometry geometry = {
+ .split_factor = 2,
+ };
+ struct pack_objects_args po_args = {
+ .local = 1,
+ };
+ struct existing_packs existing_packs = EXISTING_PACKS_INIT;
+ struct string_list kept_packs = STRING_LIST_INIT_DUP;
+ struct child_process child = CHILD_PROCESS_INIT;
+ int ret;
+
+ repo_config_get_int(the_repository, "maintenance.geometric-repack.splitFactor",
+ &geometry.split_factor);
+
+ existing_packs.repo = the_repository;
+ existing_packs_collect(&existing_packs, &kept_packs);
+ pack_geometry_init(&geometry, &existing_packs, &po_args);
+ pack_geometry_split(&geometry);
+
+ child.git_cmd = 1;
+
+ strvec_pushl(&child.args, "repack", "-d", "-l", NULL);
+ if (geometry.split < geometry.pack_nr)
+ strvec_pushf(&child.args, "--geometric=%d",
+ geometry.split_factor);
+ else
+ add_repack_all_option(cfg, NULL, &child.args);
+ if (opts->quiet)
+ strvec_push(&child.args, "--quiet");
+ if (the_repository->settings.core_multi_pack_index)
+ strvec_push(&child.args, "--write-midx");
+
+ if (run_command(&child)) {
+ ret = error(_("failed to perform geometric repack"));
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ existing_packs_release(&existing_packs);
+ pack_geometry_release(&geometry);
+ return ret;
+}
+
+static int geometric_repack_auto_condition(struct gc_config *cfg UNUSED)
+{
+ struct pack_geometry geometry = {
+ .split_factor = 2,
+ };
+ struct pack_objects_args po_args = {
+ .local = 1,
+ };
+ struct existing_packs existing_packs = EXISTING_PACKS_INIT;
+ struct string_list kept_packs = STRING_LIST_INIT_DUP;
+ int auto_value = 100;
+ int ret;
+
+ repo_config_get_int(the_repository, "maintenance.geometric-repack.auto",
+ &auto_value);
+ if (!auto_value)
+ return 0;
+ if (auto_value < 0)
+ return 1;
+
+ repo_config_get_int(the_repository, "maintenance.geometric-repack.splitFactor",
+ &geometry.split_factor);
+
+ existing_packs.repo = the_repository;
+ existing_packs_collect(&existing_packs, &kept_packs);
+ pack_geometry_init(&geometry, &existing_packs, &po_args);
+ pack_geometry_split(&geometry);
+
+ /*
+ * Whenever we would merge at least two packs with one another, we
+ * always perform the repack.
+ */
+ if (geometry.split) {
+ ret = 1;
+ goto out;
+ }
+
+ /*
+ * Otherwise, we estimate the number of loose objects to determine
+ * whether we want to create a new packfile or not.
+ */
+ if (too_many_loose_objects(auto_value)) {
+ ret = 1;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ existing_packs_release(&existing_packs);
+ pack_geometry_release(&geometry);
+ return ret;
+}
+
typedef int (*maintenance_task_fn)(struct maintenance_run_opts *opts,
struct gc_config *cfg);
typedef int (*maintenance_auto_fn)(struct gc_config *cfg);
@@ -1590,11 +1710,16 @@ static const struct maintenance_task tasks[] = {
.background = maintenance_task_incremental_repack,
.auto_condition = incremental_repack_auto_condition,
},
+ [TASK_GEOMETRIC_REPACK] = {
+ .name = "geometric-repack",
+ .background = maintenance_task_geometric_repack,
+ .auto_condition = geometric_repack_auto_condition,
+ },
[TASK_GC] = {
.name = "gc",
.foreground = maintenance_task_gc_foreground,
.background = maintenance_task_gc_background,
- .auto_condition = need_to_gc,
+ .auto_condition = gc_condition,
},
[TASK_COMMIT_GRAPH] = {
.name = "commit-graph",
@@ -1700,39 +1825,116 @@ static int maintenance_run_tasks(struct maintenance_run_opts *opts,
return result;
}
+enum maintenance_type {
+ /* As invoked via `git maintenance run --schedule=`. */
+ MAINTENANCE_TYPE_SCHEDULED = (1 << 0),
+ /* As invoked via `git maintenance run` and with `--auto`. */
+ MAINTENANCE_TYPE_MANUAL = (1 << 1),
+};
+
struct maintenance_strategy {
struct {
- int enabled;
+ unsigned type;
enum schedule_priority schedule;
} tasks[TASK__COUNT];
};
static const struct maintenance_strategy none_strategy = { 0 };
-static const struct maintenance_strategy default_strategy = {
+
+static const struct maintenance_strategy gc_strategy = {
.tasks = {
- [TASK_GC].enabled = 1,
+ [TASK_GC] = {
+ .type = MAINTENANCE_TYPE_MANUAL | MAINTENANCE_TYPE_SCHEDULED,
+ .schedule = SCHEDULE_DAILY,
+ },
},
};
+
static const struct maintenance_strategy incremental_strategy = {
.tasks = {
- [TASK_COMMIT_GRAPH].enabled = 1,
- [TASK_COMMIT_GRAPH].schedule = SCHEDULE_HOURLY,
- [TASK_PREFETCH].enabled = 1,
- [TASK_PREFETCH].schedule = SCHEDULE_HOURLY,
- [TASK_INCREMENTAL_REPACK].enabled = 1,
- [TASK_INCREMENTAL_REPACK].schedule = SCHEDULE_DAILY,
- [TASK_LOOSE_OBJECTS].enabled = 1,
- [TASK_LOOSE_OBJECTS].schedule = SCHEDULE_DAILY,
- [TASK_PACK_REFS].enabled = 1,
- [TASK_PACK_REFS].schedule = SCHEDULE_WEEKLY,
+ [TASK_COMMIT_GRAPH] = {
+ .type = MAINTENANCE_TYPE_SCHEDULED,
+ .schedule = SCHEDULE_HOURLY,
+ },
+ [TASK_PREFETCH] = {
+ .type = MAINTENANCE_TYPE_SCHEDULED,
+ .schedule = SCHEDULE_HOURLY,
+ },
+ [TASK_INCREMENTAL_REPACK] = {
+ .type = MAINTENANCE_TYPE_SCHEDULED,
+ .schedule = SCHEDULE_DAILY,
+ },
+ [TASK_LOOSE_OBJECTS] = {
+ .type = MAINTENANCE_TYPE_SCHEDULED,
+ .schedule = SCHEDULE_DAILY,
+ },
+ [TASK_PACK_REFS] = {
+ .type = MAINTENANCE_TYPE_SCHEDULED,
+ .schedule = SCHEDULE_WEEKLY,
+ },
+ /*
+ * Historically, the "incremental" strategy was only available
+ * in the context of scheduled maintenance when set up via
+ * "maintenance.strategy". We have later expanded that config
+ * to also cover manual maintenance.
+ *
+ * To retain backwards compatibility with the previous status
+ * quo we thus run git-gc(1) in case manual maintenance was
+ * requested. This is the same as the default strategy, which
+ * would have been in use beforehand.
+ */
+ [TASK_GC] = {
+ .type = MAINTENANCE_TYPE_MANUAL,
+ },
+ },
+};
+
+static const struct maintenance_strategy geometric_strategy = {
+ .tasks = {
+ [TASK_COMMIT_GRAPH] = {
+ .type = MAINTENANCE_TYPE_SCHEDULED | MAINTENANCE_TYPE_MANUAL,
+ .schedule = SCHEDULE_HOURLY,
+ },
+ [TASK_GEOMETRIC_REPACK] = {
+ .type = MAINTENANCE_TYPE_SCHEDULED | MAINTENANCE_TYPE_MANUAL,
+ .schedule = SCHEDULE_DAILY,
+ },
+ [TASK_PACK_REFS] = {
+ .type = MAINTENANCE_TYPE_SCHEDULED | MAINTENANCE_TYPE_MANUAL,
+ .schedule = SCHEDULE_DAILY,
+ },
+ [TASK_RERERE_GC] = {
+ .type = MAINTENANCE_TYPE_SCHEDULED | MAINTENANCE_TYPE_MANUAL,
+ .schedule = SCHEDULE_WEEKLY,
+ },
+ [TASK_REFLOG_EXPIRE] = {
+ .type = MAINTENANCE_TYPE_SCHEDULED | MAINTENANCE_TYPE_MANUAL,
+ .schedule = SCHEDULE_WEEKLY,
+ },
+ [TASK_WORKTREE_PRUNE] = {
+ .type = MAINTENANCE_TYPE_SCHEDULED | MAINTENANCE_TYPE_MANUAL,
+ .schedule = SCHEDULE_WEEKLY,
+ },
},
};
+static struct maintenance_strategy parse_maintenance_strategy(const char *name)
+{
+ if (!strcasecmp(name, "incremental"))
+ return incremental_strategy;
+ if (!strcasecmp(name, "gc"))
+ return gc_strategy;
+ if (!strcasecmp(name, "geometric"))
+ return geometric_strategy;
+ die(_("unknown maintenance strategy: '%s'"), name);
+}
+
static void initialize_task_config(struct maintenance_run_opts *opts,
const struct string_list *selected_tasks)
{
struct strbuf config_name = STRBUF_INIT;
struct maintenance_strategy strategy;
+ enum maintenance_type type;
const char *config_str;
/*
@@ -1760,19 +1962,20 @@ static void initialize_task_config(struct maintenance_run_opts *opts,
* - Unscheduled maintenance uses our default strategy.
*
* Both of these are affected by the gitconfig though, which may
- * override specific aspects of our strategy.
+ * override specific aspects of our strategy. Furthermore, both
+ * strategies can be overridden by setting "maintenance.strategy".
*/
if (opts->schedule) {
strategy = none_strategy;
-
- if (!repo_config_get_string_tmp(the_repository, "maintenance.strategy", &config_str)) {
- if (!strcasecmp(config_str, "incremental"))
- strategy = incremental_strategy;
- }
+ type = MAINTENANCE_TYPE_SCHEDULED;
} else {
- strategy = default_strategy;
+ strategy = gc_strategy;
+ type = MAINTENANCE_TYPE_MANUAL;
}
+ if (!repo_config_get_string_tmp(the_repository, "maintenance.strategy", &config_str))
+ strategy = parse_maintenance_strategy(config_str);
+
for (size_t i = 0; i < TASK__COUNT; i++) {
int config_value;
@@ -1780,8 +1983,8 @@ static void initialize_task_config(struct maintenance_run_opts *opts,
strbuf_addf(&config_name, "maintenance.%s.enabled",
tasks[i].name);
if (!repo_config_get_bool(the_repository, config_name.buf, &config_value))
- strategy.tasks[i].enabled = config_value;
- if (!strategy.tasks[i].enabled)
+ strategy.tasks[i].type = config_value ? type : 0;
+ if (!(strategy.tasks[i].type & type))
continue;
if (opts->schedule) {
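
As a hedged configuration sketch only (the key names are those read by the builtin/gc.c code above; the values are illustrative defaults, not a recommendation), selecting the new strategy and tuning the geometric-repack task could look like this in .git/config:

	[maintenance]
		strategy = geometric
	[maintenance "geometric-repack"]
		splitFactor = 2
		auto = 100

With maintenance.strategy unset, scheduled runs keep the "none" strategy and manual runs fall back to the gc strategy, as initialize_task_config() above wires it up; maintenance.<task>.enabled still overrides individual tasks either way.
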
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index b5454e5df1..0e4e9f8068 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -1706,8 +1706,8 @@ static int want_object_in_pack_mtime(const struct object_id *oid,
uint32_t found_mtime)
{
int want;
+ struct packfile_list_entry *e;
struct odb_source *source;
- struct list_head *pos;
if (!exclude && local) {
/*
@@ -1748,12 +1748,11 @@ static int want_object_in_pack_mtime(const struct object_id *oid,
}
}
- list_for_each(pos, packfile_store_get_packs_mru(the_repository->objects->packfiles)) {
- struct packed_git *p = list_entry(pos, struct packed_git, mru);
+ for (e = the_repository->objects->packfiles->packs.head; e; e = e->next) {
+ struct packed_git *p = e->pack;
want = want_object_in_pack_one(p, oid, exclude, found_pack, found_offset, found_mtime);
if (!exclude && want > 0)
- list_move(&p->mru,
- packfile_store_get_packs_mru(the_repository->objects->packfiles));
+ packfile_list_prepend(&the_repository->objects->packfiles->packs, p);
if (want != -1)
return want;
}
@@ -4389,27 +4388,27 @@ static void add_unreachable_loose_objects(struct rev_info *revs)
static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
{
- struct packfile_store *packs = the_repository->objects->packfiles;
- static struct packed_git *last_found = (void *)1;
+ static struct packed_git *last_found = NULL;
struct packed_git *p;
- p = (last_found != (void *)1) ? last_found :
- packfile_store_get_packs(packs);
+ if (last_found && find_pack_entry_one(oid, last_found))
+ return 1;
- while (p) {
- if ((!p->pack_local || p->pack_keep ||
- p->pack_keep_in_core) &&
- find_pack_entry_one(oid, p)) {
+ repo_for_each_pack(the_repository, p) {
+ /*
+ * We have already checked `last_found`, so there is no need to
+ * re-check here.
+ */
+ if (p == last_found)
+ continue;
+
+ if ((!p->pack_local || p->pack_keep || p->pack_keep_in_core) &&
+ find_pack_entry_one(oid, p)) {
last_found = p;
return 1;
}
- if (p == last_found)
- p = packfile_store_get_packs(packs);
- else
- p = p->next;
- if (p == last_found)
- p = p->next;
}
+
return 0;
}
diff --git a/builtin/repo.c b/builtin/repo.c
index bbb0966f2d..9d4749f79b 100644
--- a/builtin/repo.c
+++ b/builtin/repo.c
@@ -3,19 +3,27 @@
#include "builtin.h"
#include "environment.h"
#include "parse-options.h"
+#include "path-walk.h"
+#include "progress.h"
#include "quote.h"
+#include "ref-filter.h"
#include "refs.h"
+#include "revision.h"
#include "strbuf.h"
+#include "string-list.h"
#include "shallow.h"
+#include "utf8.h"
static const char *const repo_usage[] = {
"git repo info [--format=(keyvalue|nul)] [-z] [<key>...]",
+ "git repo structure [--format=(table|keyvalue|nul)]",
NULL
};
typedef int get_value_fn(struct repository *repo, struct strbuf *buf);
enum output_format {
+ FORMAT_TABLE,
FORMAT_KEYVALUE,
FORMAT_NUL_TERMINATED,
};
@@ -130,14 +138,16 @@ static int parse_format_cb(const struct option *opt,
*format = FORMAT_NUL_TERMINATED;
else if (!strcmp(arg, "keyvalue"))
*format = FORMAT_KEYVALUE;
+ else if (!strcmp(arg, "table"))
+ *format = FORMAT_TABLE;
else
die(_("invalid format '%s'"), arg);
return 0;
}
-static int repo_info(int argc, const char **argv, const char *prefix,
- struct repository *repo)
+static int cmd_repo_info(int argc, const char **argv, const char *prefix,
+ struct repository *repo)
{
enum output_format format = FORMAT_KEYVALUE;
struct option options[] = {
@@ -152,16 +162,380 @@ static int repo_info(int argc, const char **argv, const char *prefix,
};
argc = parse_options(argc, argv, prefix, options, repo_usage, 0);
+ if (format != FORMAT_KEYVALUE && format != FORMAT_NUL_TERMINATED)
+ die(_("unsupported output format"));
return print_fields(argc, argv, repo, format);
}
+struct ref_stats {
+ size_t branches;
+ size_t remotes;
+ size_t tags;
+ size_t others;
+};
+
+struct object_stats {
+ size_t tags;
+ size_t commits;
+ size_t trees;
+ size_t blobs;
+};
+
+struct repo_structure {
+ struct ref_stats refs;
+ struct object_stats objects;
+};
+
+struct stats_table {
+ struct string_list rows;
+
+ int name_col_width;
+ int value_col_width;
+};
+
+/*
+ * Holds the value column stored for each table row.
+ */
+struct stats_table_entry {
+ char *value;
+};
+
+static void stats_table_vaddf(struct stats_table *table,
+ struct stats_table_entry *entry,
+ const char *format, va_list ap)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct string_list_item *item;
+ char *formatted_name;
+ int name_width;
+
+ strbuf_vaddf(&buf, format, ap);
+ formatted_name = strbuf_detach(&buf, NULL);
+ name_width = utf8_strwidth(formatted_name);
+
+ item = string_list_append_nodup(&table->rows, formatted_name);
+ item->util = entry;
+
+ if (name_width > table->name_col_width)
+ table->name_col_width = name_width;
+ if (entry) {
+ int value_width = utf8_strwidth(entry->value);
+ if (value_width > table->value_col_width)
+ table->value_col_width = value_width;
+ }
+}
+
+static void stats_table_addf(struct stats_table *table, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ stats_table_vaddf(table, NULL, format, ap);
+ va_end(ap);
+}
+
+static void stats_table_count_addf(struct stats_table *table, size_t value,
+ const char *format, ...)
+{
+ struct stats_table_entry *entry;
+ va_list ap;
+
+ CALLOC_ARRAY(entry, 1);
+ entry->value = xstrfmt("%" PRIuMAX, (uintmax_t)value);
+
+ va_start(ap, format);
+ stats_table_vaddf(table, entry, format, ap);
+ va_end(ap);
+}
+
+static inline size_t get_total_reference_count(struct ref_stats *stats)
+{
+ return stats->branches + stats->remotes + stats->tags + stats->others;
+}
+
+static inline size_t get_total_object_count(struct object_stats *stats)
+{
+ return stats->tags + stats->commits + stats->trees + stats->blobs;
+}
+
+static void stats_table_setup_structure(struct stats_table *table,
+ struct repo_structure *stats)
+{
+ struct object_stats *objects = &stats->objects;
+ struct ref_stats *refs = &stats->refs;
+ size_t object_total;
+ size_t ref_total;
+
+ ref_total = get_total_reference_count(refs);
+ stats_table_addf(table, "* %s", _("References"));
+ stats_table_count_addf(table, ref_total, " * %s", _("Count"));
+ stats_table_count_addf(table, refs->branches, " * %s", _("Branches"));
+ stats_table_count_addf(table, refs->tags, " * %s", _("Tags"));
+ stats_table_count_addf(table, refs->remotes, " * %s", _("Remotes"));
+ stats_table_count_addf(table, refs->others, " * %s", _("Others"));
+
+ object_total = get_total_object_count(objects);
+ stats_table_addf(table, "");
+ stats_table_addf(table, "* %s", _("Reachable objects"));
+ stats_table_count_addf(table, object_total, " * %s", _("Count"));
+ stats_table_count_addf(table, objects->commits, " * %s", _("Commits"));
+ stats_table_count_addf(table, objects->trees, " * %s", _("Trees"));
+ stats_table_count_addf(table, objects->blobs, " * %s", _("Blobs"));
+ stats_table_count_addf(table, objects->tags, " * %s", _("Tags"));
+}
+
+static void stats_table_print_structure(const struct stats_table *table)
+{
+ const char *name_col_title = _("Repository structure");
+ const char *value_col_title = _("Value");
+ int name_col_width = utf8_strwidth(name_col_title);
+ int value_col_width = utf8_strwidth(value_col_title);
+ struct string_list_item *item;
+
+ if (table->name_col_width > name_col_width)
+ name_col_width = table->name_col_width;
+ if (table->value_col_width > value_col_width)
+ value_col_width = table->value_col_width;
+
+ printf("| %-*s | %-*s |\n", name_col_width, name_col_title,
+ value_col_width, value_col_title);
+ printf("| ");
+ for (int i = 0; i < name_col_width; i++)
+ putchar('-');
+ printf(" | ");
+ for (int i = 0; i < value_col_width; i++)
+ putchar('-');
+ printf(" |\n");
+
+ for_each_string_list_item(item, &table->rows) {
+ struct stats_table_entry *entry = item->util;
+ const char *value = "";
+
+ if (entry) {
+ struct stats_table_entry *entry = item->util;
+ value = entry->value;
+ }
+
+ printf("| %-*s | %*s |\n", name_col_width, item->string,
+ value_col_width, value);
+ }
+}
+
+static void stats_table_clear(struct stats_table *table)
+{
+ struct stats_table_entry *entry;
+ struct string_list_item *item;
+
+ for_each_string_list_item(item, &table->rows) {
+ entry = item->util;
+ if (entry)
+ free(entry->value);
+ }
+
+ string_list_clear(&table->rows, 1);
+}
+
+static void structure_keyvalue_print(struct repo_structure *stats,
+ char key_delim, char value_delim)
+{
+ printf("references.branches.count%c%" PRIuMAX "%c", key_delim,
+ (uintmax_t)stats->refs.branches, value_delim);
+ printf("references.tags.count%c%" PRIuMAX "%c", key_delim,
+ (uintmax_t)stats->refs.tags, value_delim);
+ printf("references.remotes.count%c%" PRIuMAX "%c", key_delim,
+ (uintmax_t)stats->refs.remotes, value_delim);
+ printf("references.others.count%c%" PRIuMAX "%c", key_delim,
+ (uintmax_t)stats->refs.others, value_delim);
+
+ printf("objects.commits.count%c%" PRIuMAX "%c", key_delim,
+ (uintmax_t)stats->objects.commits, value_delim);
+ printf("objects.trees.count%c%" PRIuMAX "%c", key_delim,
+ (uintmax_t)stats->objects.trees, value_delim);
+ printf("objects.blobs.count%c%" PRIuMAX "%c", key_delim,
+ (uintmax_t)stats->objects.blobs, value_delim);
+ printf("objects.tags.count%c%" PRIuMAX "%c", key_delim,
+ (uintmax_t)stats->objects.tags, value_delim);
+
+ fflush(stdout);
+}
+
+struct count_references_data {
+ struct ref_stats *stats;
+ struct rev_info *revs;
+ struct progress *progress;
+};
+
+static int count_references(const char *refname,
+ const char *referent UNUSED,
+ const struct object_id *oid,
+ int flags UNUSED, void *cb_data)
+{
+ struct count_references_data *data = cb_data;
+ struct ref_stats *stats = data->stats;
+ size_t ref_count;
+
+ switch (ref_kind_from_refname(refname)) {
+ case FILTER_REFS_BRANCHES:
+ stats->branches++;
+ break;
+ case FILTER_REFS_REMOTES:
+ stats->remotes++;
+ break;
+ case FILTER_REFS_TAGS:
+ stats->tags++;
+ break;
+ case FILTER_REFS_OTHERS:
+ stats->others++;
+ break;
+ default:
+ BUG("unexpected reference type");
+ }
+
+ /*
+ * While iterating through references for counting, also add OIDs in
+ * preparation for the path walk.
+ */
+ add_pending_oid(data->revs, NULL, oid, 0);
+
+ ref_count = get_total_reference_count(stats);
+ display_progress(data->progress, ref_count);
+
+ return 0;
+}
+
+static void structure_count_references(struct ref_stats *stats,
+ struct rev_info *revs,
+ struct repository *repo,
+ int show_progress)
+{
+ struct count_references_data data = {
+ .stats = stats,
+ .revs = revs,
+ };
+
+ if (show_progress)
+ data.progress = start_delayed_progress(repo,
+ _("Counting references"), 0);
+
+ refs_for_each_ref(get_main_ref_store(repo), count_references, &data);
+ stop_progress(&data.progress);
+}
+
+struct count_objects_data {
+ struct object_stats *stats;
+ struct progress *progress;
+};
+
+static int count_objects(const char *path UNUSED, struct oid_array *oids,
+ enum object_type type, void *cb_data)
+{
+ struct count_objects_data *data = cb_data;
+ struct object_stats *stats = data->stats;
+ size_t object_count;
+
+ switch (type) {
+ case OBJ_TAG:
+ stats->tags += oids->nr;
+ break;
+ case OBJ_COMMIT:
+ stats->commits += oids->nr;
+ break;
+ case OBJ_TREE:
+ stats->trees += oids->nr;
+ break;
+ case OBJ_BLOB:
+ stats->blobs += oids->nr;
+ break;
+ default:
+ BUG("invalid object type");
+ }
+
+ object_count = get_total_object_count(stats);
+ display_progress(data->progress, object_count);
+
+ return 0;
+}
+
+static void structure_count_objects(struct object_stats *stats,
+ struct rev_info *revs,
+ struct repository *repo, int show_progress)
+{
+ struct path_walk_info info = PATH_WALK_INFO_INIT;
+ struct count_objects_data data = {
+ .stats = stats,
+ };
+
+ info.revs = revs;
+ info.path_fn = count_objects;
+ info.path_fn_data = &data;
+
+ if (show_progress)
+ data.progress = start_delayed_progress(repo, _("Counting objects"), 0);
+
+ walk_objects_by_path(&info);
+ path_walk_info_clear(&info);
+ stop_progress(&data.progress);
+}
+
+static int cmd_repo_structure(int argc, const char **argv, const char *prefix,
+ struct repository *repo)
+{
+ struct stats_table table = {
+ .rows = STRING_LIST_INIT_DUP,
+ };
+ enum output_format format = FORMAT_TABLE;
+ struct repo_structure stats = { 0 };
+ struct rev_info revs;
+ int show_progress = -1;
+ struct option options[] = {
+ OPT_CALLBACK_F(0, "format", &format, N_("format"),
+ N_("output format"),
+ PARSE_OPT_NONEG, parse_format_cb),
+ OPT_BOOL(0, "progress", &show_progress, N_("show progress")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options, repo_usage, 0);
+ if (argc)
+ usage(_("too many arguments"));
+
+ repo_init_revisions(repo, &revs, prefix);
+
+ if (show_progress < 0)
+ show_progress = isatty(2);
+
+ structure_count_references(&stats.refs, &revs, repo, show_progress);
+ structure_count_objects(&stats.objects, &revs, repo, show_progress);
+
+ switch (format) {
+ case FORMAT_TABLE:
+ stats_table_setup_structure(&table, &stats);
+ stats_table_print_structure(&table);
+ break;
+ case FORMAT_KEYVALUE:
+ structure_keyvalue_print(&stats, '=', '\n');
+ break;
+ case FORMAT_NUL_TERMINATED:
+ structure_keyvalue_print(&stats, '\n', '\0');
+ break;
+ default:
+ BUG("invalid output format");
+ }
+
+ stats_table_clear(&table);
+ release_revisions(&revs);
+
+ return 0;
+}
+
int cmd_repo(int argc, const char **argv, const char *prefix,
struct repository *repo)
{
parse_opt_subcommand_fn *fn = NULL;
struct option options[] = {
- OPT_SUBCOMMAND("info", &fn, repo_info),
+ OPT_SUBCOMMAND("info", &fn, cmd_repo_info),
+ OPT_SUBCOMMAND("structure", &fn, cmd_repo_structure),
OPT_END()
};
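
A hedged usage sketch of the subcommand added above; the counts are placeholders, not real output:

	$ git repo structure --format=keyvalue
	references.branches.count=4
	references.tags.count=12
	references.remotes.count=3
	references.others.count=0
	objects.commits.count=1500
	objects.trees.count=9000
	objects.blobs.count=11000
	objects.tags.count=12

The default --format=table prints the same data as a two-column table headed "Repository structure" / "Value", and --format=nul separates each key from its value with a newline and terminates each record with NUL, matching structure_keyvalue_print() above.
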
diff --git a/commit-reach.c b/commit-reach.c
index a339e41aa4..cc18c86d3b 100644
--- a/commit-reach.c
+++ b/commit-reach.c
@@ -60,6 +60,7 @@ static int paint_down_to_common(struct repository *r,
struct prio_queue queue = { compare_commits_by_gen_then_commit_date };
int i;
timestamp_t last_gen = GENERATION_NUMBER_INFINITY;
+ struct commit_list **tail = result;
if (!min_generation && !corrected_commit_dates_enabled(r))
queue.compare = compare_commits_by_commit_date;
@@ -95,7 +96,7 @@ static int paint_down_to_common(struct repository *r,
if (flags == (PARENT1 | PARENT2)) {
if (!(commit->object.flags & RESULT)) {
commit->object.flags |= RESULT;
- commit_list_insert_by_date(commit, result);
+ tail = commit_list_append(commit, tail);
}
/* Mark parents of a found merge stale */
flags |= STALE;
@@ -128,6 +129,7 @@ static int paint_down_to_common(struct repository *r,
}
clear_prio_queue(&queue);
+ commit_list_sort_by_date(result);
return 0;
}
@@ -136,7 +138,7 @@ static int merge_bases_many(struct repository *r,
struct commit **twos,
struct commit_list **result)
{
- struct commit_list *list = NULL;
+ struct commit_list *list = NULL, **tail = result;
int i;
for (i = 0; i < n; i++) {
@@ -171,8 +173,9 @@ static int merge_bases_many(struct repository *r,
while (list) {
struct commit *commit = pop_commit(&list);
if (!(commit->object.flags & STALE))
- commit_list_insert_by_date(commit, result);
+ tail = commit_list_append(commit, tail);
}
+ commit_list_sort_by_date(result);
return 0;
}
@@ -425,7 +428,7 @@ static int get_merge_bases_many_0(struct repository *r,
int cleanup,
struct commit_list **result)
{
- struct commit_list *list;
+ struct commit_list *list, **tail = result;
struct commit **rslt;
size_t cnt, i;
int ret;
@@ -461,7 +464,8 @@ static int get_merge_bases_many_0(struct repository *r,
return -1;
}
for (i = 0; i < cnt; i++)
- commit_list_insert_by_date(rslt[i], result);
+ tail = commit_list_append(rslt[i], tail);
+ commit_list_sort_by_date(result);
free(rslt);
return 0;
}
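
A plausible reading of the commit-reach.c change above (my own reasoning, not stated in the patch): inserting k merge-base candidates one at a time with commit_list_insert_by_date() walks the sorted list on every insertion, which is O(k^2) in the worst case, while appending each candidate in O(1) and calling commit_list_sort_by_date() once costs O(k log k). The resulting order is still by date; only the cost of building the list changes.
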
diff --git a/config.c b/config.c
index 71b136bf7f..f1def0dcfb 100644
--- a/config.c
+++ b/config.c
@@ -1278,7 +1278,7 @@ int git_config_string(char **dest, const char *var, const char *value)
int git_config_pathname(char **dest, const char *var, const char *value)
{
- int is_optional;
+ bool is_optional;
char *path;
if (!value)
diff --git a/contrib/credential/libsecret/Makefile b/contrib/credential/libsecret/Makefile
index 7cacc57681..9309cfb78c 100644
--- a/contrib/credential/libsecret/Makefile
+++ b/contrib/credential/libsecret/Makefile
@@ -10,6 +10,7 @@ gitexecdir ?= $(prefix)/libexec/git-core
CC ?= gcc
CFLAGS ?= -g -O2 -Wall
PKG_CONFIG ?= pkg-config
+INSTALL ?= install
RM ?= rm -f
INCS:=$(shell $(PKG_CONFIG) --cflags libsecret-1 glib-2.0)
@@ -21,7 +22,11 @@ LIBS:=$(shell $(PKG_CONFIG) --libs libsecret-1 glib-2.0)
git-credential-libsecret: git-credential-libsecret.o
$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) $(LIBS)
+install: git-credential-libsecret
+ $(INSTALL) -d -m 755 $(DESTDIR)$(gitexecdir)
+ $(INSTALL) -m 755 $< $(DESTDIR)$(gitexecdir)
+
clean:
$(RM) git-credential-libsecret git-credential-libsecret.o
-.PHONY: all clean
+.PHONY: all install clean
diff --git a/contrib/credential/osxkeychain/Makefile b/contrib/credential/osxkeychain/Makefile
index c7d9121022..9680717abe 100644
--- a/contrib/credential/osxkeychain/Makefile
+++ b/contrib/credential/osxkeychain/Makefile
@@ -9,6 +9,7 @@ gitexecdir ?= $(prefix)/libexec/git-core
CC ?= gcc
CFLAGS ?= -g -O2 -Wall
+INSTALL ?= install
RM ?= rm -f
%.o: %.c
@@ -18,7 +19,11 @@ git-credential-osxkeychain: git-credential-osxkeychain.o
$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) \
-framework Security -framework CoreFoundation
+install: git-credential-osxkeychain
+ $(INSTALL) -d -m 755 $(DESTDIR)$(gitexecdir)
+ $(INSTALL) -m 755 $< $(DESTDIR)$(gitexecdir)
+
clean:
$(RM) git-credential-osxkeychain git-credential-osxkeychain.o
-.PHONY: all clean
+.PHONY: all install clean
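
Both credential-helper Makefiles above gain an install target. A hedged invocation example (the prefix is a placeholder; adjust to your installation):

	$ make -C contrib/credential/libsecret install prefix=/usr/local

which creates $(DESTDIR)$(gitexecdir) if needed and copies the helper there with mode 755, per the new $(INSTALL) rules.
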
diff --git a/diff.c b/diff.c
index 22415aecee..a1961526c0 100644
--- a/diff.c
+++ b/diff.c
@@ -1351,7 +1351,7 @@ static void emit_diff_symbol_from_struct(struct diff_options *o,
int len = eds->len;
unsigned flags = eds->flags;
- if (o->dry_run)
+ if (!o->file)
return;
switch (s) {
@@ -3765,9 +3765,9 @@ static void builtin_diff(const char *name_a,
if (o->word_diff)
init_diff_words_data(&ecbdata, o, one, two);
- if (o->dry_run) {
+ if (!o->file) {
/*
- * Unlike the !dry_run case, we need to ignore the
+ * Unlike the normal output case, we need to ignore the
* return value from xdi_diff_outf() here, because
* xdi_diff_outf() takes non-zero return from its
* callback function as a sign of error and returns
@@ -4423,7 +4423,6 @@ static void run_external_diff(const struct external_diff *pgm,
{
struct child_process cmd = CHILD_PROCESS_INIT;
struct diff_queue_struct *q = &diff_queued_diff;
- int quiet = !(o->output_format & DIFF_FORMAT_PATCH) || o->dry_run;
int rc;
/*
@@ -4432,7 +4431,7 @@ static void run_external_diff(const struct external_diff *pgm,
* external diff program lacks the ability to tell us whether
* it's empty then we consider it non-empty without even asking.
*/
- if (!pgm->trust_exit_code && quiet) {
+ if (!pgm->trust_exit_code && !o->file) {
o->found_changes = 1;
return;
}
@@ -4457,7 +4456,10 @@ static void run_external_diff(const struct external_diff *pgm,
diff_free_filespec_data(one);
diff_free_filespec_data(two);
cmd.use_shell = 1;
- cmd.no_stdout = quiet;
+ if (!o->file)
+ cmd.no_stdout = 1;
+ else if (o->file != stdout)
+ cmd.out = xdup(fileno(o->file));
rc = run_command(&cmd);
if (!pgm->trust_exit_code && rc == 0)
o->found_changes = 1;
@@ -4618,7 +4620,7 @@ static void run_diff_cmd(const struct external_diff *pgm,
p->status == DIFF_STATUS_RENAMED)
o->found_changes = 1;
} else {
- if (!o->dry_run)
+ if (o->file)
fprintf(o->file, "* Unmerged path %s\n", name);
o->found_changes = 1;
}
@@ -6196,15 +6198,15 @@ static void diff_flush_patch(struct diff_filepair *p, struct diff_options *o)
/* return 1 if any change is found; otherwise, return 0 */
static int diff_flush_patch_quietly(struct diff_filepair *p, struct diff_options *o)
{
- int saved_dry_run = o->dry_run;
+ FILE *saved_file = o->file;
int saved_found_changes = o->found_changes;
int ret;
- o->dry_run = 1;
+ o->file = NULL;
o->found_changes = 0;
diff_flush_patch(p, o);
ret = o->found_changes;
- o->dry_run = saved_dry_run;
+ o->file = saved_file;
o->found_changes |= saved_found_changes;
return ret;
}
@@ -6832,38 +6834,18 @@ void diff_flush(struct diff_options *options)
DIFF_FORMAT_NAME |
DIFF_FORMAT_NAME_STATUS |
DIFF_FORMAT_CHECKDIFF)) {
- /*
- * make sure diff_Flush_patch_quietly() to be silent.
- */
- FILE *dev_null = NULL;
- int saved_color_moved = options->color_moved;
-
- if (options->flags.diff_from_contents) {
- dev_null = xfopen("/dev/null", "w");
- options->color_moved = 0;
- }
for (i = 0; i < q->nr; i++) {
struct diff_filepair *p = q->queue[i];
if (!check_pair_status(p))
continue;
- if (options->flags.diff_from_contents) {
- FILE *saved_file = options->file;
- int found_changes;
+ if (options->flags.diff_from_contents &&
+ !diff_flush_patch_quietly(p, options))
+ continue;
- options->file = dev_null;
- found_changes = diff_flush_patch_quietly(p, options);
- options->file = saved_file;
- if (!found_changes)
- continue;
- }
flush_one_pair(p, options);
}
- if (options->flags.diff_from_contents) {
- fclose(dev_null);
- options->color_moved = saved_color_moved;
- }
separator++;
}
@@ -6914,15 +6896,6 @@ void diff_flush(struct diff_options *options)
if (output_format & DIFF_FORMAT_NO_OUTPUT &&
options->flags.exit_with_status &&
options->flags.diff_from_contents) {
- /*
- * run diff_flush_patch for the exit status. setting
- * options->file to /dev/null should be safe, because we
- * aren't supposed to produce any output anyway.
- */
- diff_free_file(options);
- options->file = xfopen("/dev/null", "w");
- options->close_file = 1;
- options->color_moved = 0;
for (i = 0; i < q->nr; i++) {
struct diff_filepair *p = q->queue[i];
if (check_pair_status(p))
diff --git a/diff.h b/diff.h
index 2fa256c3ef..31eedd5c0c 100644
--- a/diff.h
+++ b/diff.h
@@ -408,8 +408,6 @@ struct diff_options {
#define COLOR_MOVED_WS_ERROR (1<<0)
unsigned color_moved_ws_handling;
- bool dry_run;
-
struct repository *repo;
struct strmap *additional_path_headers;
diff --git a/dir.c b/dir.c
index f683f8ba49..b00821f294 100644
--- a/dir.c
+++ b/dir.c
@@ -1388,18 +1388,25 @@ int match_pathname(const char *pathname, int pathlen,
if (fspathncmp(pattern, name, prefix))
return 0;
- pattern += prefix;
- patternlen -= prefix;
- name += prefix;
- namelen -= prefix;
/*
* If the whole pattern did not have a wildcard,
* then our prefix match is all we need; we
* do not need to call fnmatch at all.
*/
- if (!patternlen && !namelen)
+ if (patternlen == prefix && namelen == prefix)
return 1;
+
+ /*
+ * Retain one character of the prefix to
+ * pass to fnmatch, which lets it distinguish
+ * the start of a directory component correctly.
+ */
+ prefix--;
+ pattern += prefix;
+ patternlen -= prefix;
+ name += prefix;
+ namelen -= prefix;
}
return fnmatch_icase_mem(pattern, patternlen,
diff --git a/gpg-interface.c b/gpg-interface.c
index d1e88da8c1..f680ed38c0 100644
--- a/gpg-interface.c
+++ b/gpg-interface.c
@@ -443,7 +443,7 @@ static void parse_ssh_output(struct signature_check *sigc)
key = strstr(line, "key ");
if (key) {
- sigc->fingerprint = xstrdup(strstr(line, "key ") + 4);
+ sigc->fingerprint = xstrdup(key + 4);
sigc->key = xstrdup(sigc->fingerprint);
} else {
/*
@@ -879,7 +879,7 @@ static char *get_default_ssh_signing_key(void)
n = split_cmdline(key_command, &argv);
if (n < 0)
- die("malformed build-time gpg.ssh.defaultKeyCommand: %s",
+ die(_("malformed build-time gpg.ssh.defaultKeyCommand: %s"),
split_cmdline_strerror(n));
strvec_pushv(&ssh_default_key.args, argv);
diff --git a/gpg-interface.h b/gpg-interface.h
index 50487aa148..ead1ed6967 100644
--- a/gpg-interface.h
+++ b/gpg-interface.h
@@ -3,9 +3,9 @@
struct strbuf;
-#define GPG_VERIFY_VERBOSE 1
-#define GPG_VERIFY_RAW 2
-#define GPG_VERIFY_OMIT_STATUS 4
+#define GPG_VERIFY_VERBOSE (1<<0)
+#define GPG_VERIFY_RAW (1<<1)
+#define GPG_VERIFY_OMIT_STATUS (1<<2)
enum signature_trust_level {
TRUST_UNDEFINED,
diff --git a/http-push.c b/http-push.c
index a1c01e3b9b..d86ce77119 100644
--- a/http-push.c
+++ b/http-push.c
@@ -104,7 +104,7 @@ struct repo {
int has_info_refs;
int can_update_info_refs;
int has_info_packs;
- struct packed_git *packs;
+ struct packfile_list packs;
struct remote_lock *locks;
};
@@ -311,7 +311,7 @@ static void start_fetch_packed(struct transfer_request *request)
struct transfer_request *check_request = request_queue_head;
struct http_pack_request *preq;
- target = find_oid_pack(&request->obj->oid, repo->packs);
+ target = packfile_list_find_oid(repo->packs.head, &request->obj->oid);
if (!target) {
fprintf(stderr, "Unable to fetch %s, will not be able to update server info refs\n", oid_to_hex(&request->obj->oid));
repo->can_update_info_refs = 0;
@@ -683,7 +683,7 @@ static int add_send_request(struct object *obj, struct remote_lock *lock)
get_remote_object_list(obj->oid.hash[0]);
if (obj->flags & (REMOTE | PUSHING))
return 0;
- target = find_oid_pack(&obj->oid, repo->packs);
+ target = packfile_list_find_oid(repo->packs.head, &obj->oid);
if (target) {
obj->flags |= REMOTE;
return 0;
diff --git a/http-walker.c b/http-walker.c
index 0f7ae46d7f..e886e64866 100644
--- a/http-walker.c
+++ b/http-walker.c
@@ -15,7 +15,7 @@
struct alt_base {
char *base;
int got_indices;
- struct packed_git *packs;
+ struct packfile_list packs;
struct alt_base *next;
};
@@ -324,11 +324,8 @@ static void process_alternates_response(void *callback_data)
} else if (is_alternate_allowed(target.buf)) {
warning("adding alternate object store: %s",
target.buf);
- newalt = xmalloc(sizeof(*newalt));
- newalt->next = NULL;
+ CALLOC_ARRAY(newalt, 1);
newalt->base = strbuf_detach(&target, NULL);
- newalt->got_indices = 0;
- newalt->packs = NULL;
while (tail->next != NULL)
tail = tail->next;
@@ -435,7 +432,7 @@ static int http_fetch_pack(struct walker *walker, struct alt_base *repo,
if (fetch_indices(walker, repo))
return -1;
- target = find_oid_pack(oid, repo->packs);
+ target = packfile_list_find_oid(repo->packs.head, oid);
if (!target)
return -1;
close_pack_index(target);
@@ -584,17 +581,15 @@ static void cleanup(struct walker *walker)
if (data) {
alt = data->alt;
while (alt) {
- struct packed_git *pack;
+ struct packfile_list_entry *e;
alt_next = alt->next;
- pack = alt->packs;
- while (pack) {
- struct packed_git *pack_next = pack->next;
- close_pack(pack);
- free(pack);
- pack = pack_next;
+ for (e = alt->packs.head; e; e = e->next) {
+ close_pack(e->pack);
+ free(e->pack);
}
+ packfile_list_clear(&alt->packs);
free(alt->base);
free(alt);
@@ -612,14 +607,11 @@ struct walker *get_http_walker(const char *url)
struct walker_data *data = xmalloc(sizeof(struct walker_data));
struct walker *walker = xmalloc(sizeof(struct walker));
- data->alt = xmalloc(sizeof(*data->alt));
+ CALLOC_ARRAY(data->alt, 1);
data->alt->base = xstrdup(url);
for (s = data->alt->base + strlen(data->alt->base) - 1; *s == '/'; --s)
*s = 0;
- data->alt->got_indices = 0;
- data->alt->packs = NULL;
- data->alt->next = NULL;
data->got_alternates = -1;
walker->corrupt_object_found = 0;
diff --git a/http.c b/http.c
index 17130823f0..41f850db16 100644
--- a/http.c
+++ b/http.c
@@ -2413,8 +2413,9 @@ static char *fetch_pack_index(unsigned char *hash, const char *base_url)
return tmp;
}
-static int fetch_and_setup_pack_index(struct packed_git **packs_head,
- unsigned char *sha1, const char *base_url)
+static int fetch_and_setup_pack_index(struct packfile_list *packs,
+ unsigned char *sha1,
+ const char *base_url)
{
struct packed_git *new_pack, *p;
char *tmp_idx = NULL;
@@ -2448,12 +2449,11 @@ static int fetch_and_setup_pack_index(struct packed_git **packs_head,
if (ret)
return -1;
- new_pack->next = *packs_head;
- *packs_head = new_pack;
+ packfile_list_prepend(packs, new_pack);
return 0;
}
-int http_get_info_packs(const char *base_url, struct packed_git **packs_head)
+int http_get_info_packs(const char *base_url, struct packfile_list *packs)
{
struct http_get_options options = {0};
int ret = 0;
@@ -2477,7 +2477,7 @@ int http_get_info_packs(const char *base_url, struct packed_git **packs_head)
!parse_oid_hex(data, &oid, &data) &&
skip_prefix(data, ".pack", &data) &&
(*data == '\n' || *data == '\0')) {
- fetch_and_setup_pack_index(packs_head, oid.hash, base_url);
+ fetch_and_setup_pack_index(packs, oid.hash, base_url);
} else {
data = strchrnul(data, '\n');
}
@@ -2541,14 +2541,9 @@ cleanup:
}
void http_install_packfile(struct packed_git *p,
- struct packed_git **list_to_remove_from)
+ struct packfile_list *list_to_remove_from)
{
- struct packed_git **lst = list_to_remove_from;
-
- while (*lst != p)
- lst = &((*lst)->next);
- *lst = (*lst)->next;
-
+ packfile_list_remove(list_to_remove_from, p);
packfile_store_add_pack(the_repository->objects->packfiles, p);
}
diff --git a/http.h b/http.h
index 553e16205c..f9d4593404 100644
--- a/http.h
+++ b/http.h
@@ -2,6 +2,7 @@
#define HTTP_H
struct packed_git;
+struct packfile_list;
#include "git-zlib.h"
@@ -190,7 +191,7 @@ struct curl_slist *http_append_auth_header(const struct credential *c,
/* Helpers for fetching packs */
int http_get_info_packs(const char *base_url,
- struct packed_git **packs_head);
+ struct packfile_list *packs);
/* Helper for getting Accept-Language header */
const char *http_get_accept_language_header(void);
@@ -226,7 +227,7 @@ void release_http_pack_request(struct http_pack_request *preq);
* from http_get_info_packs() and have chosen a specific pack to fetch.
*/
void http_install_packfile(struct packed_git *p,
- struct packed_git **list_to_remove_from);
+ struct packfile_list *list_to_remove_from);
/* Helpers for fetching object */
struct http_object_request {
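
A sketch of the updated calling convention for these helpers follows. It is not part of this patch; the actual download through an http_pack_request is elided and error handling is kept to a bare minimum.

    /*
     * Minimal sketch, not part of this patch: the caller owns a
     * packfile_list, asks the server for its packs, picks the one whose
     * index contains "oid", downloads it (elided), and finally hands it
     * over to the repository's packfile store.
     */
    #include "http.h"
    #include "packfile.h"

    static int install_pack_containing(const char *base_url,
    				   const struct object_id *oid)
    {
    	struct packfile_list packs = { 0 };
    	struct packed_git *target;

    	if (http_get_info_packs(base_url, &packs))
    		return -1;

    	target = packfile_list_find_oid(packs.head, oid);
    	if (!target)
    		return -1;

    	/* ... fetch the pack itself via an http_pack_request ... */

    	/* Unlinks "target" from "packs" and adds it to the odb's store. */
    	http_install_packfile(target, &packs);

    	packfile_list_clear(&packs);
    	return 0;
    }
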
diff --git a/midx.c b/midx.c
index 1d6269f957..24e1e72175 100644
--- a/midx.c
+++ b/midx.c
@@ -462,8 +462,6 @@ int prepare_midx_pack(struct multi_pack_index *m,
m->pack_names[pack_int_id]);
p = packfile_store_load_pack(r->objects->packfiles,
pack_name.buf, m->source->local);
- if (p)
- list_add_tail(&p->mru, &r->objects->packfiles->mru);
strbuf_release(&pack_name);
if (!p) {
diff --git a/packfile.c b/packfile.c
index 1ae2b2fe1e..378b0b1920 100644
--- a/packfile.c
+++ b/packfile.c
@@ -47,6 +47,89 @@ static size_t pack_mapped;
#define SZ_FMT PRIuMAX
static inline uintmax_t sz_fmt(size_t s) { return s; }
+void packfile_list_clear(struct packfile_list *list)
+{
+ struct packfile_list_entry *e, *next;
+
+ for (e = list->head; e; e = next) {
+ next = e->next;
+ free(e);
+ }
+
+ list->head = list->tail = NULL;
+}
+
+static struct packfile_list_entry *packfile_list_remove_internal(struct packfile_list *list,
+ struct packed_git *pack)
+{
+ struct packfile_list_entry *e, *prev;
+
+ for (e = list->head, prev = NULL; e; prev = e, e = e->next) {
+ if (e->pack != pack)
+ continue;
+
+ if (prev)
+ prev->next = e->next;
+ if (list->head == e)
+ list->head = e->next;
+ if (list->tail == e)
+ list->tail = prev;
+
+ return e;
+ }
+
+ return NULL;
+}
+
+void packfile_list_remove(struct packfile_list *list, struct packed_git *pack)
+{
+ free(packfile_list_remove_internal(list, pack));
+}
+
+void packfile_list_prepend(struct packfile_list *list, struct packed_git *pack)
+{
+ struct packfile_list_entry *entry;
+
+ entry = packfile_list_remove_internal(list, pack);
+ if (!entry) {
+ entry = xmalloc(sizeof(*entry));
+ entry->pack = pack;
+ }
+ entry->next = list->head;
+
+ list->head = entry;
+ if (!list->tail)
+ list->tail = entry;
+}
+
+void packfile_list_append(struct packfile_list *list, struct packed_git *pack)
+{
+ struct packfile_list_entry *entry;
+
+ entry = packfile_list_remove_internal(list, pack);
+ if (!entry) {
+ entry = xmalloc(sizeof(*entry));
+ entry->pack = pack;
+ }
+ entry->next = NULL;
+
+ if (list->tail) {
+ list->tail->next = entry;
+ list->tail = entry;
+ } else {
+ list->head = list->tail = entry;
+ }
+}
+
+struct packed_git *packfile_list_find_oid(struct packfile_list_entry *packs,
+ const struct object_id *oid)
+{
+ for (; packs; packs = packs->next)
+ if (find_pack_entry_one(oid, packs->pack))
+ return packs->pack;
+ return NULL;
+}
+
void pack_report(struct repository *repo)
{
fprintf(stderr,
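
For readers of the new list API above, a minimal usage sketch follows. It is not part of this patch; "p1", "p2" and "oid" stand in for packs and an object ID obtained elsewhere (for example via add_packed_git()).

    /*
     * Minimal sketch of the packfile_list API introduced above; not part
     * of this patch.
     */
    #include "packfile.h"

    static struct packed_git *packfile_list_example(struct packed_git *p1,
    						struct packed_git *p2,
    						const struct object_id *oid)
    {
    	struct packfile_list list = { 0 };
    	struct packed_git *hit;

    	packfile_list_append(&list, p1);	/* list: p1 */
    	packfile_list_prepend(&list, p2);	/* list: p2, p1 */
    	packfile_list_prepend(&list, p1);	/* re-adding moves p1 to the front */

    	/* Which listed pack, if any, contains "oid"? */
    	hit = packfile_list_find_oid(list.head, oid);

    	packfile_list_remove(&list, p2);	/* frees the entry, not the pack */
    	packfile_list_clear(&list);		/* frees remaining entries only */

    	return hit;
    }
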
@@ -273,13 +356,14 @@ static void scan_windows(struct packed_git *p,
static int unuse_one_window(struct packed_git *current)
{
- struct packed_git *p, *lru_p = NULL;
+ struct packfile_list_entry *e;
+ struct packed_git *lru_p = NULL;
struct pack_window *lru_w = NULL, *lru_l = NULL;
if (current)
scan_windows(current, &lru_p, &lru_w, &lru_l);
- for (p = current->repo->objects->packfiles->packs; p; p = p->next)
- scan_windows(p, &lru_p, &lru_w, &lru_l);
+ for (e = current->repo->objects->packfiles->packs.head; e; e = e->next)
+ scan_windows(e->pack, &lru_p, &lru_w, &lru_l);
if (lru_p) {
munmap(lru_w->base, lru_w->len);
pack_mapped -= lru_w->len;
@@ -459,14 +543,15 @@ static void find_lru_pack(struct packed_git *p, struct packed_git **lru_p, struc
static int close_one_pack(struct repository *r)
{
- struct packed_git *p, *lru_p = NULL;
+ struct packfile_list_entry *e;
+ struct packed_git *lru_p = NULL;
struct pack_window *mru_w = NULL;
int accept_windows_inuse = 1;
- for (p = r->objects->packfiles->packs; p; p = p->next) {
- if (p->pack_fd == -1)
+ for (e = r->objects->packfiles->packs.head; e; e = e->next) {
+ if (e->pack->pack_fd == -1)
continue;
- find_lru_pack(p, &lru_p, &mru_w, &accept_windows_inuse);
+ find_lru_pack(e->pack, &lru_p, &mru_w, &accept_windows_inuse);
}
if (lru_p)
@@ -785,11 +870,8 @@ void packfile_store_add_pack(struct packfile_store *store,
if (pack->pack_fd != -1)
pack_open_fds++;
- pack->next = store->packs;
- store->packs = pack;
-
- hashmap_entry_init(&pack->packmap_ent, strhash(pack->pack_name));
- hashmap_add(&store->map, &pack->packmap_ent);
+ packfile_list_append(&store->packs, pack);
+ strmap_put(&store->packs_by_path, pack->pack_name, pack);
}
struct packed_git *packfile_store_load_pack(struct packfile_store *store,
@@ -806,8 +888,7 @@ struct packed_git *packfile_store_load_pack(struct packfile_store *store,
strbuf_strip_suffix(&key, ".idx");
strbuf_addstr(&key, ".pack");
- p = hashmap_get_entry_from_hash(&store->map, strhash(key.buf), key.buf,
- struct packed_git, packmap_ent);
+ p = strmap_get(&store->packs_by_path, key.buf);
if (!p) {
p = add_packed_git(store->odb->repo, idx_path,
strlen(idx_path), local);
@@ -965,9 +1046,10 @@ static void prepare_packed_git_one(struct odb_source *source)
string_list_clear(data.garbage, 0);
}
-DEFINE_LIST_SORT(static, sort_packs, struct packed_git, next);
+DEFINE_LIST_SORT(static, sort_packs, struct packfile_list_entry, next);
-static int sort_pack(const struct packed_git *a, const struct packed_git *b)
+static int sort_pack(const struct packfile_list_entry *a,
+ const struct packfile_list_entry *b)
{
int st;
@@ -977,7 +1059,7 @@ static int sort_pack(const struct packed_git *a, const struct packed_git *b)
* remote ones could be on a network mounted filesystem.
* Favor local ones for these reasons.
*/
- st = a->pack_local - b->pack_local;
+ st = a->pack->pack_local - b->pack->pack_local;
if (st)
return -st;
@@ -986,23 +1068,13 @@ static int sort_pack(const struct packed_git *a, const struct packed_git *b)
* and more recent objects tend to get accessed more
* often.
*/
- if (a->mtime < b->mtime)
+ if (a->pack->mtime < b->pack->mtime)
return 1;
- else if (a->mtime == b->mtime)
+ else if (a->pack->mtime == b->pack->mtime)
return 0;
return -1;
}
-static void packfile_store_prepare_mru(struct packfile_store *store)
-{
- struct packed_git *p;
-
- INIT_LIST_HEAD(&store->mru);
-
- for (p = store->packs; p; p = p->next)
- list_add_tail(&p->mru, &store->mru);
-}
-
void packfile_store_prepare(struct packfile_store *store)
{
struct odb_source *source;
@@ -1015,9 +1087,12 @@ void packfile_store_prepare(struct packfile_store *store)
prepare_multi_pack_index_one(source);
prepare_packed_git_one(source);
}
- sort_packs(&store->packs, sort_pack);
- packfile_store_prepare_mru(store);
+ sort_packs(&store->packs.head, sort_pack);
+ for (struct packfile_list_entry *e = store->packs.head; e; e = e->next)
+ if (!e->next)
+ store->packs.tail = e;
+
store->initialized = true;
}
@@ -1027,7 +1102,7 @@ void packfile_store_reprepare(struct packfile_store *store)
packfile_store_prepare(store);
}
-struct packed_git *packfile_store_get_packs(struct packfile_store *store)
+struct packfile_list_entry *packfile_store_get_packs(struct packfile_store *store)
{
packfile_store_prepare(store);
@@ -1039,13 +1114,7 @@ struct packed_git *packfile_store_get_packs(struct packfile_store *store)
prepare_midx_pack(m, i);
}
- return store->packs;
-}
-
-struct list_head *packfile_store_get_packs_mru(struct packfile_store *store)
-{
- packfile_store_prepare(store);
- return &store->mru;
+ return store->packs.head;
}
/*
@@ -1062,16 +1131,16 @@ unsigned long repo_approximate_object_count(struct repository *r)
unsigned long count = 0;
struct packed_git *p;
- packfile_store_prepare(r->objects->packfiles);
+ odb_prepare_alternates(r->objects);
for (source = r->objects->sources; source; source = source->next) {
struct multi_pack_index *m = get_multi_pack_index(source);
if (m)
- count += m->num_objects;
+ count += m->num_objects + m->num_objects_in_base;
}
- for (p = r->objects->packfiles->packs; p; p = p->next) {
- if (open_pack_index(p))
+ repo_for_each_pack(r, p) {
+ if (p->multi_pack_index || open_pack_index(p))
continue;
count += p->num_objects;
}
@@ -1195,11 +1264,11 @@ void mark_bad_packed_object(struct packed_git *p, const struct object_id *oid)
const struct packed_git *has_packed_and_bad(struct repository *r,
const struct object_id *oid)
{
- struct packed_git *p;
+ struct packfile_list_entry *e;
- for (p = r->objects->packfiles->packs; p; p = p->next)
- if (oidset_contains(&p->bad_objects, oid))
- return p;
+ for (e = r->objects->packfiles->packs.head; e; e = e->next)
+ if (oidset_contains(&e->pack->bad_objects, oid))
+ return e->pack;
return NULL;
}
@@ -2007,19 +2076,6 @@ int is_pack_valid(struct packed_git *p)
return !open_packed_git(p);
}
-struct packed_git *find_oid_pack(const struct object_id *oid,
- struct packed_git *packs)
-{
- struct packed_git *p;
-
- for (p = packs; p; p = p->next) {
- if (find_pack_entry_one(oid, p))
- return p;
- }
- return NULL;
-
-}
-
static int fill_pack_entry(const struct object_id *oid,
struct pack_entry *e,
struct packed_git *p)
@@ -2050,7 +2106,7 @@ static int fill_pack_entry(const struct object_id *oid,
int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e)
{
- struct list_head *pos;
+ struct packfile_list_entry *l;
packfile_store_prepare(r->objects->packfiles);
@@ -2058,13 +2114,15 @@ int find_pack_entry(struct repository *r, const struct object_id *oid, struct pa
if (source->midx && fill_midx_entry(source->midx, oid, e))
return 1;
- if (!r->objects->packfiles->packs)
+ if (!r->objects->packfiles->packs.head)
return 0;
- list_for_each(pos, &r->objects->packfiles->mru) {
- struct packed_git *p = list_entry(pos, struct packed_git, mru);
+ for (l = r->objects->packfiles->packs.head; l; l = l->next) {
+ struct packed_git *p = l->pack;
+
if (!p->multi_pack_index && fill_pack_entry(oid, e, p)) {
- list_move(&p->mru, &r->objects->packfiles->mru);
+ if (!r->objects->packfiles->skip_mru_updates)
+ packfile_list_prepend(&r->objects->packfiles->packs, p);
return 1;
}
}
@@ -2196,6 +2254,7 @@ int for_each_packed_object(struct repository *repo, each_packed_object_fn cb,
int r = 0;
int pack_errors = 0;
+ repo->objects->packfiles->skip_mru_updates = true;
repo_for_each_pack(repo, p) {
if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
continue;
@@ -2216,6 +2275,8 @@ int for_each_packed_object(struct repository *repo, each_packed_object_fn cb,
if (r)
break;
}
+ repo->objects->packfiles->skip_mru_updates = false;
+
return r ? r : pack_errors;
}
@@ -2311,45 +2372,30 @@ int parse_pack_header_option(const char *in, unsigned char *out, unsigned int *l
return 0;
}
-static int pack_map_entry_cmp(const void *cmp_data UNUSED,
- const struct hashmap_entry *entry,
- const struct hashmap_entry *entry2,
- const void *keydata)
-{
- const char *key = keydata;
- const struct packed_git *pg1, *pg2;
-
- pg1 = container_of(entry, const struct packed_git, packmap_ent);
- pg2 = container_of(entry2, const struct packed_git, packmap_ent);
-
- return strcmp(pg1->pack_name, key ? key : pg2->pack_name);
-}
-
struct packfile_store *packfile_store_new(struct object_database *odb)
{
struct packfile_store *store;
CALLOC_ARRAY(store, 1);
store->odb = odb;
- INIT_LIST_HEAD(&store->mru);
- hashmap_init(&store->map, pack_map_entry_cmp, NULL, 0);
+ strmap_init(&store->packs_by_path);
return store;
}
void packfile_store_free(struct packfile_store *store)
{
- for (struct packed_git *p = store->packs, *next; p; p = next) {
- next = p->next;
- free(p);
- }
- hashmap_clear(&store->map);
+ for (struct packfile_list_entry *e = store->packs.head; e; e = e->next)
+ free(e->pack);
+ packfile_list_clear(&store->packs);
+
+ strmap_clear(&store->packs_by_path, 0);
free(store);
}
void packfile_store_close(struct packfile_store *store)
{
- for (struct packed_git *p = store->packs; p; p = p->next) {
- if (p->do_not_close)
+ for (struct packfile_list_entry *e = store->packs.head; e; e = e->next) {
+ if (e->pack->do_not_close)
BUG("want to close pack marked 'do-not-close'");
- close_pack(p);
+ close_pack(e->pack);
}
}
diff --git a/packfile.h b/packfile.h
index c9d0b93446..27ba607e7c 100644
--- a/packfile.h
+++ b/packfile.h
@@ -5,14 +5,12 @@
#include "object.h"
#include "odb.h"
#include "oidset.h"
+#include "strmap.h"
/* in odb.h */
struct object_info;
struct packed_git {
- struct hashmap_entry packmap_ent;
- struct packed_git *next;
- struct list_head mru;
struct pack_window *windows;
off_t pack_size;
const void *index_data;
@@ -52,6 +50,28 @@ struct packed_git {
char pack_name[FLEX_ARRAY]; /* more */
};
+struct packfile_list {
+ struct packfile_list_entry *head, *tail;
+};
+
+struct packfile_list_entry {
+ struct packfile_list_entry *next;
+ struct packed_git *pack;
+};
+
+void packfile_list_clear(struct packfile_list *list);
+void packfile_list_remove(struct packfile_list *list, struct packed_git *pack);
+void packfile_list_prepend(struct packfile_list *list, struct packed_git *pack);
+void packfile_list_append(struct packfile_list *list, struct packed_git *pack);
+
+/*
+ * Find the pack within the "packs" list whose index contains the object
+ * "oid". For general object lookups, you probably don't want this; use
+ * find_pack_entry() instead.
+ */
+struct packed_git *packfile_list_find_oid(struct packfile_list_entry *packs,
+ const struct object_id *oid);
+
/*
* A store that manages packfiles for a given object database.
*/
@@ -59,10 +79,10 @@ struct packfile_store {
struct object_database *odb;
/*
- * The list of packfiles in the order in which they are being added to
- * the store.
+ * The list of packfiles in the order in which they have been most
+ * recently used.
*/
- struct packed_git *packs;
+ struct packfile_list packs;
/*
* Cache of packfiles which are marked as "kept", either because there
@@ -78,20 +98,32 @@ struct packfile_store {
unsigned flags;
} kept_cache;
- /* A most-recently-used ordered version of the packs list. */
- struct list_head mru;
-
/*
* A map of packfile names to packed_git structs for tracking which
* packs have been loaded already.
*/
- struct hashmap map;
+ struct strmap packs_by_path;
/*
* Whether packfiles have already been populated with this store's
* packs.
*/
bool initialized;
+
+ /*
+ * Usually, packfiles will be reordered to the front of the `packs`
+ * list whenever an object is looked up via them. This has the effect
+ * that packs that contain a lot of accessed objects will be located
+ * towards the front.
+ *
+	 * This is usually desirable, but there are exceptions. One exception
+	 * is when looking up multiple objects in a loop for each packfile.
+ * In that case, we may easily end up with an infinite loop as the
+ * packfiles get reordered to the front repeatedly.
+ *
+ * Setting this field to `true` thus disables these reorderings.
+ */
+ bool skip_mru_updates;
};
/*
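
The skip_mru_updates field described above is easiest to understand next to the pattern it supports. The sketch below is not part of this patch and mirrors what for_each_packed_object() does in packfile.c: it suspends MRU reordering while iterating the pack list and doing lookups inside the loop.

    /*
     * Minimal sketch, not part of this patch.
     */
    #include "packfile.h"
    #include "repository.h"

    static void walk_all_packs(struct repository *repo)
    {
    	struct packfile_store *store = repo->objects->packfiles;
    	struct packed_git *p;

    	store->skip_mru_updates = true;
    	repo_for_each_pack(repo, p) {
    		/*
    		 * Object lookups performed here (e.g. via find_pack_entry())
    		 * will not move packs to the front of the list, so the
    		 * iteration cannot keep revisiting the same pack.
    		 */
    	}
    	store->skip_mru_updates = false;
    }
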
@@ -142,18 +174,14 @@ void packfile_store_add_pack(struct packfile_store *store,
* repository.
*/
#define repo_for_each_pack(repo, p) \
- for (p = packfile_store_get_packs(repo->objects->packfiles); p; p = p->next)
+ for (struct packfile_list_entry *e = packfile_store_get_packs(repo->objects->packfiles); \
+ ((p) = (e ? e->pack : NULL)); e = e->next)
/*
* Get all packs managed by the given store, including packfiles that are
* referenced by multi-pack indices.
*/
-struct packed_git *packfile_store_get_packs(struct packfile_store *store);
-
-/*
- * Get all packs in most-recently-used order.
- */
-struct list_head *packfile_store_get_packs_mru(struct packfile_store *store);
+struct packfile_list_entry *packfile_store_get_packs(struct packfile_store *store);
/*
* Open the packfile and add it to the store if it isn't yet known. Returns
@@ -245,14 +273,6 @@ extern void (*report_garbage)(unsigned seen_bits, const char *path);
*/
unsigned long repo_approximate_object_count(struct repository *r);
-/*
- * Find the pack within the "packs" list whose index contains the object "oid".
- * For general object lookups, you probably don't want this; use
- * find_pack_entry() instead.
- */
-struct packed_git *find_oid_pack(const struct object_id *oid,
- struct packed_git *packs);
-
void pack_report(struct repository *repo);
/*
diff --git a/parse-options.c b/parse-options.c
index 5933468c19..c9cafc21b9 100644
--- a/parse-options.c
+++ b/parse-options.c
@@ -208,12 +208,12 @@ static enum parse_opt_result do_get_value(struct parse_opt_ctx_t *p,
case OPTION_FILENAME:
{
const char *value;
- int is_optional;
+ bool is_optional;
if (unset)
value = NULL;
else if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
- value = (char *)opt->defval;
+ value = (const char *)opt->defval;
else {
int err = get_arg(p, opt, flags, &value);
if (err)
@@ -223,10 +223,8 @@ static enum parse_opt_result do_get_value(struct parse_opt_ctx_t *p,
return 0;
is_optional = skip_prefix(value, ":(optional)", &value);
- if (!value)
- is_optional = 0;
value = fix_filename(p->prefix, value);
- if (is_optional && is_empty_or_missing_file(value)) {
+ if (is_optional && is_missing_file(value)) {
free((char *)value);
} else {
FREE_AND_NULL(*(char **)opt->value);
diff --git a/ref-filter.c b/ref-filter.c
index 520d2539c9..30cc488d8a 100644
--- a/ref-filter.c
+++ b/ref-filter.c
@@ -2664,7 +2664,7 @@ static int match_name_as_path(const char **pattern, const char *refname,
/* Return 1 if the refname matches one of the patterns, otherwise 0. */
static int filter_pattern_match(struct ref_filter *filter, const char *refname)
{
- if (!*filter->name_patterns)
+ if (!filter->name_patterns || !*filter->name_patterns)
return 1; /* No pattern always matches */
if (filter->match_as_path)
return match_name_as_path(filter->name_patterns, refname,
@@ -2751,7 +2751,7 @@ static int for_each_fullref_in_pattern(struct ref_filter *filter,
return for_each_fullref_with_seek(filter, cb, cb_data, 0);
}
- if (!filter->name_patterns[0]) {
+ if (!filter->name_patterns || !filter->name_patterns[0]) {
/* no patterns; we have to look at everything */
return for_each_fullref_with_seek(filter, cb, cb_data, 0);
}
@@ -2833,7 +2833,7 @@ struct ref_array_item *ref_array_push(struct ref_array *array,
return ref;
}
-static int ref_kind_from_refname(const char *refname)
+int ref_kind_from_refname(const char *refname)
{
unsigned int i;
diff --git a/ref-filter.h b/ref-filter.h
index 81f2c229a9..235c60f79c 100644
--- a/ref-filter.h
+++ b/ref-filter.h
@@ -135,6 +135,8 @@ struct ref_format {
OPT_STRVEC(0, "exclude", &(var)->exclude, \
N_("pattern"), N_("exclude refs which match pattern"))
+/* Get the reference kind from the provided reference name. */
+int ref_kind_from_refname(const char *refname);
/*
* API for filtering a set of refs. Based on the type of refs the user
* has requested, we iterate through those refs and apply filters
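
Since ref_kind_from_refname() is now part of the public header, here is a small sketch of how a caller might use it. It is not part of this patch; the FILTER_REFS_* values are the existing kind flags from this header.

    /*
     * Minimal sketch, not part of this patch: classify a fully qualified
     * refname using the newly exported helper.
     */
    #include "ref-filter.h"

    static const char *ref_kind_name(const char *refname)
    {
    	switch (ref_kind_from_refname(refname)) {
    	case FILTER_REFS_BRANCHES:
    		return "branch";	/* refs/heads/... */
    	case FILTER_REFS_TAGS:
    		return "tag";		/* refs/tags/... */
    	case FILTER_REFS_REMOTES:
    		return "remote";	/* refs/remotes/... */
    	default:
    		return "other";
    	}
    }
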
diff --git a/refs/debug.c b/refs/debug.c
index 697adbd0dc..c59c1728a3 100644
--- a/refs/debug.c
+++ b/refs/debug.c
@@ -47,6 +47,14 @@ static int debug_create_on_disk(struct ref_store *refs, int flags, struct strbuf
return res;
}
+static int debug_remove_on_disk(struct ref_store *refs, struct strbuf *err)
+{
+ struct debug_ref_store *drefs = (struct debug_ref_store *)refs;
+ int res = drefs->refs->be->remove_on_disk(drefs->refs, err);
+ trace_printf_key(&trace_refs, "remove_on_disk: %d\n", res);
+ return res;
+}
+
static int debug_transaction_prepare(struct ref_store *refs,
struct ref_transaction *transaction,
struct strbuf *err)
@@ -432,6 +440,7 @@ struct ref_storage_be refs_be_debug = {
.init = NULL,
.release = debug_release,
.create_on_disk = debug_create_on_disk,
+ .remove_on_disk = debug_remove_on_disk,
/*
* None of these should be NULL. If the "files" backend (in
diff --git a/refs/files-backend.c b/refs/files-backend.c
index 054cf42f4e..1adc4b5182 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -3124,7 +3124,7 @@ static int parse_and_write_reflog(struct files_ref_store *refs,
if (!(update->flags & REF_HAVE_OLD) ||
!(update->flags & REF_HAVE_NEW) ||
!(update->flags & REF_LOG_ONLY)) {
- strbuf_addf(err, _("trying to write reflog for '%s'"
+ strbuf_addf(err, _("trying to write reflog for '%s' "
"with incomplete values"), update->refname);
return REF_TRANSACTION_ERROR_GENERIC;
}
diff --git a/refs/reftable-backend.c b/refs/reftable-backend.c
index d4b7928620..eeec64798f 100644
--- a/refs/reftable-backend.c
+++ b/refs/reftable-backend.c
@@ -1103,7 +1103,7 @@ static enum ref_transaction_error prepare_single_update(struct reftable_ref_stor
if (!(u->flags & REF_HAVE_OLD) ||
!(u->flags & REF_HAVE_NEW) ||
!(u->flags & REF_LOG_ONLY)) {
- strbuf_addf(err, _("trying to write reflog for '%s'"
+ strbuf_addf(err, _("trying to write reflog for '%s' "
"with incomplete values"), u->refname);
return REF_TRANSACTION_ERROR_GENERIC;
}
diff --git a/t/helper/test-delete-gpgsig.c b/t/helper/test-delete-gpgsig.c
index e36831af03..658c7a37f7 100644
--- a/t/helper/test-delete-gpgsig.c
+++ b/t/helper/test-delete-gpgsig.c
@@ -23,8 +23,7 @@ int cmd__delete_gpgsig(int argc, const char **argv)
if (!strcmp(pattern, "trailer")) {
size_t payload_size = parse_signed_buffer(buf.buf, buf.len);
fwrite(buf.buf, 1, payload_size, stdout);
- fflush(stdout);
- return 0;
+ goto out;
}
bufptr = buf.buf;
@@ -56,7 +55,9 @@ int cmd__delete_gpgsig(int argc, const char **argv)
fwrite(bufptr, 1, (eol - bufptr) + 1, stdout);
bufptr = eol + 1;
}
- fflush(stdout);
+out:
+ fflush(stdout);
+ strbuf_release(&buf);
return 0;
}
diff --git a/t/lib-gpg.sh b/t/lib-gpg.sh
index b99ae39a06..97268ae07c 100644
--- a/t/lib-gpg.sh
+++ b/t/lib-gpg.sh
@@ -71,6 +71,7 @@ test_lazy_prereq GPG2 '
exit 1
;;
*)
+ prepare_gnupghome &&
(gpgconf --kill all || : ) &&
# NEEDSWORK: prepare_gnupghome() should definitely be
diff --git a/t/meson.build b/t/meson.build
index c9ddd89889..a5531df415 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -238,6 +238,7 @@ integration_tests = [
't1701-racy-split-index.sh',
't1800-hook.sh',
't1900-repo.sh',
+ 't1901-repo-structure.sh',
't2000-conflict-when-checking-files-out.sh',
't2002-checkout-cache-u.sh',
't2003-checkout-cache-mkdir.sh',
diff --git a/t/perf/p6010-merge-base.sh b/t/perf/p6010-merge-base.sh
new file mode 100755
index 0000000000..54f52fa23e
--- /dev/null
+++ b/t/perf/p6010-merge-base.sh
@@ -0,0 +1,101 @@
+#!/bin/sh
+
+test_description='Test git merge-base'
+
+. ./perf-lib.sh
+
+test_perf_fresh_repo
+
+#
+# Creates lots of merges to make history traversal costly. In
+# particular it creates 2^($max_level-1)-1 2-way merges on top of
+# 2^($max_level-1) root commits. E.g., the commit history looks like
+# this for a $max_level of 3:
+#
+# _1_
+# / \
+# 2 3
+# / \ / \
+# 4 5 6 7
+#
+# The numbers are the fast-import marks, which also are the commit
+# messages. 1 is the HEAD commit and a merge, 2 and 3 are also merges,
+# 4-7 are the root commits.
+#
+build_history () {
+ local max_level="$1" &&
+ local level="${2:-1}" &&
+ local mark="${3:-1}" &&
+ if test $level -eq $max_level
+ then
+ echo "reset refs/heads/master" &&
+ echo "from $ZERO_OID" &&
+ echo "commit refs/heads/master" &&
+ echo "mark :$mark" &&
+ echo "committer C <c@example.com> 1234567890 +0000" &&
+ echo "data <<EOF" &&
+ echo "$mark" &&
+ echo "EOF"
+ else
+ local level1=$((level+1)) &&
+ local mark1=$((2*mark)) &&
+ local mark2=$((2*mark+1)) &&
+ build_history $max_level $level1 $mark1 &&
+ build_history $max_level $level1 $mark2 &&
+ echo "commit refs/heads/master" &&
+ echo "mark :$mark" &&
+ echo "committer C <c@example.com> 1234567890 +0000" &&
+ echo "data <<EOF" &&
+ echo "$mark" &&
+ echo "EOF" &&
+ echo "from :$mark1" &&
+ echo "merge :$mark2"
+ fi
+}
+
+#
+# Creates a new merge history in the same shape as build_history does,
+# while reusing the same root commits. This way the two top commits
+# have 2^($max_level-1) merge bases between them.
+#
+build_history2 () {
+ local max_level="$1" &&
+ local level="${2:-1}" &&
+ local mark="${3:-1}" &&
+ if test $level -lt $max_level
+ then
+ local level1=$((level+1)) &&
+ local mark1=$((2*mark)) &&
+ local mark2=$((2*mark+1)) &&
+ build_history2 $max_level $level1 $mark1 &&
+ build_history2 $max_level $level1 $mark2 &&
+ echo "commit refs/heads/master" &&
+ echo "mark :$mark" &&
+ echo "committer C <c@example.com> 1234567890 +0000" &&
+ echo "data <<EOF" &&
+ echo "$mark II" &&
+ echo "EOF" &&
+ echo "from :$mark1" &&
+ echo "merge :$mark2"
+ fi
+}
+
+test_expect_success 'setup' '
+ max_level=15 &&
+ build_history $max_level | git fast-import --export-marks=marks &&
+ git tag one &&
+ build_history2 $max_level | git fast-import --import-marks=marks --force &&
+ git tag two &&
+ git gc &&
+ git log --format=%H --no-merges >expect
+'
+
+test_perf 'git merge-base' '
+ git merge-base --all one two >actual
+'
+
+test_expect_success 'verify result' '
+ test_cmp expect actual
+'
+
+test_done
diff --git a/t/t0008-ignores.sh b/t/t0008-ignores.sh
index 273d71411f..db8bde280e 100755
--- a/t/t0008-ignores.sh
+++ b/t/t0008-ignores.sh
@@ -847,6 +847,17 @@ test_expect_success 'directories and ** matches' '
test_cmp expect actual
'
+test_expect_success '** not confused by matching leading prefix' '
+ cat >.gitignore <<-\EOF &&
+ foo**/bar
+ EOF
+ git check-ignore foobar foo/bar >actual &&
+ cat >expect <<-\EOF &&
+ foo/bar
+ EOF
+ test_cmp expect actual
+'
+
############################################################################
#
# test whitespace handling
diff --git a/t/t0450/adoc-help-mismatches b/t/t0450/adoc-help-mismatches
index 2c6ecd5fc8..8ee2d3f7c8 100644
--- a/t/t0450/adoc-help-mismatches
+++ b/t/t0450/adoc-help-mismatches
@@ -2,7 +2,6 @@ add
am
apply
archive
-bisect
blame
branch
check-ref-format
diff --git a/t/t1016-compatObjectFormat.sh b/t/t1016-compatObjectFormat.sh
index a9af8b2396..0efce53f3a 100755
--- a/t/t1016-compatObjectFormat.sh
+++ b/t/t1016-compatObjectFormat.sh
@@ -21,6 +21,12 @@ test_description='Test how well compatObjectFormat works'
# different hash functions result in the same content in the commits.
# This means that when the commit is translated between hash functions
# the commit is identical to the commit in the other repository.
+#
+# Similarly this test relies on:
+# gpg --faked-system-time '20230918T154812!'
+# freezing the system time from gpg's perspective so that two different
+# runs of gpg applied to the same data result in identical signatures.
+#
compat_hash () {
case "$1" in
diff --git a/t/t1016/gpg b/t/t1016/gpg
index 2601cb18a5..34d6e055fc 100755
--- a/t/t1016/gpg
+++ b/t/t1016/gpg
@@ -1,2 +1,2 @@
#!/bin/sh
-exec gpg --faked-system-time "20230918T154812" "$@"
+exec gpg --faked-system-time '20230918T154812!' "$@"
diff --git a/t/t1901-repo-structure.sh b/t/t1901-repo-structure.sh
new file mode 100755
index 0000000000..36a71a144e
--- /dev/null
+++ b/t/t1901-repo-structure.sh
@@ -0,0 +1,129 @@
+#!/bin/sh
+
+test_description='test git repo structure'
+
+. ./test-lib.sh
+
+test_expect_success 'empty repository' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ cat >expect <<-\EOF &&
+ | Repository structure | Value |
+ | -------------------- | ----- |
+ | * References | |
+ | * Count | 0 |
+ | * Branches | 0 |
+ | * Tags | 0 |
+ | * Remotes | 0 |
+ | * Others | 0 |
+ | | |
+ | * Reachable objects | |
+ | * Count | 0 |
+ | * Commits | 0 |
+ | * Trees | 0 |
+ | * Blobs | 0 |
+ | * Tags | 0 |
+ EOF
+
+ git repo structure >out 2>err &&
+
+ test_cmp expect out &&
+ test_line_count = 0 err
+ )
+'
+
+test_expect_success 'repository with references and objects' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ test_commit_bulk 42 &&
+ git tag -a foo -m bar &&
+
+ oid="$(git rev-parse HEAD)" &&
+ git update-ref refs/remotes/origin/foo "$oid" &&
+
+ # Also creates a commit, tree, and blob.
+ git notes add -m foo &&
+
+ cat >expect <<-\EOF &&
+ | Repository structure | Value |
+ | -------------------- | ----- |
+ | * References | |
+ | * Count | 4 |
+ | * Branches | 1 |
+ | * Tags | 1 |
+ | * Remotes | 1 |
+ | * Others | 1 |
+ | | |
+ | * Reachable objects | |
+ | * Count | 130 |
+ | * Commits | 43 |
+ | * Trees | 43 |
+ | * Blobs | 43 |
+ | * Tags | 1 |
+ EOF
+
+ git repo structure >out 2>err &&
+
+ test_cmp expect out &&
+ test_line_count = 0 err
+ )
+'
+
+test_expect_success 'keyvalue and nul format' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ test_commit_bulk 42 &&
+ git tag -a foo -m bar &&
+
+ cat >expect <<-\EOF &&
+ references.branches.count=1
+ references.tags.count=1
+ references.remotes.count=0
+ references.others.count=0
+ objects.commits.count=42
+ objects.trees.count=42
+ objects.blobs.count=42
+ objects.tags.count=1
+ EOF
+
+ git repo structure --format=keyvalue >out 2>err &&
+
+ test_cmp expect out &&
+ test_line_count = 0 err &&
+
+ # Replace key and value delimiters for nul format.
+ tr "\n=" "\0\n" <expect >expect_nul &&
+ git repo structure --format=nul >out 2>err &&
+
+ test_cmp expect_nul out &&
+ test_line_count = 0 err
+ )
+'
+
+test_expect_success 'progress meter option' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ test_commit foo &&
+
+ GIT_PROGRESS_DELAY=0 git repo structure --progress >out 2>err &&
+
+ test_file_not_empty out &&
+ test_grep "Counting references: 2, done." err &&
+ test_grep "Counting objects: 3, done." err &&
+
+ GIT_PROGRESS_DELAY=0 git repo structure --no-progress >out 2>err &&
+
+ test_file_not_empty out &&
+ test_line_count = 0 err
+ )
+'
+
+test_done
diff --git a/t/t3070-wildmatch.sh b/t/t3070-wildmatch.sh
index 3da824117c..655bb1a0f2 100755
--- a/t/t3070-wildmatch.sh
+++ b/t/t3070-wildmatch.sh
@@ -235,6 +235,8 @@ match 1 1 1 1 aaaaaaabababab '*ab'
match 1 1 1 1 'foo*' 'foo\*'
match 0 0 0 0 foobar 'foo\*bar'
match 1 1 1 1 'f\oo' 'f\\oo'
+match 0 0 0 0 \
+ 1 1 1 1 'foo\' 'foo\'
match 1 1 1 1 ball '*[al]?'
match 0 0 0 0 ten '[ten]'
match 1 1 1 1 ten '**[!te]'
diff --git a/t/t3701-add-interactive.sh b/t/t3701-add-interactive.sh
index 851ca6dd91..4285314f35 100755
--- a/t/t3701-add-interactive.sh
+++ b/t/t3701-add-interactive.sh
@@ -1431,4 +1431,15 @@ test_expect_success 'invalid option s is rejected' '
test_cmp expect actual
'
+test_expect_success 'EOF quits' '
+ echo a >file &&
+ echo a >file2 &&
+ git add file file2 &&
+ echo X >file &&
+ echo X >file2 &&
+ git add -p </dev/null >out &&
+ test_grep file out &&
+ test_grep ! file2 out
+'
+
test_done
diff --git a/t/t4020-diff-external.sh b/t/t4020-diff-external.sh
index c8a23d5148..7ec5854f74 100755
--- a/t/t4020-diff-external.sh
+++ b/t/t4020-diff-external.sh
@@ -44,6 +44,16 @@ test_expect_success 'GIT_EXTERNAL_DIFF environment and --no-ext-diff' '
'
+test_expect_success 'GIT_EXTERNAL_DIFF and --output' '
+ cat >expect <<-EOF &&
+ file $(git rev-parse --verify HEAD:file) 100644 file $(test_oid zero) 100644
+ EOF
+ GIT_EXTERNAL_DIFF=echo git diff --output=out >stdout &&
+ cut -d" " -f1,3- <out >actual &&
+ test_must_be_empty stdout &&
+ test_cmp expect actual
+'
+
test_expect_success SYMLINKS 'typechange diff' '
rm -f file &&
ln -s elif file &&
diff --git a/t/t7900-maintenance.sh b/t/t7900-maintenance.sh
index ddd273d8dc..614184a097 100755
--- a/t/t7900-maintenance.sh
+++ b/t/t7900-maintenance.sh
@@ -465,6 +465,176 @@ test_expect_success 'maintenance.incremental-repack.auto (when config is unset)'
)
'
+run_and_verify_geometric_pack () {
+ EXPECTED_PACKS="$1" &&
+
+ # Verify that we perform a geometric repack.
+ rm -f "trace2.txt" &&
+ GIT_TRACE2_EVENT="$(pwd)/trace2.txt" \
+ git maintenance run --task=geometric-repack 2>/dev/null &&
+ test_subcommand git repack -d -l --geometric=2 \
+ --quiet --write-midx <trace2.txt &&
+
+ # Verify that the number of packfiles matches our expectation.
+ ls -l .git/objects/pack/*.pack >packfiles &&
+ test_line_count = "$EXPECTED_PACKS" packfiles &&
+
+ # And verify that there are no loose objects anymore.
+ git count-objects -v >count &&
+ test_grep '^count: 0$' count
+}
+
+test_expect_success 'geometric repacking task' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ git config set maintenance.auto false &&
+ test_commit initial &&
+
+ # The initial repack causes an all-into-one repack.
+ GIT_TRACE2_EVENT="$(pwd)/initial-repack.txt" \
+ git maintenance run --task=geometric-repack 2>/dev/null &&
+ test_subcommand git repack -d -l --cruft --cruft-expiration=2.weeks.ago \
+ --quiet --write-midx <initial-repack.txt &&
+
+ # Repacking should now cause a no-op geometric repack because
+ # no packfiles need to be combined.
+ ls -l .git/objects/pack/*.pack >before &&
+ run_and_verify_geometric_pack 1 &&
+ ls -l .git/objects/pack/*.pack >after &&
+ test_cmp before after &&
+
+ # This incremental change creates a new packfile that only
+ # soaks up loose objects. The packfiles are not getting merged
+ # at this point.
+ test_commit loose &&
+ run_and_verify_geometric_pack 2 &&
+
+ # Both packfiles have 3 objects, so the next run would cause us
+ # to merge all packfiles together. This should be turned into
+		# an all-into-one repack.
+ GIT_TRACE2_EVENT="$(pwd)/all-into-one-repack.txt" \
+ git maintenance run --task=geometric-repack 2>/dev/null &&
+ test_subcommand git repack -d -l --cruft --cruft-expiration=2.weeks.ago \
+ --quiet --write-midx <all-into-one-repack.txt &&
+
+ # The geometric repack soaks up unreachable objects.
+ echo blob-1 | git hash-object -w --stdin -t blob &&
+ run_and_verify_geometric_pack 2 &&
+
+ # A second unreachable object should be written into another packfile.
+ echo blob-2 | git hash-object -w --stdin -t blob &&
+ run_and_verify_geometric_pack 3 &&
+
+ # And these two small packs should now be merged via the
+ # geometric repack. The large packfile should remain intact.
+ run_and_verify_geometric_pack 2 &&
+
+ # If we now add two more objects and repack twice we should
+ # then see another all-into-one repack. This time around
+ # though, as we have unreachable objects, we should also see a
+ # cruft pack.
+ echo blob-3 | git hash-object -w --stdin -t blob &&
+ echo blob-4 | git hash-object -w --stdin -t blob &&
+ run_and_verify_geometric_pack 3 &&
+ GIT_TRACE2_EVENT="$(pwd)/cruft-repack.txt" \
+ git maintenance run --task=geometric-repack 2>/dev/null &&
+ test_subcommand git repack -d -l --cruft --cruft-expiration=2.weeks.ago \
+ --quiet --write-midx <cruft-repack.txt &&
+ ls .git/objects/pack/*.pack >packs &&
+ test_line_count = 2 packs &&
+ ls .git/objects/pack/*.mtimes >cruft &&
+ test_line_count = 1 cruft
+ )
+'
+
+test_geometric_repack_needed () {
+ NEEDED="$1"
+ GEOMETRIC_CONFIG="$2" &&
+ rm -f trace2.txt &&
+ GIT_TRACE2_EVENT="$(pwd)/trace2.txt" \
+ git ${GEOMETRIC_CONFIG:+-c maintenance.geometric-repack.$GEOMETRIC_CONFIG} \
+ maintenance run --auto --task=geometric-repack 2>/dev/null &&
+ case "$NEEDED" in
+ true)
+ test_grep "\[\"git\",\"repack\"," trace2.txt;;
+ false)
+ ! test_grep "\[\"git\",\"repack\"," trace2.txt;;
+ *)
+ BUG "invalid parameter: $NEEDED";;
+ esac
+}
+
+test_expect_success 'geometric repacking with --auto' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+
+ # An empty repository does not need repacking, except when
+ # explicitly told to do it.
+ test_geometric_repack_needed false &&
+ test_geometric_repack_needed false auto=0 &&
+ test_geometric_repack_needed false auto=1 &&
+ test_geometric_repack_needed true auto=-1 &&
+
+ test_oid_init &&
+
+ # Loose objects cause a repack when crossing the limit. Note
+ # that the number of objects gets extrapolated by having a look
+ # at the "objects/17/" shard.
+ test_commit "$(test_oid blob17_1)" &&
+ test_geometric_repack_needed false &&
+ test_commit "$(test_oid blob17_2)" &&
+ test_geometric_repack_needed false auto=257 &&
+ test_geometric_repack_needed true auto=256 &&
+
+ # Force another repack.
+ test_commit first &&
+ test_commit second &&
+ test_geometric_repack_needed true auto=-1 &&
+
+ # We now have two packfiles that would be merged together. As
+ # such, the repack should always happen unless the user has
+ # disabled the auto task.
+ test_geometric_repack_needed false auto=0 &&
+ test_geometric_repack_needed true auto=9000
+ )
+'
+
+test_expect_success 'geometric repacking honors configured split factor' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ git config set maintenance.auto false &&
+
+ # Create three different packs with 9, 2 and 1 object, respectively.
+		# This is done so that only a subset of the packs gets merged
+		# together, which lets us verify that `git repack` receives the
+		# correct geometric factor.
+ for i in $(test_seq 9)
+ do
+ echo first-$i | git hash-object -w --stdin -t blob || return 1
+ done &&
+ git repack --geometric=2 -d &&
+
+ for i in $(test_seq 2)
+ do
+ echo second-$i | git hash-object -w --stdin -t blob || return 1
+ done &&
+ git repack --geometric=2 -d &&
+
+ echo third | git hash-object -w --stdin -t blob &&
+ git repack --geometric=2 -d &&
+
+ test_geometric_repack_needed false splitFactor=2 &&
+ test_geometric_repack_needed true splitFactor=3 &&
+ test_subcommand git repack -d -l --geometric=3 --quiet --write-midx <trace2.txt
+ )
+'
+
test_expect_success 'pack-refs task' '
for n in $(test_seq 1 5)
do
@@ -716,6 +886,76 @@ test_expect_success 'maintenance.strategy inheritance' '
<modified-daily.txt
'
+test_strategy () {
+ STRATEGY="$1"
+ shift
+
+ cat >expect &&
+ rm -f trace2.txt &&
+ GIT_TRACE2_EVENT="$(pwd)/trace2.txt" \
+ git -c maintenance.strategy=$STRATEGY maintenance run --quiet "$@" &&
+ sed -n 's/{"event":"child_start","sid":"[^/"]*",.*,"argv":\["\(.*\)\"]}/\1/p' <trace2.txt |
+	sed 's/","/ /g' >actual &&
+ test_cmp expect actual
+}
+
+test_expect_success 'maintenance.strategy is respected' '
+ test_when_finished "rm -rf repo" &&
+ git init repo &&
+ (
+ cd repo &&
+ test_commit initial &&
+
+ test_must_fail git -c maintenance.strategy=unknown maintenance run 2>err &&
+ test_grep "unknown maintenance strategy: .unknown." err &&
+
+ test_strategy incremental <<-\EOF &&
+ git pack-refs --all --prune
+ git reflog expire --all
+ git gc --quiet --no-detach --skip-foreground-tasks
+ EOF
+
+ test_strategy incremental --schedule=weekly <<-\EOF &&
+ git pack-refs --all --prune
+ git prune-packed --quiet
+ git multi-pack-index write --no-progress
+ git multi-pack-index expire --no-progress
+ git multi-pack-index repack --no-progress --batch-size=1
+ git commit-graph write --split --reachable --no-progress
+ EOF
+
+ test_strategy gc <<-\EOF &&
+ git pack-refs --all --prune
+ git reflog expire --all
+ git gc --quiet --no-detach --skip-foreground-tasks
+ EOF
+
+ test_strategy gc --schedule=weekly <<-\EOF &&
+ git pack-refs --all --prune
+ git reflog expire --all
+ git gc --quiet --no-detach --skip-foreground-tasks
+ EOF
+
+ test_strategy geometric <<-\EOF &&
+ git pack-refs --all --prune
+ git reflog expire --all
+ git repack -d -l --geometric=2 --quiet --write-midx
+ git commit-graph write --split --reachable --no-progress
+ git worktree prune --expire 3.months.ago
+ git rerere gc
+ EOF
+
+ test_strategy geometric --schedule=weekly <<-\EOF
+ git pack-refs --all --prune
+ git reflog expire --all
+ git repack -d -l --geometric=2 --quiet --write-midx
+ git commit-graph write --split --reachable --no-progress
+ git worktree prune --expire 3.months.ago
+ git rerere gc
+ EOF
+ )
+'
+
test_expect_success 'register and unregister' '
test_when_finished git config --global --unset-all maintenance.repo &&
@@ -1093,6 +1333,11 @@ test_expect_success 'fails when running outside of a repository' '
nongit test_must_fail git maintenance unregister
'
+test_expect_success 'fails when configured to use an invalid strategy' '
+ test_must_fail git -c maintenance.strategy=invalid maintenance run --schedule=hourly 2>err &&
+ test_grep "unknown maintenance strategy: .invalid." err
+'
+
test_expect_success 'register and unregister bare repo' '
test_when_finished "git config --global --unset-all maintenance.repo || :" &&
test_might_fail git config --global --unset-all maintenance.repo &&
diff --git a/t/t9300-fast-import.sh b/t/t9300-fast-import.sh
index 4dc3d645bf..5685cce6fe 100755
--- a/t/t9300-fast-import.sh
+++ b/t/t9300-fast-import.sh
@@ -2927,16 +2927,16 @@ test_expect_success 'R: blob appears only once' '
# The error message when a space is missing not at the
# end of the line is:
#
-# Missing space after ..
+# missing space after ..
#
# or when extra characters come after the mark at the end
# of the line:
#
-# Garbage after ..
+# garbage after ..
#
# or when the dataref is neither "inline " or a known SHA1,
#
-# Invalid dataref ..
+# invalid dataref ..
#
test_expect_success 'S: initialize for S tests' '
test_tick &&
@@ -3405,15 +3405,15 @@ test_path_fail () {
test_path_base_fail () {
local change="$1" prefix="$2" field="$3" suffix="$4"
- test_path_fail "$change" 'unclosed " in '"$field" "$prefix" '"hello.c' "$suffix" "Invalid $field"
- test_path_fail "$change" "invalid escape in quoted $field" "$prefix" '"hello\xff"' "$suffix" "Invalid $field"
+ test_path_fail "$change" 'unclosed " in '"$field" "$prefix" '"hello.c' "$suffix" "invalid $field"
+ test_path_fail "$change" "invalid escape in quoted $field" "$prefix" '"hello\xff"' "$suffix" "invalid $field"
test_path_fail "$change" "escaped NUL in quoted $field" "$prefix" '"hello\000"' "$suffix" "NUL in $field"
}
test_path_eol_quoted_fail () {
local change="$1" prefix="$2" field="$3"
test_path_base_fail "$change" "$prefix" "$field" ''
- test_path_fail "$change" "garbage after quoted $field" "$prefix" '"hello.c"' 'x' "Garbage after $field"
- test_path_fail "$change" "space after quoted $field" "$prefix" '"hello.c"' ' ' "Garbage after $field"
+ test_path_fail "$change" "garbage after quoted $field" "$prefix" '"hello.c"' 'x' "garbage after $field"
+ test_path_fail "$change" "space after quoted $field" "$prefix" '"hello.c"' ' ' "garbage after $field"
}
test_path_eol_fail () {
local change="$1" prefix="$2" field="$3"
@@ -3422,8 +3422,8 @@ test_path_eol_fail () {
test_path_space_fail () {
local change="$1" prefix="$2" field="$3"
test_path_base_fail "$change" "$prefix" "$field" ' world.c'
- test_path_fail "$change" "missing space after quoted $field" "$prefix" '"hello.c"' 'x world.c' "Missing space after $field"
- test_path_fail "$change" "missing space after unquoted $field" "$prefix" 'hello.c' '' "Missing space after $field"
+ test_path_fail "$change" "missing space after quoted $field" "$prefix" '"hello.c"' 'x world.c' "missing space after $field"
+ test_path_fail "$change" "missing space after unquoted $field" "$prefix" 'hello.c' '' "missing space after $field"
}
test_path_eol_fail filemodify 'M 100644 :1 ' path
@@ -3820,7 +3820,7 @@ test_expect_success 'X: replace ref that becomes useless is removed' '
sed -e s/othername/somename/ tmp >tmp2 &&
git fast-import --force <tmp2 2>msgs &&
- grep "Dropping.*since it would point to itself" msgs &&
+ grep "dropping.*since it would point to itself" msgs &&
git show-ref >refs &&
! grep refs/replace refs
)