Merge branch 'master' of github.com:git/git

* 'master' of github.com:git/git: (397 commits)
  Git 2.33-rc0
  The seventh batch
  ci/install-dependencies: handle "sparse" job package installs
  ci: run "apt-get update" before "apt-get install"
  cache-tree: prefetch in partial clone read-tree
  unpack-trees: refactor prefetching code
  pack-bitmap: check pack validity when opening bitmap
  bundle tests: use test_cmp instead of grep
  bundle tests: use ">file" not ": >file"
  The sixth batch
  doc: pull: fix rebase=false documentation
  pack-bitmap: clarify comment in filter_bitmap_exclude_type()
  doc: clarify description of 'submodule.recurse'
  doc/git-config: simplify "override" advice for FILES section
  doc/git-config: clarify GIT_CONFIG environment variable
  doc/git-config: explain --file instead of referring to GIT_CONFIG
  t0000: fix test if run with TEST_OUTPUT_DIRECTORY
  multi-pack-index: fix potential segfault without sub-command
  refs/debug: quote prefix
  t0000: clear GIT_SKIP_TESTS before running sub-tests
  ...
commit 972c9cf6ae

26  .github/workflows/check-whitespace.yml (vendored)
@@ -12,15 +12,9 @@ jobs:
   check-whitespace:
     runs-on: ubuntu-latest
     steps:
-      - name: Set commit count
-        shell: bash
-        run: echo "COMMIT_DEPTH=$((1+$COMMITS))" >>$GITHUB_ENV
-        env:
-          COMMITS: ${{ github.event.pull_request.commits }}
-
       - uses: actions/checkout@v2
         with:
-          fetch-depth: ${{ env.COMMIT_DEPTH }}
+          fetch-depth: 0

       - name: git log --check
         id: check_out
@@ -47,25 +41,9 @@ jobs:
              echo "${dash} ${etc}"
              ;;
            esac
-          done <<< $(git log --check --pretty=format:"---% h% s" -${{github.event.pull_request.commits}})
+          done <<< $(git log --check --pretty=format:"---% h% s" ${{github.event.pull_request.base.sha}}..)

          if test -n "${log}"
          then
            echo "::set-output name=checkout::"${log}""
            exit 2
          fi
-
-      - name: Add Check Output as Comment
-        uses: actions/github-script@v3
-        id: add-comment
-        env:
-          log: ${{ steps.check_out.outputs.checkout }}
-        with:
-          script: |
-            await github.issues.createComment({
-              issue_number: context.issue.number,
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              body: `Whitespace errors found in workflow ${{ github.workflow }}:\n\n\`\`\`\n${process.env.log.replace(/\\n/g, "\n")}\n\`\`\``
-            })
-        if: ${{ failure() }}
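The job above is, at its core, "git log --check" run over the commits that a
pull request adds. A minimal local equivalent, assuming the target branch is
available as origin/master (adjust the range to taste):

    # print each commit as a "--- <hash> <subject>" header; any file:line
    # output that follows a header is a whitespace problem in that commit
    $ git log --check --pretty=format:"---% h% s" origin/master..

    # the same check for just the most recent commit
    $ git diff --check HEAD~1 HEAD

Empty output means the range is clean, which is what lets the workflow fail
only when "git log --check" actually reported something.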
177  .github/workflows/main.yml (vendored)
@ -81,44 +81,21 @@ jobs:
|
||||
if: needs.ci-config.outputs.enabled == 'yes'
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: download git-sdk-64-minimal
|
||||
shell: bash
|
||||
run: |
|
||||
## Get artifact
|
||||
urlbase=https://dev.azure.com/git-for-windows/git/_apis/build/builds
|
||||
id=$(curl "$urlbase?definitions=22&statusFilter=completed&resultFilter=succeeded&\$top=1" |
|
||||
jq -r ".value[] | .id")
|
||||
download_url="$(curl "$urlbase/$id/artifacts" |
|
||||
jq -r '.value[] | select(.name == "git-sdk-64-minimal").resource.downloadUrl')"
|
||||
curl --connect-timeout 10 --retry 5 --retry-delay 0 --retry-max-time 240 \
|
||||
-o artifacts.zip "$download_url"
|
||||
|
||||
## Unzip and remove the artifact
|
||||
unzip artifacts.zip
|
||||
rm artifacts.zip
|
||||
- uses: actions/checkout@v2
|
||||
- uses: git-for-windows/setup-git-for-windows-sdk@v1
|
||||
- name: build
|
||||
shell: powershell
|
||||
shell: bash
|
||||
env:
|
||||
HOME: ${{runner.workspace}}
|
||||
MSYSTEM: MINGW64
|
||||
NO_PERL: 1
|
||||
run: |
|
||||
& .\git-sdk-64-minimal\usr\bin\bash.exe -lc @"
|
||||
printf '%s\n' /git-sdk-64-minimal/ >>.git/info/exclude
|
||||
|
||||
ci/make-test-artifacts.sh artifacts
|
||||
"@
|
||||
- name: upload build artifacts
|
||||
uses: actions/upload-artifact@v1
|
||||
run: ci/make-test-artifacts.sh artifacts
|
||||
- name: zip up tracked files
|
||||
run: git archive -o artifacts/tracked.tar.gz HEAD
|
||||
- name: upload tracked files and build artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: windows-artifacts
|
||||
path: artifacts
|
||||
- name: upload git-sdk-64-minimal
|
||||
uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: git-sdk-64-minimal
|
||||
path: git-sdk-64-minimal
|
||||
windows-test:
|
||||
runs-on: windows-latest
|
||||
needs: [windows-build]
|
||||
@ -127,37 +104,25 @@ jobs:
|
||||
matrix:
|
||||
nr: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: download build artifacts
|
||||
uses: actions/download-artifact@v1
|
||||
- name: download tracked files and build artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: windows-artifacts
|
||||
path: ${{github.workspace}}
|
||||
- name: extract build artifacts
|
||||
- name: extract tracked files and build artifacts
|
||||
shell: bash
|
||||
run: tar xf artifacts.tar.gz
|
||||
- name: download git-sdk-64-minimal
|
||||
uses: actions/download-artifact@v1
|
||||
with:
|
||||
name: git-sdk-64-minimal
|
||||
path: ${{github.workspace}}/git-sdk-64-minimal/
|
||||
run: tar xf artifacts.tar.gz && tar xf tracked.tar.gz
|
||||
- uses: git-for-windows/setup-git-for-windows-sdk@v1
|
||||
- name: test
|
||||
shell: powershell
|
||||
run: |
|
||||
& .\git-sdk-64-minimal\usr\bin\bash.exe -lc @"
|
||||
# Let Git ignore the SDK
|
||||
printf '%s\n' /git-sdk-64-minimal/ >>.git/info/exclude
|
||||
|
||||
ci/run-test-slice.sh ${{matrix.nr}} 10
|
||||
"@
|
||||
shell: bash
|
||||
run: ci/run-test-slice.sh ${{matrix.nr}} 10
|
||||
- name: ci/print-test-failures.sh
|
||||
if: failure()
|
||||
shell: powershell
|
||||
run: |
|
||||
& .\git-sdk-64-minimal\usr\bin\bash.exe -lc ci/print-test-failures.sh
|
||||
shell: bash
|
||||
run: ci/print-test-failures.sh
|
||||
- name: Upload failed tests' directories
|
||||
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
|
||||
uses: actions/upload-artifact@v1
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: failed-tests-windows
|
||||
path: ${{env.FAILED_TEST_ARTIFACTS}}
|
||||
@ -165,27 +130,12 @@ jobs:
|
||||
needs: ci-config
|
||||
if: needs.ci-config.outputs.enabled == 'yes'
|
||||
env:
|
||||
MSYSTEM: MINGW64
|
||||
NO_PERL: 1
|
||||
GIT_CONFIG_PARAMETERS: "'user.name=CI' 'user.email=ci@git'"
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: download git-sdk-64-minimal
|
||||
shell: bash
|
||||
run: |
|
||||
## Get artifact
|
||||
urlbase=https://dev.azure.com/git-for-windows/git/_apis/build/builds
|
||||
id=$(curl "$urlbase?definitions=22&statusFilter=completed&resultFilter=succeeded&\$top=1" |
|
||||
jq -r ".value[] | .id")
|
||||
download_url="$(curl "$urlbase/$id/artifacts" |
|
||||
jq -r '.value[] | select(.name == "git-sdk-64-minimal").resource.downloadUrl')"
|
||||
curl --connect-timeout 10 --retry 5 --retry-delay 0 --retry-max-time 240 \
|
||||
-o artifacts.zip "$download_url"
|
||||
|
||||
## Unzip and remove the artifact
|
||||
unzip artifacts.zip
|
||||
rm artifacts.zip
|
||||
- uses: actions/checkout@v2
|
||||
- uses: git-for-windows/setup-git-for-windows-sdk@v1
|
||||
- name: initialize vcpkg
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
@ -203,75 +153,60 @@ jobs:
|
||||
- name: add msbuild to PATH
|
||||
uses: microsoft/setup-msbuild@v1
|
||||
- name: copy dlls to root
|
||||
shell: powershell
|
||||
run: |
|
||||
& compat\vcbuild\vcpkg_copy_dlls.bat release
|
||||
if (!$?) { exit(1) }
|
||||
shell: cmd
|
||||
run: compat\vcbuild\vcpkg_copy_dlls.bat release
|
||||
- name: generate Visual Studio solution
|
||||
shell: bash
|
||||
run: |
|
||||
cmake `pwd`/contrib/buildsystems/ -DCMAKE_PREFIX_PATH=`pwd`/compat/vcbuild/vcpkg/installed/x64-windows \
|
||||
-DMSGFMT_EXE=`pwd`/git-sdk-64-minimal/mingw64/bin/msgfmt.exe -DPERL_TESTS=OFF -DPYTHON_TESTS=OFF -DCURL_NO_CURL_CMAKE=ON
|
||||
-DNO_GETTEXT=YesPlease -DPERL_TESTS=OFF -DPYTHON_TESTS=OFF -DCURL_NO_CURL_CMAKE=ON
|
||||
- name: MSBuild
|
||||
run: msbuild git.sln -property:Configuration=Release -property:Platform=x64 -maxCpuCount:4 -property:PlatformToolset=v142
|
||||
- name: bundle artifact tar
|
||||
shell: powershell
|
||||
shell: bash
|
||||
env:
|
||||
MSVC: 1
|
||||
VCPKG_ROOT: ${{github.workspace}}\compat\vcbuild\vcpkg
|
||||
run: |
|
||||
& git-sdk-64-minimal\usr\bin\bash.exe -lc @"
|
||||
mkdir -p artifacts &&
|
||||
eval \"`$(make -n artifacts-tar INCLUDE_DLLS_IN_ARTIFACTS=YesPlease ARTIFACTS_DIRECTORY=artifacts 2>&1 | grep ^tar)\"
|
||||
"@
|
||||
- name: upload build artifacts
|
||||
uses: actions/upload-artifact@v1
|
||||
mkdir -p artifacts &&
|
||||
eval "$(make -n artifacts-tar INCLUDE_DLLS_IN_ARTIFACTS=YesPlease ARTIFACTS_DIRECTORY=artifacts NO_GETTEXT=YesPlease 2>&1 | grep ^tar)"
|
||||
- name: zip up tracked files
|
||||
run: git archive -o artifacts/tracked.tar.gz HEAD
|
||||
- name: upload tracked files and build artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: vs-artifacts
|
||||
path: artifacts
|
||||
vs-test:
|
||||
runs-on: windows-latest
|
||||
needs: [vs-build, windows-build]
|
||||
needs: vs-build
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
nr: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: download git-sdk-64-minimal
|
||||
uses: actions/download-artifact@v1
|
||||
with:
|
||||
name: git-sdk-64-minimal
|
||||
path: ${{github.workspace}}/git-sdk-64-minimal/
|
||||
- name: download build artifacts
|
||||
uses: actions/download-artifact@v1
|
||||
- uses: git-for-windows/setup-git-for-windows-sdk@v1
|
||||
- name: download tracked files and build artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: vs-artifacts
|
||||
path: ${{github.workspace}}
|
||||
- name: extract build artifacts
|
||||
- name: extract tracked files and build artifacts
|
||||
shell: bash
|
||||
run: tar xf artifacts.tar.gz
|
||||
run: tar xf artifacts.tar.gz && tar xf tracked.tar.gz
|
||||
- name: test
|
||||
shell: powershell
|
||||
shell: bash
|
||||
env:
|
||||
MSYSTEM: MINGW64
|
||||
NO_SVN_TESTS: 1
|
||||
GIT_TEST_SKIP_REBASE_P: 1
|
||||
run: |
|
||||
& .\git-sdk-64-minimal\usr\bin\bash.exe -lc @"
|
||||
# Let Git ignore the SDK and the test-cache
|
||||
printf '%s\n' /git-sdk-64-minimal/ /test-cache/ >>.git/info/exclude
|
||||
|
||||
ci/run-test-slice.sh ${{matrix.nr}} 10
|
||||
"@
|
||||
run: ci/run-test-slice.sh ${{matrix.nr}} 10
|
||||
- name: ci/print-test-failures.sh
|
||||
if: failure()
|
||||
shell: powershell
|
||||
run: |
|
||||
& .\git-sdk-64-minimal\usr\bin\bash.exe -lc ci/print-test-failures.sh
|
||||
shell: bash
|
||||
run: ci/print-test-failures.sh
|
||||
- name: Upload failed tests' directories
|
||||
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
|
||||
uses: actions/upload-artifact@v1
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: failed-tests-windows
|
||||
path: ${{env.FAILED_TEST_ARTIFACTS}}
|
||||
@ -302,14 +237,14 @@ jobs:
|
||||
jobname: ${{matrix.vector.jobname}}
|
||||
runs-on: ${{matrix.vector.pool}}
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/checkout@v2
|
||||
- run: ci/install-dependencies.sh
|
||||
- run: ci/run-build-and-tests.sh
|
||||
- run: ci/print-test-failures.sh
|
||||
if: failure()
|
||||
- name: Upload failed tests' directories
|
||||
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
|
||||
uses: actions/upload-artifact@v1
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: failed-tests-${{matrix.vector.jobname}}
|
||||
path: ${{env.FAILED_TEST_ARTIFACTS}}
|
||||
@ -336,7 +271,7 @@ jobs:
|
||||
if: failure()
|
||||
- name: Upload failed tests' directories
|
||||
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
|
||||
uses: actions/upload-artifact@v1
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: failed-tests-${{matrix.vector.jobname}}
|
||||
path: ${{env.FAILED_TEST_ARTIFACTS}}
|
||||
@ -347,9 +282,29 @@ jobs:
|
||||
jobname: StaticAnalysis
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/checkout@v2
|
||||
- run: ci/install-dependencies.sh
|
||||
- run: ci/run-static-analysis.sh
|
||||
sparse:
|
||||
needs: ci-config
|
||||
if: needs.ci-config.outputs.enabled == 'yes'
|
||||
env:
|
||||
jobname: sparse
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: Download a current `sparse` package
|
||||
# Ubuntu's `sparse` version is too old for us
|
||||
uses: git-for-windows/get-azure-pipelines-artifact@v0
|
||||
with:
|
||||
repository: git/git
|
||||
definitionId: 10
|
||||
artifact: sparse-20.04
|
||||
- name: Install the current `sparse` package
|
||||
run: sudo dpkg -i sparse-20.04/sparse_*.deb
|
||||
- uses: actions/checkout@v2
|
||||
- name: Install other dependencies
|
||||
run: ci/install-dependencies.sh
|
||||
- run: make sparse
|
||||
documentation:
|
||||
needs: ci-config
|
||||
if: needs.ci-config.outputs.enabled == 'yes'
|
||||
@ -357,6 +312,6 @@ jobs:
|
||||
jobname: Documentation
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/checkout@v2
|
||||
- run: ci/install-dependencies.sh
|
||||
- run: ci/test-documentation.sh
|
||||
|
@@ -551,6 +551,51 @@ Writing Documentation:
  documentation, please see the documentation-related advice in the
  Documentation/SubmittingPatches file).

+ In order to ensure the documentation is inclusive, avoid assuming
+ that an unspecified example person is male or female, and think
+ twice before using "he", "him", "she", or "her". Here are some
+ tips to avoid use of gendered pronouns:
+
+  - Prefer succinctness and matter-of-factly describing functionality
+    in the abstract. E.g.
+
+     --short:: Emit output in the short-format.
+
+    and avoid something like these overly verbose alternatives:
+
+     --short:: Use this to emit output in the short-format.
+     --short:: You can use this to get output in the short-format.
+     --short:: A user who prefers shorter output could....
+     --short:: Should a person and/or program want shorter output, he
+               she/they/it can...
+
+    This practice often eliminates the need to involve human actors in
+    your description, but it is a good practice regardless of the
+    avoidance of gendered pronouns.
+
+  - When it becomes awkward to stick to this style, prefer "you" when
+    addressing the hypothetical user, and possibly "we" when
+    discussing how the program might react to the user. E.g.
+
+      You can use this option instead of --xyz, but we might remove
+      support for it in future versions.
+
+    while keeping in mind that you can probably be less verbose, e.g.
+
+      Use this instead of --xyz. This option might be removed in future
+      versions.
+
+  - If you still need to refer to an example person that is
+    third-person singular, you may resort to "singular they" to avoid
+    "he/she/him/her", e.g.
+
+      A contributor asks their upstream to pull from them.
+
+    Note that this sounds ungrammatical and unnatural to those who
+    learned that "they" is only used for third-person plural, e.g.
+    those who learn English as a second language in some parts of the
+    world.
+
  Every user-visible change should be reflected in the documentation.
  The same general rule as for code applies -- imitate the existing
  conventions.
|
||||
|
@ -139,6 +139,7 @@ ASCIIDOC_CONF = -f asciidoc.conf
|
||||
ASCIIDOC_COMMON = $(ASCIIDOC) $(ASCIIDOC_EXTRA) $(ASCIIDOC_CONF) \
|
||||
-amanversion=$(GIT_VERSION) \
|
||||
-amanmanual='Git Manual' -amansource='Git'
|
||||
ASCIIDOC_DEPS = asciidoc.conf GIT-ASCIIDOCFLAGS
|
||||
TXT_TO_HTML = $(ASCIIDOC_COMMON) -b $(ASCIIDOC_HTML)
|
||||
TXT_TO_XML = $(ASCIIDOC_COMMON) -b $(ASCIIDOC_DOCBOOK)
|
||||
MANPAGE_XSL = manpage-normal.xsl
|
||||
@ -193,6 +194,7 @@ ASCIIDOC_DOCBOOK = docbook5
|
||||
ASCIIDOC_EXTRA += -acompat-mode -atabsize=8
|
||||
ASCIIDOC_EXTRA += -I. -rasciidoctor-extensions
|
||||
ASCIIDOC_EXTRA += -alitdd='&\#x2d;&\#x2d;'
|
||||
ASCIIDOC_DEPS = asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
|
||||
DBLATEX_COMMON =
|
||||
XMLTO_EXTRA += --skip-validation
|
||||
XMLTO_EXTRA += -x manpage.xsl
|
||||
@ -294,9 +296,7 @@ docdep_prereqs = \
|
||||
cmd-list.made $(cmds_txt)
|
||||
|
||||
doc.dep : $(docdep_prereqs) $(DOC_DEP_TXT) build-docdep.perl
|
||||
$(QUIET_GEN)$(RM) $@+ $@ && \
|
||||
$(PERL_PATH) ./build-docdep.perl >$@+ $(QUIET_STDERR) && \
|
||||
mv $@+ $@
|
||||
$(QUIET_GEN)$(PERL_PATH) ./build-docdep.perl >$@ $(QUIET_STDERR)
|
||||
|
||||
ifneq ($(MAKECMDGOALS),clean)
|
||||
-include doc.dep
|
||||
@ -316,8 +316,7 @@ cmds_txt = cmds-ancillaryinterrogators.txt \
|
||||
$(cmds_txt): cmd-list.made
|
||||
|
||||
cmd-list.made: cmd-list.perl ../command-list.txt $(MAN1_TXT)
|
||||
$(QUIET_GEN)$(RM) $@ && \
|
||||
$(PERL_PATH) ./cmd-list.perl ../command-list.txt $(cmds_txt) $(QUIET_STDERR) && \
|
||||
$(QUIET_GEN)$(PERL_PATH) ./cmd-list.perl ../command-list.txt $(cmds_txt) $(QUIET_STDERR) && \
|
||||
date >$@
|
||||
|
||||
mergetools_txt = mergetools-diff.txt mergetools-merge.txt
|
||||
@ -325,7 +324,7 @@ mergetools_txt = mergetools-diff.txt mergetools-merge.txt
|
||||
$(mergetools_txt): mergetools-list.made
|
||||
|
||||
mergetools-list.made: ../git-mergetool--lib.sh $(wildcard ../mergetools/*)
|
||||
$(QUIET_GEN)$(RM) $@ && \
|
||||
$(QUIET_GEN) \
|
||||
$(SHELL_PATH) -c 'MERGE_TOOLS_DIR=../mergetools && \
|
||||
. ../git-mergetool--lib.sh && \
|
||||
show_tool_names can_diff "* " || :' >mergetools-diff.txt && \
|
||||
@ -354,32 +353,23 @@ clean:
|
||||
$(RM) manpage-base-url.xsl
|
||||
$(RM) GIT-ASCIIDOCFLAGS
|
||||
|
||||
$(MAN_HTML): %.html : %.txt asciidoc.conf asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
|
||||
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
|
||||
$(TXT_TO_HTML) -d manpage -o $@+ $< && \
|
||||
mv $@+ $@
|
||||
$(MAN_HTML): %.html : %.txt $(ASCIIDOC_DEPS)
|
||||
$(QUIET_ASCIIDOC)$(TXT_TO_HTML) -d manpage -o $@ $<
|
||||
|
||||
$(OBSOLETE_HTML): %.html : %.txto asciidoc.conf asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
|
||||
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
|
||||
$(TXT_TO_HTML) -o $@+ $< && \
|
||||
mv $@+ $@
|
||||
$(OBSOLETE_HTML): %.html : %.txto $(ASCIIDOC_DEPS)
|
||||
$(QUIET_ASCIIDOC)$(TXT_TO_HTML) -o $@ $<
|
||||
|
||||
manpage-base-url.xsl: manpage-base-url.xsl.in
|
||||
$(QUIET_GEN)sed "s|@@MAN_BASE_URL@@|$(MAN_BASE_URL)|" $< > $@
|
||||
|
||||
%.1 %.5 %.7 : %.xml manpage-base-url.xsl $(wildcard manpage*.xsl)
|
||||
$(QUIET_XMLTO)$(RM) $@ && \
|
||||
$(XMLTO) -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
|
||||
$(QUIET_XMLTO)$(XMLTO) -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
|
||||
|
||||
%.xml : %.txt asciidoc.conf asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
|
||||
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
|
||||
$(TXT_TO_XML) -d manpage -o $@+ $< && \
|
||||
mv $@+ $@
|
||||
%.xml : %.txt $(ASCIIDOC_DEPS)
|
||||
$(QUIET_ASCIIDOC)$(TXT_TO_XML) -d manpage -o $@ $<
|
||||
|
||||
user-manual.xml: user-manual.txt user-manual.conf asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
|
||||
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
|
||||
$(TXT_TO_XML) -d book -o $@+ $< && \
|
||||
mv $@+ $@
|
||||
$(QUIET_ASCIIDOC)$(TXT_TO_XML) -d book -o $@ $<
|
||||
|
||||
technical/api-index.txt: technical/api-index-skel.txt \
|
||||
technical/api-index.sh $(patsubst %,%.txt,$(API_DOCS))
|
||||
@ -400,46 +390,35 @@ XSLTOPTS += --stringparam html.stylesheet docbook-xsl.css
|
||||
XSLTOPTS += --param generate.consistent.ids 1
|
||||
|
||||
user-manual.html: user-manual.xml $(XSLT)
|
||||
$(QUIET_XSLTPROC)$(RM) $@+ $@ && \
|
||||
xsltproc $(XSLTOPTS) -o $@+ $(XSLT) $< && \
|
||||
mv $@+ $@
|
||||
$(QUIET_XSLTPROC)xsltproc $(XSLTOPTS) -o $@ $(XSLT) $<
|
||||
|
||||
git.info: user-manual.texi
|
||||
$(QUIET_MAKEINFO)$(MAKEINFO) --no-split -o $@ user-manual.texi
|
||||
|
||||
user-manual.texi: user-manual.xml
|
||||
$(QUIET_DB2TEXI)$(RM) $@+ $@ && \
|
||||
$(DOCBOOK2X_TEXI) user-manual.xml --encoding=UTF-8 --to-stdout >$@++ && \
|
||||
$(PERL_PATH) fix-texi.perl <$@++ >$@+ && \
|
||||
rm $@++ && \
|
||||
mv $@+ $@
|
||||
$(QUIET_DB2TEXI)$(DOCBOOK2X_TEXI) user-manual.xml --encoding=UTF-8 --to-stdout >$@+ && \
|
||||
$(PERL_PATH) fix-texi.perl <$@+ >$@ && \
|
||||
$(RM) $@+
|
||||
|
||||
user-manual.pdf: user-manual.xml
|
||||
$(QUIET_DBLATEX)$(RM) $@+ $@ && \
|
||||
$(DBLATEX) -o $@+ $(DBLATEX_COMMON) $< && \
|
||||
mv $@+ $@
|
||||
$(QUIET_DBLATEX)$(DBLATEX) -o $@ $(DBLATEX_COMMON) $<
|
||||
|
||||
gitman.texi: $(MAN_XML) cat-texi.perl texi.xsl
|
||||
$(QUIET_DB2TEXI)$(RM) $@+ $@ && \
|
||||
$(QUIET_DB2TEXI) \
|
||||
($(foreach xml,$(sort $(MAN_XML)),xsltproc -o $(xml)+ texi.xsl $(xml) && \
|
||||
$(DOCBOOK2X_TEXI) --encoding=UTF-8 --to-stdout $(xml)+ && \
|
||||
rm $(xml)+ &&) true) > $@++ && \
|
||||
$(PERL_PATH) cat-texi.perl $@ <$@++ >$@+ && \
|
||||
rm $@++ && \
|
||||
mv $@+ $@
|
||||
$(RM) $(xml)+ &&) true) > $@+ && \
|
||||
$(PERL_PATH) cat-texi.perl $@ <$@+ >$@ && \
|
||||
$(RM) $@+
|
||||
|
||||
gitman.info: gitman.texi
|
||||
$(QUIET_MAKEINFO)$(MAKEINFO) --no-split --no-validate $*.texi
|
||||
|
||||
$(patsubst %.txt,%.texi,$(MAN_TXT)): %.texi : %.xml
|
||||
$(QUIET_DB2TEXI)$(RM) $@+ $@ && \
|
||||
$(DOCBOOK2X_TEXI) --to-stdout $*.xml >$@+ && \
|
||||
mv $@+ $@
|
||||
$(QUIET_DB2TEXI)$(DOCBOOK2X_TEXI) --to-stdout $*.xml >$@
|
||||
|
||||
howto-index.txt: howto-index.sh $(HOWTO_TXT)
|
||||
$(QUIET_GEN)$(RM) $@+ $@ && \
|
||||
'$(SHELL_PATH_SQ)' ./howto-index.sh $(sort $(HOWTO_TXT)) >$@+ && \
|
||||
mv $@+ $@
|
||||
$(QUIET_GEN)'$(SHELL_PATH_SQ)' ./howto-index.sh $(sort $(HOWTO_TXT)) >$@
|
||||
|
||||
$(patsubst %,%.html,$(ARTICLES)) : %.html : %.txt
|
||||
$(QUIET_ASCIIDOC)$(TXT_TO_HTML) $*.txt
|
||||
@ -448,10 +427,9 @@ WEBDOC_DEST = /pub/software/scm/git/docs
|
||||
|
||||
howto/%.html: ASCIIDOC_EXTRA += -a git-relative-html-prefix=../
|
||||
$(patsubst %.txt,%.html,$(HOWTO_TXT)): %.html : %.txt GIT-ASCIIDOCFLAGS
|
||||
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
|
||||
$(QUIET_ASCIIDOC) \
|
||||
sed -e '1,/^$$/d' $< | \
|
||||
$(TXT_TO_HTML) - >$@+ && \
|
||||
mv $@+ $@
|
||||
$(TXT_TO_HTML) - >$@
|
||||
|
||||
install-webdoc : html
|
||||
'$(SHELL_PATH_SQ)' ./install-webdoc.sh $(WEBDOC_DEST)
|
||||
@ -492,4 +470,7 @@ doc-l10n install-l10n::
|
||||
$(MAKE) -C po $@
|
||||
endif
|
||||
|
||||
# Delete the target file on error
|
||||
.DELETE_ON_ERROR:
|
||||
|
||||
.PHONY: FORCE
|
||||
|
@@ -47,7 +47,7 @@ Veteran contributors who are especially interested in helping mentor newcomers
 are present on the list. In order to avoid search indexers, group membership is
 required to view messages; anyone can join and no approval is required.

-==== https://webchat.freenode.net/#git-devel[#git-devel] on Freenode
+==== https://web.libera.chat/#git-devel[#git-devel] on Libera Chat

 This IRC channel is for conversations between Git contributors. If someone is
 currently online and knows the answer to your question, you can receive help
@@ -827,7 +827,7 @@ either examining recent pull requests where someone has been granted `/allow`
 (https://github.com/gitgitgadget/git/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+%22%2Fallow%22[Search:
 is:pr is:open "/allow"]), in which case both the author and the person who
 granted the `/allow` can now `/allow` you, or by inquiring on the
-https://webchat.freenode.net/#git-devel[#git-devel] IRC channel on Freenode
+https://web.libera.chat/#git-devel[#git-devel] IRC channel on Libera Chat
 linking your pull request and asking for someone to `/allow` you.

 If the CI fails, you can update your changes with `git rebase -i` and push your
|
||||
|
@ -50,7 +50,7 @@ Fixes since v1.6.0.2
|
||||
if the working tree is currently dirty.
|
||||
|
||||
* "git for-each-ref --format=%(subject)" fixed for commits with no
|
||||
no newline in the message body.
|
||||
newline in the message body.
|
||||
|
||||
* "git remote" fixed to protect printf from user input.
|
||||
|
||||
|
@ -365,7 +365,7 @@ details).
|
||||
(merge 2fbd4f9 mh/maint-lockfile-overflow later to maint).
|
||||
|
||||
* Invocations of "git checkout" used internally by "git rebase" were
|
||||
counted as "checkout", and affected later "git checkout -" to the
|
||||
counted as "checkout", and affected later "git checkout -", which took
|
||||
the user to an unexpected place.
|
||||
(merge 3bed291 rr/rebase-checkout-reflog later to maint).
|
||||
|
||||
|
@ -184,8 +184,8 @@ Performance, Internal Implementation, Development Support etc.
|
||||
the ref backend in use, as its format is much richer than the
|
||||
normal refs, and written directly by "git fetch" as a plain file.
|
||||
|
||||
* An unused binary has been discarded, and and a bunch of commands
|
||||
have been turned into into built-in.
|
||||
* An unused binary has been discarded, and a bunch of commands
|
||||
have been turned into built-in.
|
||||
|
||||
* A handful of places in in-tree code still relied on being able to
|
||||
execute the git subcommands, especially built-ins, in "git-foo"
|
||||
|
@ -47,11 +47,6 @@ UI, Workflows & Features
|
||||
tweak both the message and the contents, and only the message,
|
||||
respectively.
|
||||
|
||||
* When accessing a server with a URL like https://user:pass@site/, we
|
||||
did not to fall back to the basic authentication with the
|
||||
credential material embedded in the URL after the "Negotiate"
|
||||
authentication failed. Now we do.
|
||||
|
||||
* "git send-email" learned to honor the core.hooksPath configuration.
|
||||
|
||||
* "git format-patch -v<n>" learned to allow a reroll count that is
|
||||
@ -371,6 +366,30 @@ Fixes since v2.31
|
||||
empty directories under $GIT_DIR/refs/ for
|
||||
(merge 5f03e5126d wc/packed-ref-removal-cleanup later to maint).
|
||||
|
||||
* "git clean" and "git ls-files -i" had confusion around working on
|
||||
or showing ignored paths inside an ignored directory, which has
|
||||
been corrected.
|
||||
(merge b548f0f156 en/dir-traversal later to maint).
|
||||
|
||||
* The handling of "%(push)" formatting element of "for-each-ref" and
|
||||
friends was broken when the same codepath started handling
|
||||
"%(push:<what>)", which has been corrected.
|
||||
(merge 1e1c4c5eac zh/ref-filter-push-remote-fix later to maint).
|
||||
|
||||
* The bash prompt script (in contrib/) did not work under "set -u".
|
||||
(merge 5c0cbdb107 en/prompt-under-set-u later to maint).
|
||||
|
||||
* The "chainlint" feature in the test framework is a handy way to
|
||||
catch common mistakes in writing new tests, but tends to get
|
||||
expensive. A knob to selectively disable it has been introduced
|
||||
to help running tests that the developer has not modified.
|
||||
(merge 2d86a96220 jk/test-chainlint-softer later to maint).
|
||||
|
||||
* The "rev-parse" command did not diagnose the lack of argument to
|
||||
"--path-format" option, which was introduced in v2.31 era, which
|
||||
has been corrected.
|
||||
(merge 99fc555188 wm/rev-parse-path-format-wo-arg later to maint).
|
||||
|
||||
* Other code cleanup, docfix, build fix, etc.
|
||||
(merge f451960708 dl/cat-file-doc-cleanup later to maint).
|
||||
(merge 12604a8d0c sv/t9801-test-path-is-file-cleanup later to maint).
|
||||
|
280  Documentation/RelNotes/2.33.0.txt (new file)
@ -0,0 +1,280 @@
|
||||
Git 2.33 Release Notes
|
||||
======================
|
||||
|
||||
Backward compatibility notes
|
||||
----------------------------
|
||||
|
||||
* The "-m" option in "git log -m" that does not specify which format,
|
||||
if any, of diff is desired did not have any visible effect; it now
|
||||
implies some form of diff (by default "--patch") is produced.
|
||||
|
||||
You can disable the diff output with "git log -m --no-patch", but
|
||||
then there probably isn't much point in passing "-m" in the first
|
||||
place ;-).
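A concrete way to see the new behaviour (the merge commit named here is a
placeholder):

    $ git log -m -1 <merge-commit>              # now implies --patch: a diff per parent
    $ git log -m --no-patch -1 <merge-commit>   # suppresses the diff again, as noted above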
|
||||
|
||||
|
||||
Updates since Git 2.32
|
||||
----------------------
|
||||
|
||||
UI, Workflows & Features
|
||||
|
||||
* "git send-email" learned the "--sendmail-cmd" command line option
|
||||
and the "sendemail.sendmailCmd" configuration variable, which is a
|
||||
more sensible approach than the current way of repurposing the
|
||||
"smtp-server" that is meant to name the server to instead name the
|
||||
command to talk to the server.
|
||||
|
||||
* The "-m" option in "git log -m" that does not specify which format,
|
||||
if any, of diff is desired did not have any visible effect; it now
|
||||
implies some form of diff (by default "--patch") is produced.
|
||||
|
||||
* The userdiff pattern for C# learned the token "record".
|
||||
|
||||
* "git rev-list" learns to omit the "commit <object-name>" header
|
||||
lines from the output with the `--no-commit-header` option.
|
||||
|
||||
* "git worktree add --lock" learned to record why the worktree is
|
||||
locked with a custom message.
|
||||
|
||||
|
||||
Performance, Internal Implementation, Development Support etc.
|
||||
|
||||
* The code to handle the "--format" option in "for-each-ref" and
|
||||
friends made too many string comparisons on %(atom)s used in the
|
||||
format string, which has been corrected by converting them into
|
||||
enum when the format string is parsed.
|
||||
|
||||
* Use the hashfile API in the codepath that writes the index file to
|
||||
reduce code duplication.
|
||||
|
||||
* Repeated rename detections in a sequence of mergy operations have
|
||||
been optimized out.
|
||||
|
||||
* Preliminary clean-up of tests before the main reftable changes
|
||||
hits the codebase.
|
||||
|
||||
* The backend for "diff -G/-S" has been updated to use pcre2 engine
|
||||
when available.
|
||||
|
||||
* Use ".DELETE_ON_ERROR" pseudo target to simplify our Makefile.
|
||||
|
||||
* Code cleanup around struct_type_init() functions.
|
||||
|
||||
* "git send-email" optimization.
|
||||
|
||||
* GitHub Actions / CI update.
|
||||
(merge 0dc787a9f2 js/ci-windows-update later to maint).
|
||||
|
||||
* Object accesses in repositories with many alternate object store
|
||||
have been optimized.
|
||||
|
||||
* "git log" has been optimized not to waste cycles to load ref
|
||||
decoration data that may not be needed.
|
||||
|
||||
* Many "printf"-like helper functions we have have been annotated
|
||||
with __attribute__() to catch placeholder/parameter mismatches.
|
||||
|
||||
* Tests that cover protocol bits have been updated and helpers
|
||||
used there have been consolidated.
|
||||
|
||||
* The CI gained a new job to run "make sparse" check.
|
||||
|
||||
* "git status" codepath learned to work with sparsely populated index
|
||||
without hydrating it fully.
|
||||
|
||||
* A guideline for gender neutral documentation has been added.
|
||||
|
||||
* Documentation on "git diff -l<n>" and diff.renameLimit have been
|
||||
updated, and the defaults for these limits have been raised.
|
||||
|
||||
* The completion support used to offer alternate spelling of options
|
||||
that exist only for compatibility, which has been corrected.
|
||||
|
||||
* "TEST_OUTPUT_DIRECTORY=there make test" failed to work, which has
|
||||
been corrected.
|
||||
|
||||
* "git bundle" gained more test coverage.
|
||||
|
||||
* "git read-tree" had a codepath where blobs are fetched one-by-one
|
||||
from the promisor remote, which has been corrected to fetch in bulk.
|
||||
|
||||
|
||||
Fixes since v2.32
|
||||
-----------------
|
||||
|
||||
* We historically rejected a very short string as an author name
|
||||
while accepting a patch e-mail, which has been loosened.
|
||||
(merge 72ee47ceeb ef/mailinfo-short-name later to maint).
|
||||
|
||||
* The parallel checkout codepath did not initialize object ID field
|
||||
used to talk to the worker processes in a futureproof way.
|
||||
|
||||
* Rewrite code that triggers undefined behaviour warning.
|
||||
(merge aafa5df0df jn/size-t-casted-to-off-t-fix later to maint).
|
||||
|
||||
* The description of "fast-forward" in the glossary has been updated.
|
||||
(merge e22f2daed0 ry/clarify-fast-forward-in-glossary later to maint).
|
||||
|
||||
* Recent "git clone" left a temporary directory behind when the
|
||||
transport layer returned a failure.
|
||||
(merge 6aacb7d861 jk/clone-clean-upon-transport-error later to maint).
|
||||
|
||||
* "git fetch" over protocol v2 left its side of the socket open after
|
||||
it finished speaking, which unnecessarily wasted the resource on
|
||||
the other side.
|
||||
(merge ae1a7eefff jk/fetch-pack-v2-half-close-early later to maint).
|
||||
|
||||
* The command line completion (in contrib/) learned that "git diff"
|
||||
takes the "--anchored" option.
|
||||
(merge d1e7c2cac9 tb/complete-diff-anchored later to maint).
|
||||
|
||||
* "git-svn" tests assumed that "locale -a", which is used to pick an
|
||||
available UTF-8 locale, is available everywhere. A knob has been
|
||||
introduced to allow testers to specify a suitable locale to use.
|
||||
(merge 482c962de4 dd/svn-test-wo-locale-a later to maint).
|
||||
|
||||
* Update "git subtree" to work better on Windows.
|
||||
(merge 77f37de39f js/subtree-on-windows-fix later to maint).
|
||||
|
||||
* Remove multimail from contrib/
|
||||
(merge f74d11471f js/no-more-multimail later to maint).
|
||||
|
||||
* Make the codebase MSAN clean.
|
||||
(merge 4dbc55e87d ah/uninitialized-reads-fix later to maint).
|
||||
|
||||
* Work around inefficient glob substitution in older versions of bash
|
||||
by rewriting parts of a test.
|
||||
(merge eb87c6f559 jx/t6020-with-older-bash later to maint).
|
||||
|
||||
* Avoid duplicated work while building reachability bitmaps.
|
||||
(merge aa9ad6fee5 jk/bitmap-tree-optim later to maint).
|
||||
|
||||
* We broke "GIT_SKIP_TESTS=t?000" to skip certain tests in a recent
|
||||
update, which got fixed.
|
||||
|
||||
* The side-band demultiplexer that is used to display progress output
|
||||
from the remote end did not clear the line properly when the end of
|
||||
line hits at a packet boundary, which has been corrected.
|
||||
|
||||
* Some test scripts assumed that readlink(1) was universally
|
||||
installed and available, which is not the case.
|
||||
(merge 7c0afdf23c jk/test-without-readlink-1 later to maint).
|
||||
|
||||
* Recent update to completion script (in contrib/) broke those who
|
||||
use the __git_complete helper to define completion to their custom
|
||||
command.
|
||||
(merge cea232194d fw/complete-cmd-idx-fix later to maint).
|
||||
|
||||
* Output from some of our tests were affected by the width of the
|
||||
terminal that they were run in, which has been corrected by
|
||||
exporting a fixed value in the COLUMNS environment.
|
||||
(merge c49a177bec ab/fix-columns-to-80-during-tests later to maint).
|
||||
|
||||
* On Windows, mergetool has been taught to find kdiff3.exe just like
|
||||
it finds winmerge.exe.
|
||||
(merge 47eb4c6890 ms/mergetools-kdiff3-on-windows later to maint).
|
||||
|
||||
* When we cannot figure out how wide the terminal is, we use a
|
||||
fallback value of 80 ourselves (which cannot be avoided), but when
|
||||
we run the pager, we export it in COLUMNS, which forces the pager
|
||||
to use the hardcoded value, even when the pager is perfectly
|
||||
capable of figuring it out itself. Stop exporting COLUMNS when we
|
||||
fall back on the hardcoded default value for our own use.
|
||||
(merge 9b6e2c8b98 js/stop-exporting-bogus-columns later to maint).
|
||||
|
||||
* "git cat-file --batch-all-objects" misbehaved when "--batch" is in
|
||||
use and did not ask for certain object traits.
|
||||
(merge ee02ac6164 zh/cat-file-batch-fix later to maint).
|
||||
|
||||
* Some code and doc clarification around "git push".
|
||||
|
||||
* The "union" conflict resolution variant misbehaved when used with
a binary merge driver.
|
||||
(merge 382b601acd jk/union-merge-binary later to maint).
|
||||
|
||||
* Prevent "git p4" from failing to submit changes to binary file.
|
||||
(merge 54662d5958 dc/p4-binary-submit-fix later to maint).
|
||||
|
||||
* "git grep --and -e foo" ought to have been diagnosed as an error
|
||||
but instead segfaulted, which has been corrected.
|
||||
(merge fe7fe62d8d rs/grep-parser-fix later to maint).
|
||||
|
||||
* The merge code had funny interactions between content based rename
|
||||
detection and directory rename detection.
|
||||
(merge 3585d0ea23 en/merge-dir-rename-corner-case-fix later to maint).
|
||||
|
||||
* When rebuilding the multi-pack index file reusing an existing one,
|
||||
we used to blindly trust the existing file and ended up carrying
|
||||
corrupted data into the updated file, which has been corrected.
|
||||
(merge f89ecf7988 tb/midx-use-checksum later to maint).
|
||||
|
||||
* Update the location of system-side configuration file on Windows.
|
||||
(merge e355307692 js/gfw-system-config-loc-fix later to maint).
|
||||
|
||||
* Code recently added to support common ancestry negotiation during
|
||||
"git push" did not sanity check its arguments carefully enough.
|
||||
(merge eff40457a4 ab/fetch-negotiate-segv-fix later to maint).
|
||||
|
||||
* Update the documentation not to assume users are of certain gender
|
||||
and add guidelines to do so.
|
||||
(merge 46a237f42f ds/gender-neutral-doc later to maint).
|
||||
|
||||
* "git commit --allow-empty-message" won't abort the operation upon
|
||||
an empty message, but the hint shown in the editor said otherwise.
|
||||
(merge 6f70f00b4f hj/commit-allow-empty-message later to maint).
|
||||
|
||||
* The code that gives an error message in "git multi-pack-index" when
|
||||
no subcommand is given tried to print a NULL pointer as a string,
|
||||
which has been corrected.
|
||||
(merge 88617d11f9 tb/reverse-midx later to maint).
|
||||
|
||||
* CI update.
|
||||
(merge a066a90db6 js/ci-check-whitespace-updates later to maint).
|
||||
|
||||
* Documentation fix for "git pull --rebase=no".
|
||||
(merge d3236becec fc/pull-no-rebase-merges-theirs-into-ours later to maint).
|
||||
|
||||
* A race between repacking and using pack bitmaps has been corrected.
|
||||
(merge dc1daacdcc jk/check-pack-valid-before-opening-bitmap later to maint).
|
||||
|
||||
* Other code cleanup, docfix, build fix, etc.
|
||||
(merge bfe35a6165 ah/doc-describe later to maint).
|
||||
(merge f302c1e4aa jc/clarify-revision-range later to maint).
|
||||
(merge 3127ff90ea tl/fix-packfile-uri-doc later to maint).
|
||||
(merge a84216c684 jk/doc-color-pager later to maint).
|
||||
(merge 4e0a64a713 ab/trace2-squelch-gcc-warning later to maint).
|
||||
(merge 225f7fa847 ps/rev-list-object-type-filter later to maint).
|
||||
(merge 5317dfeaed dd/honor-users-tar-in-tests later to maint).
|
||||
(merge ace6d8e3d6 tk/partial-clone-repack-doc later to maint).
|
||||
(merge 7ba68e0cf1 js/trace2-discard-event-docfix later to maint).
|
||||
(merge 8603c419d3 fc/doc-default-to-upstream-config later to maint).
|
||||
(merge 1d72b604ef jk/revision-squelch-gcc-warning later to maint).
|
||||
(merge abcb66c614 ar/typofix later to maint).
|
||||
(merge 9853830787 ah/graph-typofix later to maint).
|
||||
(merge aac578492d ab/config-hooks-path-testfix later to maint).
|
||||
(merge 98c7656a18 ar/more-typofix later to maint).
|
||||
(merge 6fb9195f6c jk/doc-max-pack-size later to maint).
|
||||
(merge 4184cbd635 ar/mailinfo-memcmp-to-skip-prefix later to maint).
|
||||
(merge 91d2347033 ar/doc-libera-chat-in-my-first-contrib later to maint).
|
||||
(merge 338abb0f04 ab/cmd-foo-should-return later to maint).
|
||||
(merge 546096a5cb ab/xdiff-bug-cleanup later to maint).
|
||||
(merge b7b793d1e7 ab/progress-cleanup later to maint).
|
||||
(merge d94f9b8e90 ba/object-info later to maint).
|
||||
(merge 52ff891c03 ar/test-code-cleanup later to maint).
|
||||
(merge a0538e5c8b dd/document-log-decorate-default later to maint).
|
||||
(merge ce24797d38 mr/cmake later to maint).
|
||||
(merge 9eb542f2ee ab/pre-auto-gc-hook-test later to maint).
|
||||
(merge 9fffc38583 bk/doc-commit-typofix later to maint).
|
||||
(merge 1cf823d8f0 ks/submodule-cleanup later to maint).
|
||||
(merge ebbf5d2b70 js/config-mak-windows-pcre-fix later to maint).
|
||||
(merge 617480d75b hn/refs-iterator-peel-returns-boolean later to maint).
|
||||
(merge 6a24cc71ed ar/submodule-helper-include-cleanup later to maint).
|
||||
(merge 5632e838f8 rs/khash-alloc-cleanup later to maint).
|
||||
(merge b1d87fbaf1 jk/typofix later to maint).
|
||||
(merge e04170697a ab/gitignore-discovery-doc later to maint).
|
||||
(merge 8232a0ff48 dl/packet-read-response-end-fix later to maint).
|
||||
(merge eb448631fb dl/diff-merge-base later to maint).
|
||||
(merge c510928a25 hn/refs-debug-empty-prefix later to maint).
|
||||
(merge ddcb189d9d tb/bitmap-type-filter-comment-fix later to maint).
|
||||
(merge 878b399734 pb/submodule-recurse-doc later to maint).
|
||||
(merge 734283855f jk/config-env-doc later to maint).
|
@ -377,7 +377,7 @@ notes for details).
|
||||
on that order.
|
||||
|
||||
* "git show 'HEAD:Foo[BAR]Baz'" did not interpret the argument as a
|
||||
rev, i.e. the object named by the the pathname with wildcard
|
||||
rev, i.e. the object named by the pathname with wildcard
|
||||
characters in a tree object.
|
||||
(merge aac4fac nd/dwim-wildcards-as-pathspecs later to maint).
|
||||
|
||||
|
@ -373,9 +373,8 @@ If you like, you can put extra tags at the end:
|
||||
. `Acked-by:` says that the person who is more familiar with the area
|
||||
the patch attempts to modify liked the patch.
|
||||
. `Reviewed-by:`, unlike the other tags, can only be offered by the
|
||||
reviewer and means that she is completely satisfied that the patch
|
||||
is ready for application. It is usually offered only after a
|
||||
detailed review.
|
||||
reviewers themselves when they are completely satisfied with the
|
||||
patch after a detailed analysis.
|
||||
. `Tested-by:` is used to indicate that the person applied the patch
|
||||
and found it to have the desired effect.
|
||||
|
||||
|
@ -27,7 +27,7 @@ blame.ignoreRevsFile::
|
||||
file names will reset the list of ignored revisions. This option will
|
||||
be handled before the command line option `--ignore-revs-file`.
|
||||
|
||||
blame.markUnblamables::
|
||||
blame.markUnblamableLines::
|
||||
Mark lines that were changed by an ignored revision that we could not
|
||||
attribute to another commit with a '*' in the output of
|
||||
linkgit:git-blame[1].
|
||||
|
@ -127,8 +127,9 @@ color.interactive.<slot>::
|
||||
interactive commands.
|
||||
|
||||
color.pager::
|
||||
A boolean to enable/disable colored output when the pager is in
|
||||
use (default is true).
|
||||
A boolean to specify whether `auto` color modes should colorize
|
||||
output going to the pager. Defaults to true; set this to false
|
||||
if your pager does not understand ANSI color codes.
|
||||
|
||||
color.push::
|
||||
A boolean to enable/disable color in push errors. May be set to
|
||||
|
@ -118,9 +118,10 @@ diff.orderFile::
|
||||
relative to the top of the working tree.
|
||||
|
||||
diff.renameLimit::
|
||||
The number of files to consider when performing the copy/rename
|
||||
detection; equivalent to the 'git diff' option `-l`. This setting
|
||||
has no effect if rename detection is turned off.
|
||||
The number of files to consider in the exhaustive portion of
|
||||
copy/rename detection; equivalent to the 'git diff' option
|
||||
`-l`. If not set, the default value is currently 1000. This
|
||||
setting has no effect if rename detection is turned off.
|
||||
|
||||
diff.renames::
|
||||
Whether and how Git detects renames. If set to "false",
|
||||
|
@ -69,7 +69,8 @@ fetch.negotiationAlgorithm::
|
||||
setting defaults to "skipping".
|
||||
Unknown values will cause 'git fetch' to error out.
|
||||
+
|
||||
See also the `--negotiation-tip` option for linkgit:git-fetch[1].
|
||||
See also the `--negotiate-only` and `--negotiation-tip` options to
|
||||
linkgit:git-fetch[1].
|
||||
|
||||
fetch.showForcedUpdates::
|
||||
Set to false to enable `--no-show-forced-updates` in
|
||||
|
@ -14,7 +14,7 @@ merge.defaultToUpstream::
|
||||
branches at the remote named by `branch.<current branch>.remote`
|
||||
are consulted, and then they are mapped via `remote.<remote>.fetch`
|
||||
to their corresponding remote-tracking branches, and the tips of
|
||||
these tracking branches are merged.
|
||||
these tracking branches are merged. Defaults to true.
|
||||
|
||||
merge.ff::
|
||||
By default, Git does not create an extra merge commit when merging
|
||||
@ -33,10 +33,12 @@ merge.verifySignatures::
|
||||
include::fmt-merge-msg.txt[]
|
||||
|
||||
merge.renameLimit::
|
||||
The number of files to consider when performing rename detection
|
||||
during a merge; if not specified, defaults to the value of
|
||||
diff.renameLimit. This setting has no effect if rename detection
|
||||
is turned off.
|
||||
The number of files to consider in the exhaustive portion of
|
||||
rename detection during a merge. If not specified, defaults
|
||||
to the value of diff.renameLimit. If neither
|
||||
merge.renameLimit nor diff.renameLimit are specified,
|
||||
currently defaults to 7000. This setting has no effect if
|
||||
rename detection is turned off.
|
||||
|
||||
merge.renames::
|
||||
Whether Git detects renames. If set to "false", rename detection
|
||||
|
@ -99,12 +99,23 @@ pack.packSizeLimit::
|
||||
packing to a file when repacking, i.e. the git:// protocol
|
||||
is unaffected. It can be overridden by the `--max-pack-size`
|
||||
option of linkgit:git-repack[1]. Reaching this limit results
|
||||
in the creation of multiple packfiles; which in turn prevents
|
||||
bitmaps from being created.
|
||||
The minimum size allowed is limited to 1 MiB.
|
||||
The default is unlimited.
|
||||
Common unit suffixes of 'k', 'm', or 'g' are
|
||||
supported.
|
||||
in the creation of multiple packfiles.
|
||||
+
|
||||
Note that this option is rarely useful, and may result in a larger total
|
||||
on-disk size (because Git will not store deltas between packs), as well
|
||||
as worse runtime performance (object lookup within multiple packs is
|
||||
slower than a single pack, and optimizations like reachability bitmaps
|
||||
cannot cope with multiple packs).
|
||||
+
|
||||
If you need to actively run Git using smaller packfiles (e.g., because your
|
||||
filesystem does not support large files), this option may help. But if
|
||||
your goal is to transmit a packfile over a medium that supports limited
|
||||
sizes (e.g., removable media that cannot store the whole repository),
|
||||
you are likely better off creating a single large packfile and splitting
|
||||
it using a generic multi-volume archive tool (e.g., Unix `split`).
|
||||
+
|
||||
The minimum size allowed is limited to 1 MiB. The default is unlimited.
|
||||
Common unit suffixes of 'k', 'm', or 'g' are supported.
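A hedged sketch of the "single file plus a generic splitter" approach suggested
above, using `git bundle` as a convenient way to obtain one self-contained file
(a raw packfile works the same way; all file names are arbitrary):

    $ git bundle create repo.bundle --all          # one file holding the whole repository
    $ split -b 1g repo.bundle repo.bundle.part.    # cut it up for the removable media
    # on the receiving side:
    $ cat repo.bundle.part.* >repo.bundle && git clone repo.bundle restored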
|
||||
|
||||
pack.useBitmaps::
|
||||
When true, git will use pack bitmaps (if available) when packing
|
||||
|
@ -24,15 +24,14 @@ push.default::
|
||||
|
||||
* `tracking` - This is a deprecated synonym for `upstream`.
|
||||
|
||||
* `simple` - in centralized workflow, work like `upstream` with an
|
||||
added safety to refuse to push if the upstream branch's name is
|
||||
different from the local one.
|
||||
* `simple` - pushes the current branch with the same name on the remote.
|
||||
+
|
||||
When pushing to a remote that is different from the remote you normally
|
||||
pull from, work as `current`. This is the safest option and is suited
|
||||
for beginners.
|
||||
If you are working on a centralized workflow (pushing to the same repository you
|
||||
pull from, which is typically `origin`), then you need to configure an upstream
|
||||
branch with the same name.
|
||||
+
|
||||
This mode has become the default in Git 2.0.
|
||||
This mode is the default since Git 2.0, and is the safest option suited for
|
||||
beginners.
|
||||
|
||||
* `matching` - push all branches having the same name on both ends.
|
||||
This makes the repository you are pushing to remember the set of
|
||||
|
@ -8,9 +8,6 @@ sendemail.smtpEncryption::
|
||||
See linkgit:git-send-email[1] for description. Note that this
|
||||
setting is not subject to the 'identity' mechanism.
|
||||
|
||||
sendemail.smtpssl (deprecated)::
|
||||
Deprecated alias for 'sendemail.smtpEncryption = ssl'.
|
||||
|
||||
sendemail.smtpsslcertpath::
|
||||
Path to ca-certificates (either a directory or a single file).
|
||||
Set it to an empty string to disable certificate verification.
|
||||
|
@ -6,9 +6,9 @@ stash.useBuiltin::
|
||||
remaining users that setting this now does nothing.
|
||||
|
||||
stash.showIncludeUntracked::
|
||||
If this is set to true, the `git stash show` command without an
|
||||
option will show the untracked files of a stash entry. Defaults to
|
||||
false. See description of 'show' command in linkgit:git-stash[1].
|
||||
If this is set to true, the `git stash show` command will show
|
||||
the untracked files of a stash entry. Defaults to false. See
|
||||
description of 'show' command in linkgit:git-stash[1].
|
||||
|
||||
stash.showPatch::
|
||||
If this is set to true, the `git stash show` command without an
|
||||
|
@ -58,8 +58,9 @@ submodule.active::
|
||||
commands. See linkgit:gitsubmodules[7] for details.
|
||||
|
||||
submodule.recurse::
|
||||
Specifies if commands recurse into submodules by default. This
|
||||
applies to all commands that have a `--recurse-submodules` option
|
||||
A boolean indicating if commands should enable the `--recurse-submodules`
|
||||
option by default.
|
||||
Applies to all commands that support this option
|
||||
(`checkout`, `fetch`, `grep`, `pull`, `push`, `read-tree`, `reset`,
|
||||
`restore` and `switch`) except `clone` and `ls-files`.
|
||||
Defaults to false.
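A quick illustration of the reworded description (run in whatever superproject
you happen to be in):

    $ git config submodule.recurse true
    $ git pull          # now behaves like "git pull --recurse-submodules"
    $ git ls-files      # unaffected: ls-files is listed as an exception above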
|
||||
|
@ -49,10 +49,9 @@ ifdef::git-log[]
|
||||
--diff-merges=m:::
|
||||
-m:::
|
||||
This option makes diff output for merge commits to be shown in
|
||||
the default format. `-m` will produce the output only if `-p`
|
||||
is given as well. The default format could be changed using
|
||||
the default format. The default format could be changed using
|
||||
`log.diffMerges` configuration parameter, which default value
|
||||
is `separate`.
|
||||
is `separate`. `-m` implies `-p`.
|
||||
+
|
||||
--diff-merges=first-parent:::
|
||||
--diff-merges=1:::
|
||||
@ -62,7 +61,8 @@ ifdef::git-log[]
|
||||
--diff-merges=separate:::
|
||||
This makes merge commits show the full diff with respect to
|
||||
each of the parents. Separate log entry and diff is generated
|
||||
for each parent.
|
||||
for each parent. This is the format that `-m` produced
|
||||
historically.
|
||||
+
|
||||
--diff-merges=combined:::
|
||||
--diff-merges=c:::
|
||||
@ -588,11 +588,17 @@ When used together with `-B`, omit also the preimage in the deletion part
|
||||
of a delete/create pair.
|
||||
|
||||
-l<num>::
|
||||
The `-M` and `-C` options require O(n^2) processing time where n
|
||||
is the number of potential rename/copy targets. This
|
||||
option prevents rename/copy detection from running if
|
||||
the number of rename/copy targets exceeds the specified
|
||||
number.
|
||||
The `-M` and `-C` options involve some preliminary steps that
|
||||
can detect subsets of renames/copies cheaply, followed by an
|
||||
exhaustive fallback portion that compares all remaining
|
||||
unpaired destinations to all relevant sources. (For renames,
|
||||
only remaining unpaired sources are relevant; for copies, all
|
||||
original sources are relevant.) For N sources and
|
||||
destinations, this exhaustive check is O(N^2). This option
|
||||
prevents the exhaustive portion of rename/copy detection from
|
||||
running if the number of source/destination files involved
|
||||
exceeds the specified number. Defaults to diff.renameLimit.
|
||||
Note that a value of 0 is treated as unlimited.
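For example, the cap described above can be widened (or removed) for a single
diff, or persistently via the configuration variable it defaults to (the limit
values and revisions here are arbitrary):

    $ git diff -M -l5000 <rev1> <rev2>     # raise the exhaustive-detection limit once
    $ git config diff.renameLimit 5000     # or change the default it falls back to
    $ git diff -M -l0 <rev1> <rev2>        # 0 = no limit at all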
|
||||
|
||||
ifndef::git-format-patch[]
|
||||
--diff-filter=[(A|C|D|M|R|T|U|X|B)...[*]]::
|
||||
|
@ -62,8 +62,17 @@ The argument to this option may be a glob on ref names, a ref, or the (possibly
|
||||
abbreviated) SHA-1 of a commit. Specifying a glob is equivalent to specifying
|
||||
this option multiple times, one for each matching ref name.
|
||||
+
|
||||
See also the `fetch.negotiationAlgorithm` configuration variable
|
||||
documented in linkgit:git-config[1].
|
||||
See also the `fetch.negotiationAlgorithm` and `push.negotiate`
|
||||
configuration variables documented in linkgit:git-config[1], and the
|
||||
`--negotiate-only` option below.
|
||||
|
||||
--negotiate-only::
|
||||
Do not fetch anything from the server, and instead print the
|
||||
ancestors of the provided `--negotiation-tip=*` arguments,
|
||||
which we have in common with the server.
|
||||
+
|
||||
Internally this is used to implement the `push.negotiate` option, see
|
||||
linkgit:git-config[1].
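For example, to see which commits reachable from a local topic branch the
server already has, without transferring any objects (remote and branch names
are placeholders):

    $ git fetch --negotiate-only --negotiation-tip=refs/heads/topic origin
    $ git config push.negotiate true    # the setting this machinery exists to support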
|
||||
|
||||
--dry-run::
|
||||
Show what would be done, without making any changes.
|
||||
|
@ -72,7 +72,7 @@ OPTIONS
|
||||
|
||||
-p::
|
||||
--patch::
|
||||
Use the interactive patch selection interface to chose
|
||||
Use the interactive patch selection interface to choose
|
||||
which changes to commit. See linkgit:git-add[1] for
|
||||
details.
|
||||
|
||||
|
@ -71,6 +71,7 @@ codes are:
|
||||
|
||||
On success, the command returns the exit code 0.
|
||||
|
||||
[[OPTIONS]]
|
||||
OPTIONS
|
||||
-------
|
||||
|
||||
@ -143,7 +144,13 @@ See also <<FILES>>.
|
||||
|
||||
-f config-file::
|
||||
--file config-file::
|
||||
Use the given config file instead of the one specified by GIT_CONFIG.
|
||||
For writing options: write to the specified file rather than the
|
||||
repository `.git/config`.
|
||||
+
|
||||
For reading options: read only from the specified file rather than from all
|
||||
available files.
|
||||
+
|
||||
See also <<FILES>>.
|
||||
|
||||
--blob blob::
|
||||
Similar to `--file` but use the given blob instead of a file. E.g.
|
||||
@ -325,21 +332,14 @@ All writing options will per default write to the repository specific
|
||||
configuration file. Note that this also affects options like `--replace-all`
|
||||
and `--unset`. *'git config' will only ever change one file at a time*.
|
||||
|
||||
You can override these rules either by command-line options or by environment
|
||||
variables. The `--global`, `--system` and `--worktree` options will limit
|
||||
the file used to the global, system-wide or per-worktree file respectively.
|
||||
The `GIT_CONFIG` environment variable has a similar effect, but you
|
||||
can specify any filename you want.
|
||||
You can override these rules using the `--global`, `--system`,
|
||||
`--local`, `--worktree`, and `--file` command-line options; see
|
||||
<<OPTIONS>> above.
|
||||
|
||||
|
||||
ENVIRONMENT
|
||||
-----------
|
||||
|
||||
GIT_CONFIG::
|
||||
Take the configuration from the given file instead of .git/config.
|
||||
Using the "--global" option forces this to ~/.gitconfig. Using the
|
||||
"--system" option forces this to $(prefix)/etc/gitconfig.
|
||||
|
||||
GIT_CONFIG_GLOBAL::
|
||||
GIT_CONFIG_SYSTEM::
|
||||
Take the configuration from the given files instead from global or
|
||||
@ -367,6 +367,12 @@ This is useful for cases where you want to spawn multiple git commands
|
||||
with a common configuration but cannot depend on a configuration file,
|
||||
for example when writing scripts.
|
||||
|
||||
GIT_CONFIG::
|
||||
If no `--file` option is provided to `git config`, use the file
|
||||
given by `GIT_CONFIG` as if it were provided via `--file`. This
|
||||
variable has no effect on other Git commands, and is mostly for
|
||||
historical compatibility; there is generally no reason to use it
|
||||
instead of the `--file` option.
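A short illustration of the behaviour described above (the file name is
arbitrary):

    $ git config --file ./ci.config user.email ci@example.org   # write only to that file
    $ git config --file ./ci.config --list                      # read only from that file
    $ GIT_CONFIG=./ci.config git config --list                  # legacy spelling of the same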
|
||||
|
||||
[[EXAMPLES]]
|
||||
EXAMPLES
|
||||
|
@ -63,9 +63,10 @@ OPTIONS
Automatically implies --tags.

--abbrev=<n>::
Instead of using the default 7 hexadecimal digits as the
abbreviated object name, use <n> digits, or as many digits
as needed to form a unique object name. An <n> of 0
Instead of using the default number of hexadecimal digits (which
will vary according to the number of objects in the repository with
a default of 7) of the abbreviated object name, use <n> digits, or
as many digits as needed to form a unique object name. An <n> of 0
will suppress long format, only showing the closest tag.

--candidates=<n>::

@ -139,8 +140,11 @@ at the end.
The number of additional commits is the number
of commits which would be displayed by "git log v1.0.4..parent".
The hash suffix is "-g" + unambiguous abbreviation for the tip commit
of parent (which was `2414721b194453f058079d897d13c4e377f92dc6`).
The hash suffix is "-g" + an unambiguous abbreviation for the tip commit
of parent (which was `2414721b194453f058079d897d13c4e377f92dc6`). The
length of the abbreviation scales as the repository grows, using the
approximate number of objects in the repository and a bit of math
around the birthday paradox, and defaults to a minimum of 7.
The "g" prefix stands for "git" and is used to allow describing the version of
a software depending on the SCM the software is managed with. This is useful
in an environment where people may use different SCMs.
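For readers skimming the diff, the output format being described looks like this (the counts and hash come from the manpage's own `v1.0.4` example and will differ in any other repository):

------------
$ git describe parent
v1.0.4-14-g2414721
------------

Here `14` is the number of additional commits and `2414721` is the abbreviated tip object name carrying the "-g" prefix discussed above.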
@ -51,16 +51,20 @@ files on disk.
--staged is a synonym of --cached.
+
If --merge-base is given, instead of using <commit>, use the merge base
of <commit> and HEAD. `git diff --merge-base A` is equivalent to
`git diff $(git merge-base A HEAD)`.
of <commit> and HEAD. `git diff --cached --merge-base A` is equivalent to
`git diff --cached $(git merge-base A HEAD)`.

'git diff' [<options>] <commit> [--] [<path>...]::
'git diff' [<options>] [--merge-base] <commit> [--] [<path>...]::

This form is to view the changes you have in your
working tree relative to the named <commit>. You can
use HEAD to compare it with the latest commit, or a
branch name to compare with the tip of a different
branch.
+
If --merge-base is given, instead of using <commit>, use the merge base
of <commit> and HEAD. `git diff --merge-base A` is equivalent to
`git diff $(git merge-base A HEAD)`.

'git diff' [<options>] [--merge-base] <commit> <commit> [--] [<path>...]::
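A quick sketch of the equivalence the amended text spells out (assuming a branch named `topic` exists):

------------
$ git diff --cached --merge-base topic
$ git diff --cached $(git merge-base topic HEAD)
------------

The two commands produce the same output; the first form merely saves the explicit `git merge-base` call.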
@ -39,7 +39,9 @@ OPTIONS
|
||||
full ref name (including prefix) will be printed. If 'auto' is
|
||||
specified, then if the output is going to a terminal, the ref names
|
||||
are shown as if 'short' were given, otherwise no ref names are
|
||||
shown. The default option is 'short'.
|
||||
shown. The option `--decorate` is short-hand for `--decorate=short`.
|
||||
Default to configuration value of `log.decorate` if configured,
|
||||
otherwise, `auto`.
|
||||
|
||||
--decorate-refs=<pattern>::
|
||||
--decorate-refs-exclude=<pattern>::
|
||||
|
@ -128,10 +128,10 @@ depth is 4095.
|
||||
into multiple independent packfiles, each not larger than the
|
||||
given size. The size can be suffixed with
|
||||
"k", "m", or "g". The minimum size allowed is limited to 1 MiB.
|
||||
This option
|
||||
prevents the creation of a bitmap index.
|
||||
The default is unlimited, unless the config variable
|
||||
`pack.packSizeLimit` is set.
|
||||
`pack.packSizeLimit` is set. Note that this option may result in
|
||||
a larger and slower repository; see the discussion in
|
||||
`pack.packSizeLimit`.
|
||||
|
||||
--honor-pack-keep::
|
||||
This flag causes an object already in a local pack that
|
||||
|
@ -117,7 +117,7 @@ When set to `preserve` (deprecated in favor of `merges`), rebase with the
|
||||
`--preserve-merges` option passed to `git rebase` so that locally created
|
||||
merge commits will not be flattened.
|
||||
+
|
||||
When false, merge the current branch into the upstream branch.
|
||||
When false, merge the upstream branch into the current branch.
|
||||
+
|
||||
When `interactive`, enable the interactive mode of rebase.
|
||||
+
|
||||
|
@ -244,8 +244,8 @@ Imagine that you have to rebase what you have already published.
|
||||
You will have to bypass the "must fast-forward" rule in order to
|
||||
replace the history you originally published with the rebased history.
|
||||
If somebody else built on top of your original history while you are
|
||||
rebasing, the tip of the branch at the remote may advance with her
|
||||
commit, and blindly pushing with `--force` will lose her work.
|
||||
rebasing, the tip of the branch at the remote may advance with their
|
||||
commit, and blindly pushing with `--force` will lose their work.
|
||||
+
|
||||
This option allows you to say that you expect the history you are
|
||||
updating is what you rebased and want to replace. If the remote ref
|
||||
|
@ -121,7 +121,9 @@ depth is 4095.
|
||||
If specified, multiple packfiles may be created, which also
|
||||
prevents the creation of a bitmap index.
|
||||
The default is unlimited, unless the config variable
|
||||
`pack.packSizeLimit` is set.
|
||||
`pack.packSizeLimit` is set. Note that this option may result in
|
||||
a larger and slower repository; see the discussion in
|
||||
`pack.packSizeLimit`.
|
||||
|
||||
-b::
|
||||
--write-bitmap-index::
|
||||
|
@ -167,6 +167,14 @@ Sending
|
||||
`sendemail.envelopeSender` configuration variable; if that is
|
||||
unspecified, choosing the envelope sender is left to your MTA.
|
||||
|
||||
--sendmail-cmd=<command>::
|
||||
Specify a command to run to send the email. The command should
|
||||
be sendmail-like; specifically, it must support the `-i` option.
|
||||
The command will be executed in the shell if necessary. Default
|
||||
is the value of `sendemail.sendmailcmd`. If unspecified, and if
|
||||
--smtp-server is also unspecified, git-send-email will search
|
||||
for `sendmail` in `/usr/sbin`, `/usr/lib` and $PATH.
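A minimal usage sketch for the new option (here `msmtp` stands in for any sendmail-like program that supports `-i`; substitute whatever your system provides, and an illustrative patch file name is used):

------------
$ git send-email --sendmail-cmd='msmtp -i' 0001-my-change.patch
------------

The same command can be made the default with `git config sendemail.sendmailCmd 'msmtp -i'`.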
|
||||
|
||||
--smtp-encryption=<encryption>::
|
||||
Specify the encryption to use, either 'ssl' or 'tls'. Any other
|
||||
value reverts to plain SMTP. Default is the value of
|
||||
@ -211,13 +219,16 @@ a password is obtained using 'git-credential'.
|
||||
|
||||
--smtp-server=<host>::
|
||||
If set, specifies the outgoing SMTP server to use (e.g.
|
||||
`smtp.example.com` or a raw IP address). Alternatively it can
|
||||
specify a full pathname of a sendmail-like program instead;
|
||||
the program must support the `-i` option. Default value can
|
||||
be specified by the `sendemail.smtpServer` configuration
|
||||
option; the built-in default is to search for `sendmail` in
|
||||
`/usr/sbin`, `/usr/lib` and $PATH if such program is
|
||||
available, falling back to `localhost` otherwise.
|
||||
`smtp.example.com` or a raw IP address). If unspecified, and if
|
||||
`--sendmail-cmd` is also unspecified, the default is to search
|
||||
for `sendmail` in `/usr/sbin`, `/usr/lib` and $PATH if such a
|
||||
program is available, falling back to `localhost` otherwise.
|
||||
+
|
||||
For backward compatibility, this option can also specify a full pathname
|
||||
of a sendmail-like program instead; the program must support the `-i`
|
||||
option. This method does not support passing arguments or using plain
|
||||
command names. For those use cases, consider using `--sendmail-cmd`
|
||||
instead.
|
||||
|
||||
--smtp-server-port=<port>::
|
||||
Specifies a port different from the default port (SMTP
|
||||
|
@ -91,8 +91,10 @@ show [-u|--include-untracked|--only-untracked] [<diff-options>] [<stash>]::
|
||||
By default, the command shows the diffstat, but it will accept any
|
||||
format known to 'git diff' (e.g., `git stash show -p stash@{1}`
|
||||
to view the second most recent entry in patch form).
|
||||
You can use stash.showIncludeUntracked, stash.showStat, and
|
||||
stash.showPatch config variables to change the default behavior.
|
||||
If no `<diff-option>` is provided, the default behavior will be given
|
||||
by the `stash.showStat`, and `stash.showPatch` config variables. You
|
||||
can also use `stash.showIncludeUntracked` to set whether
|
||||
`--include-untracked` is enabled by default.
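For example, to make `git stash show` behave as if `--include-untracked` were always given (a sketch using the config variable named above):

------------
$ git config --global stash.showIncludeUntracked true
$ git stash show
------------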
|
||||
|
||||
pop [--index] [-q|--quiet] [<stash>]::
|
||||
|
||||
|
@ -9,7 +9,7 @@ git-worktree - Manage multiple working trees
|
||||
SYNOPSIS
|
||||
--------
|
||||
[verse]
|
||||
'git worktree add' [-f] [--detach] [--checkout] [--lock] [-b <new-branch>] <path> [<commit-ish>]
|
||||
'git worktree add' [-f] [--detach] [--checkout] [--lock [--reason <string>]] [-b <new-branch>] <path> [<commit-ish>]
|
||||
'git worktree list' [--porcelain]
|
||||
'git worktree lock' [--reason <string>] <worktree>
|
||||
'git worktree move' <worktree> <new-path>
|
||||
@ -242,7 +242,7 @@ With `list`, annotate missing working trees as prunable if they are
|
||||
older than `<time>`.
|
||||
|
||||
--reason <string>::
|
||||
With `lock`, an explanation why the working tree is locked.
|
||||
With `lock` or with `add --lock`, an explanation why the working tree is locked.
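Combining the amended synopsis above with this option, a locked working tree can now be created in one step (paths and reason are illustrative):

------------
$ git worktree add --lock --reason "kept on a slow external disk" ../maintenance-wt
------------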
|
||||
|
||||
<worktree>::
|
||||
Working trees can be identified by path, either relative or
|
||||
@ -387,7 +387,7 @@ These annotations are:
|
||||
------------
|
||||
$ git worktree list
|
||||
/path/to/linked-worktree abcd1234 [master]
|
||||
/path/to/locked-worktreee acbd5678 (brancha) locked
|
||||
/path/to/locked-worktree acbd5678 (brancha) locked
|
||||
/path/to/prunable-worktree 5678abc (detached HEAD) prunable
|
||||
------------
|
||||
|
||||
|
@ -27,12 +27,11 @@ precedence, the last matching pattern decides the outcome):
|
||||
them.
|
||||
|
||||
* Patterns read from a `.gitignore` file in the same directory
|
||||
as the path, or in any parent directory, with patterns in the
|
||||
higher level files (up to the toplevel of the work tree) being overridden
|
||||
by those in lower level files down to the directory containing the file.
|
||||
These patterns match relative to the location of the
|
||||
`.gitignore` file. A project normally includes such
|
||||
`.gitignore` files in its repository, containing patterns for
|
||||
as the path, or in any parent directory (up to the top-level of the working
|
||||
tree), with patterns in the higher level files being overridden by those in
|
||||
lower level files down to the directory containing the file. These patterns
|
||||
match relative to the location of the `.gitignore` file. A project normally
|
||||
includes such `.gitignore` files in its repository, containing patterns for
|
||||
files generated as part of the project build.
|
||||
|
||||
* Patterns read from `$GIT_DIR/info/exclude`.
|
||||
|
@ -146,8 +146,8 @@ current branch integrates with) obviously do not work, as there is no
|
||||
<<def_revision,revision>> and you are "merging" another
|
||||
<<def_branch,branch>>'s changes that happen to be a descendant of what
|
||||
you have. In such a case, you do not make a new <<def_merge,merge>>
|
||||
<<def_commit,commit>> but instead just update to his
|
||||
revision. This will happen frequently on a
|
||||
<<def_commit,commit>> but instead just update your branch to point at the same
|
||||
revision as the branch you are merging. This will happen frequently on a
|
||||
<<def_remote_tracking_branch,remote-tracking branch>> of a remote
|
||||
<<def_repository,repository>>.
|
||||
|
||||
|
@ -897,7 +897,7 @@ which are not of the requested type.
|
||||
+
|
||||
The form '--filter=sparse:oid=<blob-ish>' uses a sparse-checkout
|
||||
specification contained in the blob (or blob-expression) '<blob-ish>'
|
||||
to omit blobs that would not be not required for a sparse checkout on
|
||||
to omit blobs that would not be required for a sparse checkout on
|
||||
the requested refs.
|
||||
+
|
||||
The form '--filter=tree:<depth>' omits all blobs and trees whose depth
|
||||
@ -1064,6 +1064,14 @@ ifdef::git-rev-list[]
|
||||
--header::
|
||||
Print the contents of the commit in raw-format; each record is
|
||||
separated with a NUL character.
|
||||
|
||||
--no-commit-header::
|
||||
Suppress the header line containing "commit" and the object ID printed before
|
||||
the specified format. This has no effect on the built-in formats; only custom
|
||||
formats are affected.
|
||||
|
||||
--commit-header::
|
||||
Overrides a previous `--no-commit-header`.
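A short sketch of the effect (the custom format is chosen arbitrarily and the output values are placeholders):

------------
$ git rev-list --max-count=1 --format='%h %s' HEAD
commit <full object id>
<abbrev> <subject>
$ git rev-list --max-count=1 --no-commit-header --format='%h %s' HEAD
<abbrev> <subject>
------------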
|
||||
endif::git-rev-list[]
|
||||
|
||||
--parents::
|
||||
|
@ -260,6 +260,9 @@ any of the given commits.
|
||||
A commit's reachable set is the commit itself and the commits in
|
||||
its ancestry chain.
|
||||
|
||||
There are several notations to specify a set of connected commits
|
||||
(called a "revision range"), illustrated below.
|
||||
|
||||
|
||||
Commit Exclusions
|
||||
~~~~~~~~~~~~~~~~~
|
||||
@ -294,6 +297,26 @@ is a shorthand for 'HEAD..origin' and asks "What did the origin do since
|
||||
I forked from them?" Note that '..' would mean 'HEAD..HEAD' which is an
|
||||
empty range that is both reachable and unreachable from HEAD.
|
||||
|
||||
Commands that are specifically designed to take two distinct ranges
|
||||
(e.g. "git range-diff R1 R2" to compare two ranges) do exist, but
|
||||
they are exceptions. Unless otherwise noted, all "git" commands
|
||||
that operate on a set of commits work on a single revision range.
|
||||
In other words, writing two "two-dot range notation" next to each
|
||||
other, e.g.
|
||||
|
||||
$ git log A..B C..D
|
||||
|
||||
does *not* specify two revision ranges for most commands. Instead
|
||||
it will name a single connected set of commits, i.e. those that are
|
||||
reachable from either B or D but are reachable from neither A or C.
|
||||
In a linear history like this:
|
||||
|
||||
---A---B---o---o---C---D
|
||||
|
||||
because A and B are reachable from C, the revision range specified
|
||||
by these two dotted ranges is a single commit D.
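Put differently (a sketch of the same point in terms of the underlying set operations), the two dotted ranges collapse into one request:

------------
$ git log A..B C..D
$ git log B D --not A C
------------

Both forms ask for commits reachable from B or D but not from A or C, which in the linear history shown is just D.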
|
||||
|
||||
|
||||
Other <rev>{caret} Parent Shorthand Notations
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Three other shorthands exist, particularly useful for merge commits,
|
||||
|
@ -396,14 +396,14 @@ only present on the "start" and "atexit" events.
|
||||
}
|
||||
------------
|
||||
|
||||
`"discard"`::
|
||||
`"too_many_files"`::
|
||||
This event is written to the git-trace2-discard sentinel file if there
|
||||
are too many files in the target trace directory (see the
|
||||
trace2.maxFiles config option).
|
||||
+
|
||||
------------
|
||||
{
|
||||
"event":"discard",
|
||||
"event":"too_many_files",
|
||||
...
|
||||
}
|
||||
------------
|
||||
|
@ -599,7 +599,7 @@ supports four different modes of operation:
|
||||
convert any object names written to output to SHA-1, but store
|
||||
objects using SHA-256. This allows users to test the code with no
|
||||
visible behavior change except for performance. This allows
|
||||
allows running even tests that assume the SHA-1 hash function, to
|
||||
running even tests that assume the SHA-1 hash function, to
|
||||
sanity-check the behavior of the new mode.
|
||||
|
||||
2. ("early transition") Allow both SHA-1 and SHA-256 object names in
|
||||
|
@ -35,13 +35,14 @@ include some sort of non-trivial implementation in the Minimum Viable Product,
|
||||
at least so that we can test the client.
|
||||
|
||||
This is the implementation: a feature, marked experimental, that allows the
|
||||
server to be configured by one or more `uploadpack.blobPackfileUri=<sha1>
|
||||
<uri>` entries. Whenever the list of objects to be sent is assembled, all such
|
||||
blobs are excluded, replaced with URIs. As noted in "Future work" below, the
|
||||
server can evolve in the future to support excluding other objects (or other
|
||||
implementations of servers could be made that support excluding other objects)
|
||||
without needing a protocol change, so clients should not expect that packfiles
|
||||
downloaded in this way only contain single blobs.
|
||||
server to be configured by one or more `uploadpack.blobPackfileUri=
|
||||
<object-hash> <pack-hash> <uri>` entries. Whenever the list of objects to be
|
||||
sent is assembled, all such blobs are excluded, replaced with URIs. As noted
|
||||
in "Future work" below, the server can evolve in the future to support
|
||||
excluding other objects (or other implementations of servers could be made
|
||||
that support excluding other objects) without needing a protocol change, so
|
||||
clients should not expect that packfiles downloaded in this way only contain
|
||||
single blobs.
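A configuration sketch matching the amended syntax (all three fields are placeholders; a real entry uses the blob's object hash, the hash of the packfile served at the URI, and the URI itself):

------------
$ git config --add uploadpack.blobPackfileUri \
	"<object-hash> <pack-hash> https://cdn.example.com/big-blob.pack"
------------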
|
||||
|
||||
Client design
|
||||
-------------
|
||||
|
@ -242,8 +242,7 @@ remote in a specific order.
|
||||
repository and can satisfy all such requests.
|
||||
|
||||
- Repack essentially treats promisor and non-promisor packfiles as 2
|
||||
distinct partitions and does not mix them. Repack currently only works
|
||||
on non-promisor packfiles and loose objects.
|
||||
distinct partitions and does not mix them.
|
||||
|
||||
- Dynamic object fetching invokes fetch-pack once *for each item*
|
||||
because most algorithms stumble upon a missing object and need to have
|
||||
@ -273,9 +272,6 @@ to use those promisor remotes in that order."
|
||||
The user might want to work in a triangular work flow with multiple
|
||||
promisor remotes that each have an incomplete view of the repository.
|
||||
|
||||
- Allow repack to work on promisor packfiles (while keeping them distinct
|
||||
from non-promisor packfiles).
|
||||
|
||||
- Allow non-pathname-based filters to make use of packfile bitmaps (when
|
||||
present). This was just an omission during the initial implementation.
|
||||
|
||||
|
@ -540,7 +540,7 @@ An `object-info` request takes the following arguments:
|
||||
Indicates to the server an object which the client wants to obtain
|
||||
information for.
|
||||
|
||||
The response of `object-info` is a list of the the requested object ids
|
||||
The response of `object-info` is a list of the requested object ids
|
||||
and associated requested information, each separated by a single space.
|
||||
|
||||
output = info flush-pkt
|
||||
|
671
Documentation/technical/remembering-renames.txt
Normal file
@ -0,0 +1,671 @@
|
||||
Rebases and cherry-picks involve a sequence of merges whose results are
|
||||
recorded as new single-parent commits. The first parent side of those
|
||||
merges represent the "upstream" side, and often include a far larger set of
|
||||
changes than the second parent side. Traditionally, the renames on the
|
||||
first-parent side of that sequence of merges were repeatedly re-detected
|
||||
for every merge. This file explains why it is safe and effective during
|
||||
rebases and cherry-picks to remember renames on the upstream side of
|
||||
history as an optimization, assuming all merges are automatic and clean
|
||||
(i.e. no conflicts and not interrupted for user input or editing).
|
||||
|
||||
Outline:
|
||||
|
||||
0. Assumptions
|
||||
|
||||
1. How rebasing and cherry-picking work
|
||||
|
||||
2. Why the renames on MERGE_SIDE1 in any given pick are *always* a
|
||||
superset of the renames on MERGE_SIDE1 for the next pick.
|
||||
|
||||
3. Why any rename on MERGE_SIDE1 in any given pick is _almost_ always also
|
||||
a rename on MERGE_SIDE1 for the next pick
|
||||
|
||||
4. A detailed description of the counter-examples to #3.
|
||||
|
||||
5. Why the special cases in #4 are still fully reasonable to use to pair
|
||||
up files for three-way content merging in the merge machinery, and why
|
||||
they do not affect the correctness of the merge.
|
||||
|
||||
6. Interaction with skipping of "irrelevant" renames
|
||||
|
||||
7. Additional items that need to be cached
|
||||
|
||||
8. How directory rename detection interacts with the above and why this
|
||||
optimization is still safe even if merge.directoryRenames is set to
|
||||
"true".
|
||||
|
||||
|
||||
=== 0. Assumptions ===
|
||||
|
||||
There are two assumptions that will hold throughout this document:
|
||||
|
||||
* The upstream side where commits are transplanted to is treated as the
|
||||
first parent side when rebase/cherry-pick call the merge machinery
|
||||
|
||||
* All merges are fully automatic
|
||||
|
||||
and a third that will hold in sections 2-5 for simplicity, that I'll later
|
||||
address in section 8:
|
||||
|
||||
* No directory renames occur
|
||||
|
||||
|
||||
Let me explain more about each assumption and why I include it:
|
||||
|
||||
|
||||
The first assumption is merely for the purposes of making this document
|
||||
clearer; the optimization implementation does not actually depend upon it.
|
||||
However, the assumption does hold in all cases because it reflects the way
|
||||
that both rebase and cherry-pick were implemented; and the implementation
|
||||
of cherry-pick and rebase are not readily changeable for backwards
|
||||
compatibility reasons (see for example the discussion of the --ours and
|
||||
--theirs flag in the documentation of `git checkout`, particularly the
|
||||
comments about how they behave with rebase). The optimization avoids
|
||||
checking first-parent-ness, though. It checks the conditions that make the
|
||||
optimization valid instead, so it would still continue working if someone
|
||||
changed the parent ordering that cherry-pick and rebase use. But making
|
||||
this assumption does make this document much clearer and prevents me from
|
||||
having to repeat every example twice.
|
||||
|
||||
If the second assumption is violated, then the optimization simply is
|
||||
turned off and thus isn't relevant to consider. The second assumption can
|
||||
also be stated as "there is no interruption for a user to resolve conflicts
|
||||
or to just further edit or tweak files". While real rebases and
|
||||
cherry-picks are often interrupted (either because it's an interactive
|
||||
rebase where the user requested to stop and edit, or because there were
|
||||
conflicts that the user needs to resolve), the cache of renames is not
|
||||
stored on disk, and thus is thrown away as soon as the rebase or cherry
|
||||
pick stops for the user to resolve the operation.
|
||||
|
||||
The third assumption makes sections 2-5 simpler, and allows people to
|
||||
understand the basics of why this optimization is safe and effective, and
|
||||
then I can go back and address the specifics in section 8. It is probably
|
||||
also worth noting that if directory renames do occur, then the default of
|
||||
merge.directoryRenames being set to "conflict" means that the operation
|
||||
will stop for users to resolve the conflicts and the cache will be thrown
|
||||
away, and thus that there won't be an optimization to apply. So, the only
|
||||
reason we need to address directory renames specifically, is that some
|
||||
users will have set merge.directoryRenames to "true" to allow the merges to
|
||||
continue to proceed automatically. The optimization is still safe with
|
||||
this config setting, but we have to discuss a few more cases to show why;
|
||||
this discussion is deferred until section 8.
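The setting being discussed is an ordinary config knob, so the case that matters for section 8 is simply (value names as documented for git-config):

------------
$ git config merge.directoryRenames true
------------

With the default value of "conflict" the operation stops and the cache is dropped, as described above.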
|
||||
|
||||
|
||||
=== 1. How rebasing and cherry-picking work ===
|
||||
|
||||
Consider the following setup (from the git-rebase manpage):
|
||||
|
||||
A---B---C topic
|
||||
/
|
||||
D---E---F---G main
|
||||
|
||||
After rebasing or cherry-picking topic onto main, this will appear as:
|
||||
|
||||
A'--B'--C' topic
|
||||
/
|
||||
D---E---F---G main
|
||||
|
||||
The way the commits A', B', and C' are created is through a series of
|
||||
merges, where rebase or cherry-pick sequentially uses each of the three
|
||||
A-B-C commits in a special merge operation. Let's label the three commits
|
||||
in the merge operation as MERGE_BASE, MERGE_SIDE1, and MERGE_SIDE2. For
|
||||
this picture, the three commits for each of the three merges would be:
|
||||
|
||||
To create A':
|
||||
MERGE_BASE: E
|
||||
MERGE_SIDE1: G
|
||||
MERGE_SIDE2: A
|
||||
|
||||
To create B':
|
||||
MERGE_BASE: A
|
||||
MERGE_SIDE1: A'
|
||||
MERGE_SIDE2: B
|
||||
|
||||
To create C':
|
||||
MERGE_BASE: B
|
||||
MERGE_SIDE1: B'
|
||||
MERGE_SIDE2: C
|
||||
|
||||
Sometimes, folks are surprised that these three-way merges are done. It
|
||||
can be useful in understanding these three-way merges to view them in a
|
||||
slightly different light. For example, in creating C', you can view it as
|
||||
either:
|
||||
|
||||
* Apply the changes between B & C to B'
|
||||
* Apply the changes between B & B' to C
|
||||
|
||||
Conceptually the two statements above are the same as a three-way merge of
|
||||
B, B', and C, at least the parts before you decide to record a commit.
|
||||
|
||||
|
||||
=== 2. Why the renames on MERGE_SIDE1 in any given pick are always a ===
|
||||
=== superset of the renames on MERGE_SIDE1 for the next pick. ===
|
||||
|
||||
The merge machinery uses the filenames it is fed from MERGE_BASE,
|
||||
MERGE_SIDE1, and MERGE_SIDE2. It will only move content to a different
|
||||
filename under one of three conditions:
|
||||
|
||||
* To make both pieces of a conflict available to a user during conflict
|
||||
resolution (examples: directory/file conflict, add/add type conflict
|
||||
such as symlink vs. regular file)
|
||||
|
||||
* When MERGE_SIDE1 renames the file.
|
||||
|
||||
* When MERGE_SIDE2 renames the file.
|
||||
|
||||
First, let's remember what commits are involved in the first and second
|
||||
picks of the cherry-pick or rebase sequence:
|
||||
|
||||
To create A':
|
||||
MERGE_BASE: E
|
||||
MERGE_SIDE1: G
|
||||
MERGE_SIDE2: A
|
||||
|
||||
To create B':
|
||||
MERGE_BASE: A
|
||||
MERGE_SIDE1: A'
|
||||
MERGE_SIDE2: B
|
||||
|
||||
So, in particular, we need to show that the renames between E and G are a
|
||||
superset of those between A and A'.
|
||||
|
||||
A' is created by the first merge. A' will only have renames for one of the
|
||||
three reasons listed above. The first case, a conflict, results in a
|
||||
situation where the cache is dropped and thus this optimization doesn't
|
||||
take effect, so we need not consider that case. The third case, a rename
|
||||
on MERGE_SIDE2 (i.e. from G to A), will show up in A' but it also shows up
|
||||
in A -- therefore when diffing A and A' that path does not show up as a
|
||||
rename. The only remaining way for renames to show up in A' is for the
|
||||
rename to come from MERGE_SIDE1. Therefore, all renames between A and A'
|
||||
are a subset of those between E and G. Equivalently, all renames between E
|
||||
and G are a superset of those between A and A'.
|
||||
|
||||
|
||||
=== 3. Why any rename on MERGE_SIDE1 in any given pick is _almost_ ===
|
||||
=== always also a rename on MERGE_SIDE1 for the next pick. ===
|
||||
|
||||
Let's again look at the first two picks:
|
||||
|
||||
To create A':
|
||||
MERGE_BASE: E
|
||||
MERGE_SIDE1: G
|
||||
MERGE_SIDE2: A
|
||||
|
||||
To create B':
|
||||
MERGE_BASE: A
|
||||
MERGE_SIDE1: A'
|
||||
MERGE_SIDE2: B
|
||||
|
||||
Now let's look at any given rename from MERGE_SIDE1 of the first pick, i.e.
|
||||
any given rename from E to G. Let's use the filenames 'oldfile' and
|
||||
'newfile' for demonstration purposes. That first pick will function as
|
||||
follows; when the rename is detected, the merge machinery will do a
|
||||
three-way content merge of the following:
|
||||
E:oldfile
|
||||
G:newfile
|
||||
A:oldfile
|
||||
and produce a new result:
|
||||
A':newfile
|
||||
|
||||
Note above that I've assumed that E->A did not rename oldfile. If that
|
||||
side did rename, then we most likely have a rename/rename(1to2) conflict
|
||||
that will cause the rebase or cherry-pick operation to halt and drop the
|
||||
in-memory cache of renames and thus doesn't need to be considered further.
|
||||
In the special case that E->A does rename the file but also renames it to
|
||||
newfile, then there is no conflict from the renaming and the merge can
|
||||
succeed. In this special case, the rename is not valid to cache because
|
||||
the second merge will find A:newfile in the MERGE_BASE (see also the new
|
||||
testcases in t6429 with "rename same file identically" in their
|
||||
description). So a rename/rename(1to1) needs to be specially handled by
|
||||
pruning renames from the cache and decrementing the dir_rename_counts in
|
||||
the current and leading directories associated with those renames. Or,
|
||||
since these are really rare, one could just take the easy way out and
|
||||
disable the remembering renames optimization when a rename/rename(1to1)
|
||||
happens.
|
||||
|
||||
The previous paragraph handled the cases for E->A renaming oldfile, let's
|
||||
continue assuming that oldfile is not renamed in A.
|
||||
|
||||
As per the diagram for creating B', MERGE_SIDE1 involves the changes from A
|
||||
to A'. So, we are curious whether A:oldfile and A':newfile will be viewed
|
||||
as renames. Note that:
|
||||
|
||||
* There will be no A':oldfile (because there could not have been a
|
||||
G:oldfile as we do not do break detection in the merge machinery and
|
||||
G:newfile was detected as a rename, and by the construction of the
|
||||
rename above that merged cleanly, the merge machinery will ensure there
|
||||
is no 'oldfile' in the result).
|
||||
|
||||
* There will be no A:newfile (if there had been, we would have had a
|
||||
rename/add conflict).
|
||||
|
||||
* Clearly A:oldfile and A':newfile are "related" (A':newfile came from a
|
||||
clean three-way content merge involving A:oldfile).
|
||||
|
||||
We can also expound on the third point above, by noting that three-way
|
||||
content merges can also be viewed as applying the differences between the
|
||||
base and one side to the other side. Thus we can view A':newfile as
|
||||
having been created by taking the changes between E:oldfile and G:newfile
|
||||
(which were detected as being related, i.e. <50% changed) to A:oldfile.
|
||||
|
||||
Thus A:oldfile and A':newfile are just as related as E:oldfile and
|
||||
G:newfile are -- they have exactly identical differences. Since the latter
|
||||
were detected as renames, A:oldfile and A':newfile should also be
|
||||
detectable as renames almost always.
|
||||
|
||||
|
||||
=== 4. A detailed description of the counter-examples to #3. ===
|
||||
|
||||
We already noted in section 3 that rename/rename(1to1) (i.e. both sides
|
||||
renaming a file the same way) was one counter-example. The more
|
||||
interesting bit, though, is why did we need to use the "almost" qualifier
|
||||
when stating that A:oldfile and A':newfile are "almost" always detectable
|
||||
as renames?
|
||||
|
||||
Let's repeat an earlier point that section 3 made:
|
||||
|
||||
A':newfile was created by applying the changes between E:oldfile and
|
||||
G:newfile to A:oldfile. The changes between E:oldfile and G:newfile were
|
||||
<50% of the size of E:oldfile.
|
||||
|
||||
If those changes that were <50% of the size of E:oldfile are also <50% of
|
||||
the size of A:oldfile, then A:oldfile and A':newfile will be detectable as
|
||||
renames. However, if there is a dramatic size reduction between E:oldfile
|
||||
and A:oldfile (but the changes between E:oldfile, G:newfile, and A:oldfile
|
||||
still somehow merge cleanly), then traditional rename detection would not
|
||||
detect A:oldfile and A':newfile as renames.
|
||||
|
||||
Here's an example where that can happen:
|
||||
* E:oldfile had 20 lines
|
||||
* G:newfile added 10 new lines at the beginning of the file
|
||||
* A:oldfile kept the first 3 lines of the file, and deleted all the rest
|
||||
then
|
||||
=> A':newfile would have 13 lines, 3 of which matches those in A:oldfile.
|
||||
E:oldfile -> G:newfile would be detected as a rename, but A:oldfile and
|
||||
A':newfile would not be.
|
||||
|
||||
|
||||
=== 5. Why the special cases in #4 are still fully reasonable to use to ===
|
||||
=== pair up files for three-way content merging in the merge machinery, ===
|
||||
=== and why they do not affect the correctness of the merge. ===
|
||||
|
||||
In the rename/rename(1to1) case, A:newfile and A':newfile are not renames
|
||||
since they use the *same* filename. However, files with the same filename
|
||||
are obviously fine to pair up for three-way content merging (the merge
|
||||
machinery has never employed break detection). The interesting
|
||||
counter-example case is thus not the rename/rename(1to1) case, but the case
|
||||
where A did not rename oldfile. That was the case that we spent most of
|
||||
the time discussing in sections 3 and 4. The remainder of this section
|
||||
will be devoted to that case as well.
|
||||
|
||||
So, even if A:oldfile and A':newfile aren't detectable as renames, why is
|
||||
it still reasonable to pair them up for three-way content merging in the
|
||||
merge machinery? There are multiple reasons:
|
||||
|
||||
* As noted in sections 3 and 4, the diff between A:oldfile and A':newfile
|
||||
is *exactly* the same as the diff between E:oldfile and G:newfile. The
|
||||
latter pair were detected as renames, so it seems unlikely to surprise
|
||||
users for us to treat A:oldfile and A':newfile as renames.
|
||||
|
||||
* In fact, "oldfile" and "newfile" were at one point detected as renames
|
||||
due to how they were constructed in the E..G chain. And we used that
|
||||
information once already in this rebase/cherry-pick. I think users
|
||||
would be unlikely to be surprised at us continuing to treat the files
|
||||
as renames and would quickly understand why we had done so.
|
||||
|
||||
* Marking or declaring files as renames is *not* the end goal for merges.
|
||||
Merges use renames to determine which files make sense to be paired up
|
||||
for three-way content merges.
|
||||
|
||||
* A:oldfile and A':newfile were _already_ paired up in a three-way
|
||||
content merge; that is how A':newfile was created. In fact, that
|
||||
three-way content merge was clean. So using them again in a later
|
||||
three-way content merge seems very reasonable.
|
||||
|
||||
However, the above is focusing on the common scenarios. Let's try to look
|
||||
at all possible unusual scenarios and compare without the optimization to
|
||||
with the optimization. Consider the following theoretical cases; we will
|
||||
then dive into each to determine which of them are possible,
|
||||
and if so, what they mean:
|
||||
|
||||
1. Without the optimization, the second merge results in a conflict.
|
||||
With the optimization, the second merge also results in a conflict.
|
||||
Questions: Are the conflicts confusingly different? Better in one case?
|
||||
|
||||
2. Without the optimization, the second merge results in NO conflict.
|
||||
With the optimization, the second merge also results in NO conflict.
|
||||
Questions: Are the merges the same?
|
||||
|
||||
3. Without the optimization, the second merge results in a conflict.
|
||||
With the optimization, the second merge results in NO conflict.
|
||||
Questions: Possible? Bug, bugfix, or something else?
|
||||
|
||||
4. Without the optimization, the second merge results in NO conflict.
|
||||
With the optimization, the second merge results in a conflict.
|
||||
Questions: Possible? Bug, bugfix, or something else?
|
||||
|
||||
I'll consider all four cases, but out of order.
|
||||
|
||||
The fourth case is impossible. For the code without the remembering
|
||||
renames optimization to not get a conflict, B:oldfile would need to exactly
|
||||
match A:oldfile -- if it doesn't, there would be a modify/delete conflict.
|
||||
If A:oldfile matches B:oldfile exactly, then a three-way content merge
|
||||
between A:oldfile, A':newfile, and B:oldfile would have no conflict and
|
||||
just give us the version of newfile from A' as the result.
|
||||
|
||||
From the same logic as the above paragraph, the second case would indeed
|
||||
result in identical merges. When A:oldfile exactly matches B:oldfile, an
|
||||
undetected rename would say, "Oh, I see one side didn't modify 'oldfile'
|
||||
and the other side deleted it. I'll delete it. And I see you have this
|
||||
brand new file named 'newfile' in A', so I'll keep it." That gives the
|
||||
same results as three-way content merging A:oldfile, A':newfile, and
|
||||
B:oldfile -- a removal of oldfile with the version of newfile from A'
|
||||
showing up in the result.
|
||||
|
||||
The third case is interesting. It means that A:oldfile and A':newfile were
|
||||
not just similar enough, but that the changes between them did not conflict
|
||||
with the changes between A:oldfile and B:oldfile. This would validate our
|
||||
hunch that the files were similar enough to be used in a three-way content
|
||||
merge, and thus seems entirely correct for us to have used them that way.
|
||||
(Sidenote: One particular example here may be enlightening. Let's say that
|
||||
B was an immediate revert of A. B clearly would have been a clean revert
|
||||
of A, since A was B's immediate parent. One would assume that if you can
|
||||
pick a commit, you should also be able to cherry-pick its immediate revert.
|
||||
However, this is one of those funny corner cases; without this
|
||||
optimization, we just successfully picked a commit cleanly, but we are
|
||||
unable to cherry-pick its immediate revert due to the size differences
|
||||
between E:oldfile and A:oldfile.)
|
||||
|
||||
That leaves only the first case to consider -- when we get conflicts both
|
||||
with or without the optimization. Without the optimization, we'll have a
|
||||
modify/delete conflict, where both A':newfile and B:oldfile are left in the
|
||||
tree for the user to deal with and no hints about the potential similarity
|
||||
between the two. With the optimization, we'll have a three-way content
|
||||
merged A:oldfile, A':newfile, and B:oldfile with conflict markers
|
||||
suggesting we thought the files were related but giving the user the chance
|
||||
to resolve. As noted above, I don't think users will find us treating
|
||||
'oldfile' and 'newfile' as related as a surprise since they were between E
|
||||
and G. In any event, though, this case shouldn't be concerning since we
|
||||
hit a conflict in both cases, told the user what we know, and asked them to
|
||||
resolve it.
|
||||
|
||||
So, in summary, case 4 is impossible, case 2 yields the same behavior, and
|
||||
cases 1 and 3 seem to provide as good or better behavior with the
|
||||
optimization than without.
|
||||
|
||||
|
||||
=== 6. Interaction with skipping of "irrelevant" renames ===
|
||||
|
||||
Previous optimizations involved skipping rename detection for paths
|
||||
considered to be "irrelevant". See for example the following commits:
|
||||
|
||||
* 32a56dfb99 ("merge-ort: precompute subset of sources for which we
|
||||
need rename detection", 2021-03-11)
|
||||
* 2fd9eda462 ("merge-ort: precompute whether directory rename
|
||||
detection is needed", 2021-03-11)
|
||||
* 9bd342137e ("diffcore-rename: determine which relevant_sources are
|
||||
no longer relevant", 2021-03-13)
|
||||
|
||||
Relevance is always determined by what the _other_ side of history has
|
||||
done, in terms of modifying a file that our side renamed, or adding a
|
||||
file to a directory which our side renamed. This means that a path
|
||||
that is "irrelevant" when picking the first commit of a series in a
|
||||
rebase or cherry-pick, may suddenly become "relevant" when picking the
|
||||
next commit.
|
||||
|
||||
The upshot of this is that we can only cache rename detection results
|
||||
for relevant paths, and need to re-check relevance in subsequent
|
||||
commits. If those subsequent commits have additional paths that are
|
||||
relevant for rename detection, then we will need to redo rename
|
||||
detection -- though we can limit it to the paths for which we have not
|
||||
already detected renames.
|
||||
|
||||
|
||||
=== 7. Additional items that need to be cached ===
|
||||
|
||||
It turns out we have to cache more than just renames; we also cache:
|
||||
|
||||
A) non-renames (i.e. unpaired deletes)
|
||||
B) counts of renames within directories
|
||||
C) sources that were marked as RELEVANT_LOCATION, but which were
|
||||
downgraded to RELEVANT_NO_MORE
|
||||
D) the toplevel trees involved in the merge
|
||||
|
||||
These are all stored in struct rename_info, and respectively appear in
|
||||
* cached_pairs (alongside actual renames, just with a value of NULL)
|
||||
* dir_rename_counts
|
||||
* cached_irrelevant
|
||||
* merge_trees
|
||||
|
||||
The reason for (A) comes from the irrelevant renames skipping
|
||||
optimization discussed in section 6. The fact that irrelevant renames
|
||||
are skipped means we only get a subset of the potential renames
|
||||
detected and subsequent commits may need to run rename detection on
|
||||
the upstream side on a subset of the remaining renames (to get the
|
||||
renames that are relevant for that later commit). Since unpaired
|
||||
deletes are involved in rename detection too, we don't want to
|
||||
repeatedly check that those paths remain unpaired on the upstream side
|
||||
with every commit we are transplanting.
|
||||
|
||||
The reason for (B) is that diffcore_rename_extended() is what
|
||||
generates the counts of renames by directory which is needed in
|
||||
directory rename detection, and if we don't run
|
||||
diffcore_rename_extended() again then we need to have the output from
|
||||
it, including dir_rename_counts, from the previous run.
|
||||
|
||||
The reason for (C) is that merge-ort's tree traversal will again think
|
||||
those paths are relevant (marking them as RELEVANT_LOCATION), but the
|
||||
fact that they were downgraded to RELEVANT_NO_MORE means that
|
||||
dir_rename_counts already has the information we need for directory
|
||||
rename detection. (A path which becomes RELEVANT_CONTENT in a
|
||||
subsequent commit will be removed from cached_irrelevant.)
|
||||
|
||||
The reason for (D) is that this is how we determine whether the remembering
|
||||
renames optimization can be used. In particular, remembering that our
|
||||
sequence of merges looks like:
|
||||
|
||||
Merge 1:
|
||||
MERGE_BASE: E
|
||||
MERGE_SIDE1: G
|
||||
MERGE_SIDE2: A
|
||||
=> Creates A'
|
||||
|
||||
Merge 2:
|
||||
MERGE_BASE: A
|
||||
MERGE_SIDE1: A'
|
||||
MERGE_SIDE2: B
|
||||
=> Creates B'
|
||||
|
||||
It is the fact that the trees A and A' appear both in Merge 1 and in
|
||||
Merge 2, with A as a parent of A' that allows this optimization. So
|
||||
we store the trees to compare with what we are asked to merge next
|
||||
time.
|
||||
|
||||
|
||||
=== 8. How directory rename detection interacts with the above and ===
|
||||
=== why this optimization is still safe even if ===
|
||||
=== merge.directoryRenames is set to "true". ===
|
||||
|
||||
As noted in the assumptions section:
|
||||
|
||||
"""
|
||||
...if directory renames do occur, then the default of
|
||||
merge.directoryRenames being set to "conflict" means that the operation
|
||||
will stop for users to resolve the conflicts and the cache will be
|
||||
thrown away, and thus that there won't be an optimization to apply.
|
||||
So, the only reason we need to address directory renames specifically,
|
||||
is that some users will have set merge.directoryRenames to "true" to
|
||||
allow the merges to continue to proceed automatically.
|
||||
"""
|
||||
|
||||
Let's remember that we need to look at how any given pick affects the next
|
||||
one. So let's again use the first two picks from the diagram in section
|
||||
one:
|
||||
|
||||
First pick does this three-way merge:
|
||||
MERGE_BASE: E
|
||||
MERGE_SIDE1: G
|
||||
MERGE_SIDE2: A
|
||||
=> creates A'
|
||||
|
||||
Second pick does this three-way merge:
|
||||
MERGE_BASE: A
|
||||
MERGE_SIDE1: A'
|
||||
MERGE_SIDE2: B
|
||||
=> creates B'
|
||||
|
||||
Now, directory rename detection exists so that if one side of history
|
||||
renames a directory, and the other side adds a new file to the old
|
||||
directory, then the merge (with merge.directoryRenames=true) can move the
|
||||
file into the new directory. There are two qualitatively different ways to
|
||||
add a new file to an old directory: create a new file, or rename a file
|
||||
into that directory. Also, directory renames can be done on either side of
|
||||
history, so there are four cases to consider:
|
||||
|
||||
* MERGE_SIDE1 renames old dir, MERGE_SIDE2 adds new file to old dir
|
||||
* MERGE_SIDE1 renames old dir, MERGE_SIDE2 renames file into old dir
|
||||
* MERGE_SIDE1 adds new file to old dir, MERGE_SIDE2 renames old dir
|
||||
* MERGE_SIDE1 renames file into old dir, MERGE_SIDE2 renames old dir
|
||||
|
||||
One last note before we consider these four cases: There are some
|
||||
important properties about how we implement this optimization with
|
||||
respect to directory rename detection that we need to bear in mind
|
||||
while considering all of these cases:
|
||||
|
||||
* rename caching occurs *after* applying directory renames
|
||||
|
||||
* a rename created by directory rename detection is recorded for the side
|
||||
of history that did the directory rename.
|
||||
|
||||
* dir_rename_counts, the nested map of
|
||||
{oldname => {newname => count}},
|
||||
is cached between runs as well. This basically means that directory
|
||||
rename detection is also cached, though only on the side of history
|
||||
that we cache renames for (MERGE_SIDE1 as far as this document is
|
||||
concerned; see the assumptions section). Two interesting sub-notes
|
||||
about these counts:
|
||||
|
||||
* If we need to perform rename-detection again on the given side (e.g.
|
||||
some paths are relevant for rename detection that weren't before),
|
||||
then we clear dir_rename_counts and recompute it, making use of
|
||||
cached_pairs. The reason it is important to do this is optimizations
|
||||
around RELEVANT_LOCATION exist to prevent us from computing
|
||||
unnecessary renames for directory rename detection and from computing
|
||||
dir_rename_counts for irrelevant directories; but those same renames
|
||||
or directories may become necessary for subsequent merges. The
|
||||
easiest way to "fix up" dir_rename_counts in such cases is to just
|
||||
recompute it.
|
||||
|
||||
* If we prune rename/rename(1to1) entries from the cache, then we also
|
||||
need to update dir_rename_counts to decrement the counts for the
|
||||
involved directory and any relevant parent directories (to undo what
|
||||
update_dir_rename_counts() in diffcore-rename.c incremented when the
|
||||
rename was initially found). If we instead just disable the
|
||||
remembering renames optimization when the exceedingly rare
|
||||
rename/rename(1to1) cases occur, then dir_rename_counts will get
|
||||
re-computed the next time rename detection occurs, as noted above.
|
||||
|
||||
* the side with multiple commits to pick, is the side of history that we
|
||||
do NOT cache renames for. Thus, there are no additional commits to
|
||||
change the number of renames in a directory, except for those done by
|
||||
directory rename detection (which always pad the majority).
|
||||
|
||||
* the "renames" we cache are modified slightly by any directory rename,
|
||||
as noted below.
|
||||
|
||||
Now, with those notes out of the way, let's go through the four cases
|
||||
in order:
|
||||
|
||||
Case 1: MERGE_SIDE1 renames old dir, MERGE_SIDE2 adds new file to old dir
|
||||
|
||||
This case looks like this:
|
||||
|
||||
MERGE_BASE: E, Has olddir/
|
||||
MERGE_SIDE1: G, Renames olddir/ -> newdir/
|
||||
MERGE_SIDE2: A, Adds olddir/newfile
|
||||
=> creates A', With newdir/newfile
|
||||
|
||||
MERGE_BASE: A, Has olddir/newfile
|
||||
MERGE_SIDE1: A', Has newdir/newfile
|
||||
MERGE_SIDE2: B, Modifies olddir/newfile
|
||||
=> expected B', with threeway-merged newdir/newfile from above
|
||||
|
||||
In this case, with the optimization, note that after the first commit:
|
||||
* MERGE_SIDE1 remembers olddir/ -> newdir/
|
||||
* MERGE_SIDE1 has cached olddir/newfile -> newdir/newfile
|
||||
Given the cached rename noted above, the second merge can proceed as
|
||||
expected without needing to perform rename detection from A -> A'.
|
||||
|
||||
Case 2: MERGE_SIDE1 renames old dir, MERGE_SIDE2 renames file into old dir
|
||||
|
||||
This case looks like this:
|
||||
MERGE_BASE: E oldfile, olddir/
|
||||
MERGE_SIDE1: G oldfile, olddir/ -> newdir/
|
||||
MERGE_SIDE2: A oldfile -> olddir/newfile
|
||||
=> creates A', With newdir/newfile representing original oldfile
|
||||
|
||||
MERGE_BASE: A olddir/newfile
|
||||
MERGE_SIDE1: A' newdir/newfile
|
||||
MERGE_SIDE2: B modify olddir/newfile
|
||||
=> expected B', with threeway-merged newdir/newfile from above
|
||||
|
||||
In this case, with the optimization, note that after the first commit:
|
||||
* MERGE_SIDE1 remembers olddir/ -> newdir/
|
||||
* MERGE_SIDE1 has cached olddir/newfile -> newdir/newfile
|
||||
(NOT oldfile -> newdir/newfile; compare to case with
|
||||
(p->status == 'R' && new_path) in possibly_cache_new_pair())
|
||||
|
||||
Given the cached rename noted above, the second merge can proceed as
|
||||
expected without needing to perform rename detection from A -> A'.
|
||||
|
||||
Case 3: MERGE_SIDE1 adds new file to old dir, MERGE_SIDE2 renames old dir
|
||||
|
||||
This case looks like this:
|
||||
|
||||
MERGE_BASE: E, Has olddir/
|
||||
MERGE_SIDE1: G, Adds olddir/newfile
|
||||
MERGE_SIDE2: A, Renames olddir/ -> newdir/
|
||||
=> creates A', With newdir/newfile
|
||||
|
||||
MERGE_BASE: A, Has newdir/, but no notion of newdir/newfile
|
||||
MERGE_SIDE1: A', Has newdir/newfile
|
||||
MERGE_SIDE2: B, Has newdir/, but no notion of newdir/newfile
|
||||
=> expected B', with newdir/newfile from A'
|
||||
|
||||
In this case, with the optimization, note that after the first commit there
|
||||
were no renames on MERGE_SIDE1, and any renames on MERGE_SIDE2 are tossed.
|
||||
But the second merge didn't need any renames so this is fine.
|
||||
|
||||
Case 4: MERGE_SIDE1 renames file into old dir, MERGE_SIDE2 renames old dir
|
||||
|
||||
This case looks like this:
|
||||
|
||||
MERGE_BASE: E, Has olddir/
|
||||
MERGE_SIDE1: G, Renames oldfile -> olddir/newfile
|
||||
MERGE_SIDE2: A, Renames olddir/ -> newdir/
|
||||
=> creates A', With newdir/newfile representing original oldfile
|
||||
|
||||
MERGE_BASE: A, Has oldfile
|
||||
MERGE_SIDE1: A', Has newdir/newfile
|
||||
MERGE_SIDE2: B, Modifies oldfile
|
||||
=> expected B', with threeway-merged newdir/newfile from above
|
||||
|
||||
In this case, with the optimization, note that after the first commit:
|
||||
* MERGE_SIDE1 remembers oldfile -> newdir/newfile
|
||||
(NOT oldfile -> olddir/newfile; compare to case of second
|
||||
block under p->status == 'R' in possibly_cache_new_pair())
|
||||
* MERGE_SIDE2 renames are tossed because only MERGE_SIDE1 is remembered
|
||||
|
||||
Given the cached rename noted above, the second merge can proceed as
|
||||
expected without needing to perform rename detection from A -> A'.
|
||||
|
||||
Finally, I'll just note here that interactions with the
|
||||
skip-irrelevant-renames optimization means we sometimes don't detect
|
||||
renames for any files within a directory that was renamed, in which
|
||||
case we will not have been able to detect any rename for the directory
|
||||
itself. In such a case, we do not know whether the directory was
|
||||
renamed; we want to be careful to avoid caching some kind of "this
|
||||
directory was not renamed" statement. If we did, then a subsequent
|
||||
commit being rebased could add a file to the old directory, and the
|
||||
user would expect it to end up in the correct directory -- something
|
||||
our erroneous "this directory was not renamed" cache would preclude.
|
@ -2792,7 +2792,7 @@ A fast-forward looks something like this:
|
||||
|
||||
In some cases it is possible that the new head will *not* actually be
|
||||
a descendant of the old head. For example, the developer may have
|
||||
realized she made a serious mistake, and decided to backtrack,
|
||||
realized a serious mistake was made and decided to backtrack,
|
||||
resulting in a situation like:
|
||||
|
||||
................................................
|
||||
|
@ -1,7 +1,7 @@
|
||||
#!/bin/sh
|
||||
|
||||
GVF=GIT-VERSION-FILE
|
||||
DEF_VER=v2.32.0-rc0
|
||||
DEF_VER=v2.33.0-rc0
|
||||
|
||||
LF='
|
||||
'
|
||||
|
84
Makefile
@ -398,6 +398,10 @@ all::
|
||||
# with a different indexfile format version. If it isn't set the index
|
||||
# file format used is index-v[23].
|
||||
#
|
||||
# Define GIT_TEST_UTF8_LOCALE to preferred utf-8 locale for testing.
|
||||
# If it isn't set, fallback to $LC_ALL, $LANG or use the first utf-8
|
||||
# locale returned by "locale -a".
|
||||
#
|
||||
# Define HAVE_CLOCK_GETTIME if your platform has clock_gettime.
|
||||
#
|
||||
# Define HAVE_CLOCK_MONOTONIC if your platform has CLOCK_MONOTONIC.
|
||||
@ -722,9 +726,11 @@ TEST_BUILTINS_OBJS += test-mergesort.o
|
||||
TEST_BUILTINS_OBJS += test-mktemp.o
|
||||
TEST_BUILTINS_OBJS += test-oid-array.o
|
||||
TEST_BUILTINS_OBJS += test-oidmap.o
|
||||
TEST_BUILTINS_OBJS += test-oidtree.o
|
||||
TEST_BUILTINS_OBJS += test-online-cpus.o
|
||||
TEST_BUILTINS_OBJS += test-parse-options.o
|
||||
TEST_BUILTINS_OBJS += test-parse-pathspec-file.o
|
||||
TEST_BUILTINS_OBJS += test-partial-clone.o
|
||||
TEST_BUILTINS_OBJS += test-path-utils.o
|
||||
TEST_BUILTINS_OBJS += test-pcre2-config.o
|
||||
TEST_BUILTINS_OBJS += test-pkt-line.o
|
||||
@ -845,6 +851,7 @@ LIB_OBJS += branch.o
|
||||
LIB_OBJS += bulk-checkin.o
|
||||
LIB_OBJS += bundle.o
|
||||
LIB_OBJS += cache-tree.o
|
||||
LIB_OBJS += cbtree.o
|
||||
LIB_OBJS += chdir-notify.o
|
||||
LIB_OBJS += checkout.o
|
||||
LIB_OBJS += chunk-format.o
|
||||
@ -940,6 +947,7 @@ LIB_OBJS += object.o
|
||||
LIB_OBJS += oid-array.o
|
||||
LIB_OBJS += oidmap.o
|
||||
LIB_OBJS += oidset.o
|
||||
LIB_OBJS += oidtree.o
|
||||
LIB_OBJS += pack-bitmap-write.o
|
||||
LIB_OBJS += pack-bitmap.o
|
||||
LIB_OBJS += pack-check.o
|
||||
@ -1687,13 +1695,31 @@ ifdef NO_UNIX_SOCKETS
|
||||
else
|
||||
LIB_OBJS += unix-socket.o
|
||||
LIB_OBJS += unix-stream-server.o
|
||||
endif
|
||||
|
||||
# Simple IPC requires threads and platform-specific IPC support.
|
||||
# Only platforms that have both should include these source files
|
||||
# in the build.
|
||||
#
|
||||
# On Windows-based systems, Simple IPC requires threads and Windows
|
||||
# Named Pipes. These are always available, so Simple IPC support
|
||||
# is optional.
|
||||
#
|
||||
# On Unix-based systems, Simple IPC requires pthreads and Unix
|
||||
# domain sockets. So support is only enabled when both are present.
|
||||
#
|
||||
ifdef USE_WIN32_IPC
|
||||
BASIC_CFLAGS += -DSUPPORTS_SIMPLE_IPC
|
||||
LIB_OBJS += compat/simple-ipc/ipc-shared.o
|
||||
LIB_OBJS += compat/simple-ipc/ipc-win32.o
|
||||
else
|
||||
ifndef NO_PTHREADS
|
||||
ifndef NO_UNIX_SOCKETS
|
||||
BASIC_CFLAGS += -DSUPPORTS_SIMPLE_IPC
|
||||
LIB_OBJS += compat/simple-ipc/ipc-shared.o
|
||||
LIB_OBJS += compat/simple-ipc/ipc-unix-socket.o
|
||||
endif
|
||||
|
||||
ifdef USE_WIN32_IPC
|
||||
LIB_OBJS += compat/simple-ipc/ipc-shared.o
|
||||
LIB_OBJS += compat/simple-ipc/ipc-win32.o
|
||||
endif
|
||||
endif
|
||||
|
||||
ifdef NO_ICONV
|
||||
@ -1987,6 +2013,7 @@ ETC_GITCONFIG_SQ = $(subst ','\'',$(ETC_GITCONFIG))
|
||||
ETC_GITATTRIBUTES_SQ = $(subst ','\'',$(ETC_GITATTRIBUTES))
|
||||
|
||||
DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
|
||||
NO_GETTEXT_SQ = $(subst ','\'',$(NO_GETTEXT))
|
||||
bindir_SQ = $(subst ','\'',$(bindir))
|
||||
bindir_relative_SQ = $(subst ','\'',$(bindir_relative))
|
||||
mandir_SQ = $(subst ','\'',$(mandir))
|
||||
@ -2141,6 +2168,16 @@ shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell
|
||||
strip: $(PROGRAMS) git$X
|
||||
$(STRIP) $(STRIP_OPTS) $^
|
||||
|
||||
### Flags affecting all rules
|
||||
|
||||
# A GNU make extension since gmake 3.72 (released in late 1994) to
|
||||
# remove the target of rules if commands in those rules fail. The
|
||||
# default is to only do that if make itself receives a signal. Affects
|
||||
# all targets, see:
|
||||
#
|
||||
# info make --index-search=.DELETE_ON_ERROR
|
||||
.DELETE_ON_ERROR:
|
||||
|
||||
### Target-specific flags and dependencies
|
||||
|
||||
# The generic compilation pattern rule and automatically
|
||||
@ -2224,7 +2261,6 @@ SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):$(GIT_VERSION):\
$(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV):\
$(perllibdir_SQ)
define cmd_munge_script
$(RM) $@ $@+ && \
sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
-e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \
-e 's|@@DIFF@@|$(DIFF_SQ)|' \
@ -2271,10 +2307,13 @@ perl_localedir_SQ = $(localedir_SQ)

ifndef NO_PERL
PERL_HEADER_TEMPLATE = perl/header_templates/fixed_prefix.template.pl
PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ):$(perllibdir_SQ)

PERL_DEFINES := $(PERL_PATH_SQ) $(PERLLIB_EXTRA_SQ) $(perllibdir_SQ)
PERL_DEFINES =
PERL_DEFINES += $(PERL_PATH_SQ)
PERL_DEFINES += $(PERLLIB_EXTRA_SQ)
PERL_DEFINES += $(perllibdir_SQ)
PERL_DEFINES += $(RUNTIME_PREFIX)
PERL_DEFINES += $(NO_PERL_CPAN_FALLBACKS)
PERL_DEFINES += $(NO_GETTEXT)

# Support Perl runtime prefix. In this mode, a different header is installed
# into Perl scripts.
@ -2291,7 +2330,7 @@ endif
PERL_DEFINES += $(gitexecdir) $(perllibdir) $(localedir)

$(SCRIPT_PERL_GEN): % : %.perl GIT-PERL-DEFINES GIT-PERL-HEADER GIT-VERSION-FILE
$(QUIET_GEN)$(RM) $@ $@+ && \
$(QUIET_GEN) \
sed -e '1{' \
-e ' s|#!.*perl|#!$(PERL_PATH_SQ)|' \
-e ' r GIT-PERL-HEADER' \
@ -2311,7 +2350,7 @@ GIT-PERL-DEFINES: FORCE
fi

GIT-PERL-HEADER: $(PERL_HEADER_TEMPLATE) GIT-PERL-DEFINES Makefile
$(QUIET_GEN)$(RM) $@ && \
$(QUIET_GEN) \
INSTLIBDIR='$(perllibdir_SQ)' && \
INSTLIBDIR_EXTRA='$(PERLLIB_EXTRA_SQ)' && \
INSTLIBDIR="$$INSTLIBDIR$${INSTLIBDIR_EXTRA:+:$$INSTLIBDIR_EXTRA}" && \
@ -2337,7 +2376,7 @@ git-instaweb: git-instaweb.sh GIT-SCRIPT-DEFINES
mv $@+ $@
else # NO_PERL
$(SCRIPT_PERL_GEN) git-instaweb: % : unimplemented.sh
$(QUIET_GEN)$(RM) $@ $@+ && \
$(QUIET_GEN) \
sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
-e 's|@@REASON@@|NO_PERL=$(NO_PERL)|g' \
unimplemented.sh >$@+ && \
@ -2351,14 +2390,14 @@ $(SCRIPT_PYTHON_GEN): GIT-BUILD-OPTIONS
ifndef NO_PYTHON
$(SCRIPT_PYTHON_GEN): GIT-CFLAGS GIT-PREFIX GIT-PYTHON-VARS
$(SCRIPT_PYTHON_GEN): % : %.py
$(QUIET_GEN)$(RM) $@ $@+ && \
$(QUIET_GEN) \
sed -e '1s|#!.*python|#!$(PYTHON_PATH_SQ)|' \
$< >$@+ && \
chmod +x $@+ && \
mv $@+ $@
else # NO_PYTHON
$(SCRIPT_PYTHON_GEN): % : unimplemented.sh
$(QUIET_GEN)$(RM) $@ $@+ && \
$(QUIET_GEN) \
sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
-e 's|@@REASON@@|NO_PYTHON=$(NO_PYTHON)|g' \
unimplemented.sh >$@+ && \
@ -2366,8 +2405,7 @@ $(SCRIPT_PYTHON_GEN): % : unimplemented.sh
mv $@+ $@
endif # NO_PYTHON

CONFIGURE_RECIPE = $(RM) configure configure.ac+ && \
sed -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' \
CONFIGURE_RECIPE = sed -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' \
configure.ac >configure.ac+ && \
autoconf -o configure configure.ac+ && \
$(RM) configure.ac+
@ -2492,7 +2530,6 @@ endif
ifeq ($(GENERATE_COMPILATION_DATABASE),yes)
all:: compile_commands.json
compile_commands.json:
@$(RM) $@
$(QUIET_GEN)sed -e '1s/^/[/' -e '$$s/,$$/]/' $(compdb_dir)/*.o.json > $@+
@if test -s $@+; then mv $@+ $@; else $(RM) $@+; fi
endif
@ -2565,10 +2602,10 @@ $(REMOTE_CURL_PRIMARY): remote-curl.o http.o http-walker.o GIT-LDFLAGS $(GITLIBS
$(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)

$(LIB_FILE): $(LIB_OBJS)
$(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^
$(QUIET_AR)$(AR) $(ARFLAGS) $@ $^

$(XDIFF_LIB): $(XDIFF_OBJS)
$(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^
$(QUIET_AR)$(AR) $(ARFLAGS) $@ $^

export DEFAULT_EDITOR DEFAULT_PAGER

@ -2653,10 +2690,13 @@ po/git.pot: $(GENERATED_H) FORCE
.PHONY: pot
pot: po/git.pot

ifdef NO_GETTEXT
POFILES :=
MOFILES :=
else
POFILES := $(wildcard po/*.po)
MOFILES := $(patsubst po/%.po,po/build/locale/%/LC_MESSAGES/git.mo,$(POFILES))

ifndef NO_GETTEXT
all:: $(MOFILES)
endif

@ -2676,9 +2716,10 @@ endif
NO_PERL_CPAN_FALLBACKS_SQ = $(subst ','\'',$(NO_PERL_CPAN_FALLBACKS))
endif

perl/build/lib/%.pm: perl/%.pm
perl/build/lib/%.pm: perl/%.pm GIT-PERL-DEFINES
$(QUIET_GEN)mkdir -p $(dir $@) && \
sed -e 's|@@LOCALEDIR@@|$(perl_localedir_SQ)|g' \
-e 's|@@NO_GETTEXT@@|$(NO_GETTEXT_SQ)|g' \
-e 's|@@NO_PERL_CPAN_FALLBACKS@@|$(NO_PERL_CPAN_FALLBACKS_SQ)|g' \
< $< > $@

@ -2778,6 +2819,9 @@ ifdef GIT_TEST_CMP
endif
ifdef GIT_TEST_CMP_USE_COPIED_CONTEXT
@echo GIT_TEST_CMP_USE_COPIED_CONTEXT=YesPlease >>$@+
endif
ifdef GIT_TEST_UTF8_LOCALE
@echo GIT_TEST_UTF8_LOCALE=\''$(subst ','\'',$(subst ','\'',$(GIT_TEST_UTF8_LOCALE)))'\' >>$@+
endif
@echo NO_GETTEXT=\''$(subst ','\'',$(subst ','\'',$(NO_GETTEXT)))'\' >>$@+
ifdef GIT_PERF_REPEAT_COUNT

@ -280,6 +280,7 @@ static void add_p_state_clear(struct add_p_state *s)
clear_add_i_state(&s->s);
}

__attribute__((format (printf, 2, 3)))
static void err(struct add_p_state *s, const char *fmt, ...)
{
va_list args;

1
advice.h
@ -90,6 +90,7 @@ int advice_enabled(enum advice_type type);
/**
* Checks the visibility of the advice before printing.
*/
__attribute__((format (printf, 2, 3)))
void advise_if_enabled(enum advice_type type, const char *advice, ...);

int error_resolve_conflict(const char *me);

6
apply.c
@ -101,9 +101,9 @@ int init_apply_state(struct apply_state *state,
state->ws_error_action = warn_on_ws_error;
state->ws_ignore_action = ignore_ws_none;
state->linenr = 1;
string_list_init(&state->fn_table, 0);
string_list_init(&state->limit_by_name, 0);
string_list_init(&state->symlink_changes, 0);
string_list_init_nodup(&state->fn_table);
string_list_init_nodup(&state->limit_by_name);
string_list_init_nodup(&state->symlink_changes);
strbuf_init(&state->root, 0);

git_apply_config();

@ -645,7 +645,7 @@ int write_archive(int argc, const char **argv, const char *prefix,
args.pretty_ctx = &ctx;
args.repo = repo;
args.prefix = prefix;
string_list_init(&args.extra_files, 1);
string_list_init_dup(&args.extra_files);
argc = parse_archive_args(argc, argv, &ar, &args, name_hint, remote);
if (!startup_info->have_repository) {
/*

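[Editor's note] The init_apply_state and write_archive hunks above replace string_list_init(list, 0) and string_list_init(list, 1) with the explicit string_list_init_nodup()/string_list_init_dup() initializers. A short sketch of the ownership difference, assuming Git's string-list.h API as used in these hunks; illustration only, not part of the commit.

#include "git-compat-util.h"
#include "string-list.h"

static void string_list_ownership_demo(const char *borrowed)
{
	struct string_list copies, borrows;

	string_list_init_dup(&copies);    /* list strdup()s appended strings and owns the copies */
	string_list_init_nodup(&borrows); /* list stores the caller's pointers as-is */

	string_list_append(&copies, borrowed);
	string_list_append(&borrows, borrowed);

	string_list_clear(&copies, 0);    /* frees the duplicated strings */
	string_list_clear(&borrows, 0);   /* frees only the item array */
}
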
2
attr.c
@ -685,7 +685,7 @@ static struct attr_stack *read_attr_from_array(const char **list)
* Callers into the attribute system assume there is a single, system-wide
* global state where attributes are read from and when the state is flipped by
* calling git_attr_set_direction(), the stack frames that have been
* constructed need to be discarded so so that subsequent calls into the
* constructed need to be discarded so that subsequent calls into the
* attribute system will lazily read from the right place. Since changing
* direction causes a global paradigm shift, it should not ever be called while
* another thread could potentially be calling into the attribute system.

@ -470,7 +470,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
{
int exit_status = 0;
struct pathspec pathspec;
struct dir_struct dir;
struct dir_struct dir = DIR_INIT;
int flags;
int add_new_files;
int require_pathspec;
@ -577,7 +577,6 @@ int cmd_add(int argc, const char **argv, const char *prefix)
die_in_unpopulated_submodule(&the_index, prefix);
die_path_inside_submodule(&the_index, &pathspec);

dir_init(&dir);
if (add_new_files) {
int baselen;

@ -210,6 +210,7 @@ static void write_state_bool(const struct am_state *state,
* If state->quiet is false, calls fprintf(fp, fmt, ...), and appends a newline
* at the end.
*/
__attribute__((format (printf, 3, 4)))
static void say(const struct am_state *state, FILE *fp, const char *fmt, ...)
{
va_list ap;

@ -117,6 +117,7 @@ static int write_in_file(const char *path, const char *mode, const char *format,
return fclose(fp);
}

__attribute__((format (printf, 2, 3)))
static int write_to_file(const char *path, const char *format, ...)
{
int res;
@ -129,6 +130,7 @@ static int write_to_file(const char *path, const char *format, ...)
return res;
}

__attribute__((format (printf, 2, 3)))
static int append_to_file(const char *path, const char *format, ...)
{
int res;

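[Editor's note] Several hunks in this commit annotate variadic helpers with __attribute__((format (printf, N, M))). A standalone sketch of what that attribute buys, assuming GCC or Clang; illustration only, not code from the commit.

#include <stdarg.h>
#include <stdio.h>

/* argument 2 is the format string, argument 3 is where the "..." begins */
__attribute__((format (printf, 2, 3)))
static void logf_prefixed(const char *prefix, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "%s: ", prefix);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fputc('\n', stderr);
}

int main(void)
{
	logf_prefixed("demo", "%d items", 3);
	/* logf_prefixed("demo", "%s", 3);  -Wformat would now flag this call */
	return 0;
}
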
@ -46,7 +46,7 @@ static int parse_options_cmd_bundle(int argc,
const char* prefix,
const char * const usagestr[],
const struct option options[],
const char **bundle_file) {
char **bundle_file) {
int newargc;
newargc = parse_options(argc, argv, NULL, options, usagestr,
PARSE_OPT_STOP_AT_NON_OPTION);
@ -61,7 +61,7 @@ static int cmd_bundle_create(int argc, const char **argv, const char *prefix) {
int progress = isatty(STDERR_FILENO);
struct strvec pack_opts;
int version = -1;

int ret;
struct option options[] = {
OPT_SET_INT('q', "quiet", &progress,
N_("do not show progress meter"), 0),
@ -76,7 +76,7 @@ static int cmd_bundle_create(int argc, const char **argv, const char *prefix) {
N_("specify bundle format version")),
OPT_END()
};
const char* bundle_file;
char *bundle_file;

argc = parse_options_cmd_bundle(argc, argv, prefix,
builtin_bundle_create_usage, options, &bundle_file);
@ -94,75 +94,95 @@ static int cmd_bundle_create(int argc, const char **argv, const char *prefix) {

if (!startup_info->have_repository)
die(_("Need a repository to create a bundle."));
return !!create_bundle(the_repository, bundle_file, argc, argv, &pack_opts, version);
ret = !!create_bundle(the_repository, bundle_file, argc, argv, &pack_opts, version);
free(bundle_file);
return ret;
}

static int cmd_bundle_verify(int argc, const char **argv, const char *prefix) {
struct bundle_header header;
struct bundle_header header = BUNDLE_HEADER_INIT;
int bundle_fd = -1;
int quiet = 0;

int ret;
struct option options[] = {
OPT_BOOL('q', "quiet", &quiet,
N_("do not show bundle details")),
OPT_END()
};
const char* bundle_file;
char *bundle_file;

argc = parse_options_cmd_bundle(argc, argv, prefix,
builtin_bundle_verify_usage, options, &bundle_file);
/* bundle internals use argv[1] as further parameters */

memset(&header, 0, sizeof(header));
if ((bundle_fd = read_bundle_header(bundle_file, &header)) < 0)
return 1;
if ((bundle_fd = read_bundle_header(bundle_file, &header)) < 0) {
ret = 1;
goto cleanup;
}
close(bundle_fd);
if (verify_bundle(the_repository, &header, !quiet))
return 1;
if (verify_bundle(the_repository, &header, !quiet)) {
ret = 1;
goto cleanup;
}

fprintf(stderr, _("%s is okay\n"), bundle_file);
return 0;
ret = 0;
cleanup:
free(bundle_file);
bundle_header_release(&header);
return ret;
}

static int cmd_bundle_list_heads(int argc, const char **argv, const char *prefix) {
struct bundle_header header;
struct bundle_header header = BUNDLE_HEADER_INIT;
int bundle_fd = -1;

int ret;
struct option options[] = {
OPT_END()
};
const char* bundle_file;
char *bundle_file;

argc = parse_options_cmd_bundle(argc, argv, prefix,
builtin_bundle_list_heads_usage, options, &bundle_file);
/* bundle internals use argv[1] as further parameters */

memset(&header, 0, sizeof(header));
if ((bundle_fd = read_bundle_header(bundle_file, &header)) < 0)
return 1;
if ((bundle_fd = read_bundle_header(bundle_file, &header)) < 0) {
ret = 1;
goto cleanup;
}
close(bundle_fd);
return !!list_bundle_refs(&header, argc, argv);
ret = !!list_bundle_refs(&header, argc, argv);
cleanup:
free(bundle_file);
bundle_header_release(&header);
return ret;
}

static int cmd_bundle_unbundle(int argc, const char **argv, const char *prefix) {
struct bundle_header header;
struct bundle_header header = BUNDLE_HEADER_INIT;
int bundle_fd = -1;

int ret;
struct option options[] = {
OPT_END()
};
const char* bundle_file;
char *bundle_file;

argc = parse_options_cmd_bundle(argc, argv, prefix,
builtin_bundle_unbundle_usage, options, &bundle_file);
/* bundle internals use argv[1] as further parameters */

memset(&header, 0, sizeof(header));
if ((bundle_fd = read_bundle_header(bundle_file, &header)) < 0)
return 1;
if ((bundle_fd = read_bundle_header(bundle_file, &header)) < 0) {
ret = 1;
goto cleanup;
}
if (!startup_info->have_repository)
die(_("Need a repository to unbundle."));
return !!unbundle(the_repository, &header, bundle_fd, 0) ||
ret = !!unbundle(the_repository, &header, bundle_fd, 0) ||
list_bundle_refs(&header, argc, argv);
bundle_header_release(&header);
cleanup:
free(bundle_file);
return ret;
}

int cmd_bundle(int argc, const char **argv, const char *prefix)

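[Editor's note] The hunks above rework cmd_bundle_create/verify/list_heads/unbundle so that every early return funnels through a single cleanup: label that frees bundle_file and releases the header. A generic, self-contained sketch of that single-exit pattern; illustration only, the function below is invented and not part of Git.

#include <stdio.h>
#include <stdlib.h>

static int count_lines(const char *path)
{
	int ret = 0;
	char *buf = NULL;
	FILE *fp = fopen(path, "r");

	if (!fp) {
		ret = -1;
		goto cleanup;
	}
	buf = malloc(4096);
	if (!buf) {
		ret = -1;
		goto cleanup;
	}
	while (fgets(buf, 4096, fp))
		ret++;

cleanup:
	free(buf);	/* free(NULL) is a harmless no-op */
	if (fp)
		fclose(fp);
	return ret;
}
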
@ -512,12 +512,6 @@ static int batch_objects(struct batch_options *opt)
if (opt->cmdmode)
data.split_on_whitespace = 1;

if (opt->all_objects) {
struct object_info empty = OBJECT_INFO_INIT;
if (!memcmp(&data.info, &empty, sizeof(empty)))
data.skip_object_info = 1;
}

/*
* If we are printing out the object, then always fill in the type,
* since we will want to decide whether or not to stream.
@ -527,6 +521,10 @@ static int batch_objects(struct batch_options *opt)

if (opt->all_objects) {
struct object_cb_data cb;
struct object_info empty = OBJECT_INFO_INIT;

if (!memcmp(&data.info, &empty, sizeof(empty)))
data.skip_object_info = 1;

if (has_promisor_remote())
warning("This repository uses promisor remotes. Some objects may not be loaded.");

@ -153,7 +153,7 @@ static int check_ignore_stdin_paths(struct dir_struct *dir, const char *prefix)
int cmd_check_ignore(int argc, const char **argv, const char *prefix)
{
int num_ignored;
struct dir_struct dir;
struct dir_struct dir = DIR_INIT;

git_config(git_default_config, NULL);

@ -182,7 +182,6 @@ int cmd_check_ignore(int argc, const char **argv, const char *prefix)
if (!no_index && read_cache() < 0)
die(_("index file corrupt"));

dir_init(&dir);
setup_standard_excludes(&dir);

if (stdin_paths) {

@ -53,7 +53,7 @@ static void packet_to_pc_item(const char *buffer, int len,

static void report_result(struct parallel_checkout_item *pc_item)
{
struct pc_item_result res;
struct pc_item_result res = { 0 };
size_t size;

res.id = pc_item->id;

@ -189,10 +189,8 @@ static int remove_dirs(struct strbuf *path, const char *prefix, int force_flag,
strbuf_complete(path, '/');

len = path->len;
while ((e = readdir(dir)) != NULL) {
while ((e = readdir_skip_dot_and_dotdot(dir)) != NULL) {
struct stat st;
if (is_dot_or_dotdot(e->d_name))
continue;

strbuf_setlen(path, len);
strbuf_addstr(path, e->d_name);
@ -643,7 +641,7 @@ static int clean_cmd(void)

static int filter_by_patterns_cmd(void)
{
struct dir_struct dir;
struct dir_struct dir = DIR_INIT;
struct strbuf confirm = STRBUF_INIT;
struct strbuf **ignore_list;
struct string_list_item *item;
@ -667,7 +665,6 @@ static int filter_by_patterns_cmd(void)
if (!confirm.len)
break;

dir_init(&dir);
pl = add_pattern_list(&dir, EXC_CMDL, "manual exclude");
ignore_list = strbuf_split_max(&confirm, ' ', 0);

@ -892,7 +889,7 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
int ignored_only = 0, config_set = 0, errors = 0, gone = 1;
int rm_flags = REMOVE_DIR_KEEP_NESTED_GIT;
struct strbuf abs_path = STRBUF_INIT;
struct dir_struct dir;
struct dir_struct dir = DIR_INIT;
struct pathspec pathspec;
struct strbuf buf = STRBUF_INIT;
struct string_list exclude_list = STRING_LIST_INIT_NODUP;
@ -923,7 +920,6 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, options, builtin_clean_usage,
0);

dir_init(&dir);
if (!interactive && !dry_run && !force) {
if (config_set)
die(_("clean.requireForce set to true and neither -i, -n, nor -f given; "

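[Editor's note] The cmd_check_ignore, report_result and cmd_clean hunks above drop separate memset()/dir_init() calls in favour of initializing at the point of declaration (= DIR_INIT, = { 0 }). A small standalone sketch of why the two styles are equivalent and why the initializer is harder to forget; illustration only, the struct and macro below are invented.

#include <string.h>

struct widget {
	int flags;
	const char *name;
	char buf[16];
};

/* project-style initializer macro, analogous to DIR_INIT */
#define WIDGET_INIT { .flags = 0 }

int main(void)
{
	/* old style: declare, then remember to zero it before first use */
	struct widget a;
	memset(&a, 0, sizeof(a));

	/* new style: the object is never visible in an uninitialized state */
	struct widget b = WIDGET_INIT;
	struct widget c = { 0 };

	return a.flags + b.flags + c.flags;
}
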
@ -1320,9 +1320,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
}

if (!is_local && !complete_refs_before_fetch) {
err = transport_fetch_refs(transport, mapped_refs);
if (err)
goto cleanup;
if (transport_fetch_refs(transport, mapped_refs))
die(_("remote transport reported error"));
}

remote_head = find_ref_by_name(refs, "HEAD");
@ -1380,9 +1379,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
if (is_local)
clone_local(path, git_dir);
else if (refs && complete_refs_before_fetch) {
err = transport_fetch_refs(transport, mapped_refs);
if (err)
goto cleanup;
if (transport_fetch_refs(transport, mapped_refs))
die(_("remote transport reported error"));
}

update_remote_refs(refs, mapped_refs, remote_head_points_at,
@ -1410,7 +1408,6 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
junk_mode = JUNK_LEAVE_REPO;
err = checkout(submodule_progress);

cleanup:
free(remote_name);
strbuf_release(&reflog_msg);
strbuf_release(&branch_top);

@ -889,7 +889,22 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
int ident_shown = 0;
int saved_color_setting;
struct ident_split ci, ai;

const char *hint_cleanup_all = allow_empty_message ?
_("Please enter the commit message for your changes."
" Lines starting\nwith '%c' will be ignored.\n") :
_("Please enter the commit message for your changes."
" Lines starting\nwith '%c' will be ignored, and an empty"
" message aborts the commit.\n");
const char *hint_cleanup_space = allow_empty_message ?
_("Please enter the commit message for your changes."
" Lines starting\n"
"with '%c' will be kept; you may remove them"
" yourself if you want to.\n") :
_("Please enter the commit message for your changes."
" Lines starting\n"
"with '%c' will be kept; you may remove them"
" yourself if you want to.\n"
"An empty message aborts the commit.\n");
if (whence != FROM_COMMIT) {
if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS &&
!merge_contains_scissors)
@ -911,20 +926,12 @@ static int prepare_to_commit(const char *index_file, const char *prefix,

fprintf(s->fp, "\n");
if (cleanup_mode == COMMIT_MSG_CLEANUP_ALL)
status_printf(s, GIT_COLOR_NORMAL,
_("Please enter the commit message for your changes."
" Lines starting\nwith '%c' will be ignored, and an empty"
" message aborts the commit.\n"), comment_line_char);
status_printf(s, GIT_COLOR_NORMAL, hint_cleanup_all, comment_line_char);
else if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS) {
if (whence == FROM_COMMIT && !merge_contains_scissors)
wt_status_add_cut_line(s->fp);
} else /* COMMIT_MSG_CLEANUP_SPACE, that is. */
status_printf(s, GIT_COLOR_NORMAL,
_("Please enter the commit message for your changes."
" Lines starting\n"
"with '%c' will be kept; you may remove them"
" yourself if you want to.\n"
"An empty message aborts the commit.\n"), comment_line_char);
status_printf(s, GIT_COLOR_NORMAL, hint_cleanup_space, comment_line_char);

/*
* These should never fail because they come from our own
@ -1510,6 +1517,9 @@ int cmd_status(int argc, const char **argv, const char *prefix)
if (argc == 2 && !strcmp(argv[1], "-h"))
usage_with_options(builtin_status_usage, builtin_status_options);

prepare_repo_settings(the_repository);
the_repository->settings.command_requires_full_index = 0;

status_init_config(&s, git_status_config);
argc = parse_options(argc, argv, prefix,
builtin_status_options,

@ -2,6 +2,7 @@
|
||||
#include "cache.h"
|
||||
#include "config.h"
|
||||
#include "diff.h"
|
||||
#include "diff-merges.h"
|
||||
#include "commit.h"
|
||||
#include "revision.h"
|
||||
#include "builtin.h"
|
||||
@ -27,6 +28,12 @@ int cmd_diff_index(int argc, const char **argv, const char *prefix)
|
||||
rev.abbrev = 0;
|
||||
prefix = precompose_argv_prefix(argc, argv, prefix);
|
||||
|
||||
/*
|
||||
* We need no diff for merges options, and we need to avoid conflict
|
||||
* with our own meaning of "-m".
|
||||
*/
|
||||
diff_merges_suppress_options_parsing();
|
||||
|
||||
argc = setup_revisions(argc, argv, &rev, NULL);
|
||||
for (i = 1; i < argc; i++) {
|
||||
const char *arg = argv[i];
|
||||
@ -35,6 +42,8 @@ int cmd_diff_index(int argc, const char **argv, const char *prefix)
|
||||
option |= DIFF_INDEX_CACHED;
|
||||
else if (!strcmp(arg, "--merge-base"))
|
||||
option |= DIFF_INDEX_MERGE_BASE;
|
||||
else if (!strcmp(arg, "-m"))
|
||||
rev.match_missing = 1;
|
||||
else
|
||||
usage(diff_cache_usage);
|
||||
}
|
||||
|
@ -26,8 +26,8 @@
|
||||
|
||||
static const char builtin_diff_usage[] =
|
||||
"git diff [<options>] [<commit>] [--] [<path>...]\n"
|
||||
" or: git diff [<options>] --cached [<commit>] [--] [<path>...]\n"
|
||||
" or: git diff [<options>] <commit> [--merge-base] [<commit>...] <commit> [--] [<path>...]\n"
|
||||
" or: git diff [<options>] --cached [--merge-base] [<commit>] [--] [<path>...]\n"
|
||||
" or: git diff [<options>] [--merge-base] <commit> [<commit>...] <commit> [--] [<path>...]\n"
|
||||
" or: git diff [<options>] <commit>...<commit>] [--] [<path>...]\n"
|
||||
" or: git diff [<options>] <blob> <blob>]\n"
|
||||
" or: git diff [<options>] --no-index [--] <path> <path>]\n"
|
||||
|
@ -675,7 +675,7 @@ static int run_file_diff(int prompt, const char *prefix,
|
||||
"GIT_PAGER=", "GIT_EXTERNAL_DIFF=git-difftool--helper", NULL,
|
||||
NULL
|
||||
};
|
||||
int ret = 0, i;
|
||||
int i;
|
||||
|
||||
if (prompt > 0)
|
||||
env[2] = "GIT_DIFFTOOL_PROMPT=true";
|
||||
@ -686,8 +686,7 @@ static int run_file_diff(int prompt, const char *prefix,
|
||||
strvec_push(&args, "diff");
|
||||
for (i = 0; i < argc; i++)
|
||||
strvec_push(&args, argv[i]);
|
||||
ret = run_command_v_opt_cd_env(args.v, RUN_GIT_CMD, prefix, env);
|
||||
exit(ret);
|
||||
return run_command_v_opt_cd_env(args.v, RUN_GIT_CMD, prefix, env);
|
||||
}
|
||||
|
||||
int cmd_difftool(int argc, const char **argv, const char *prefix)
|
||||
|
@ -1126,7 +1126,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
|
||||
|
||||
if (rm->status == REF_STATUS_REJECT_SHALLOW) {
|
||||
if (want_status == FETCH_HEAD_MERGE)
|
||||
warning(_("reject %s because shallow roots are not allowed to be updated"),
|
||||
warning(_("rejected %s because shallow roots are not allowed to be updated"),
|
||||
rm->peer_ref ? rm->peer_ref->name : rm->name);
|
||||
continue;
|
||||
}
|
||||
@ -1990,6 +1990,9 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
|
||||
fetch_config_from_gitmodules(sfjc, rs);
|
||||
}
|
||||
|
||||
if (negotiate_only && !negotiation_tip.nr)
|
||||
die(_("--negotiate-only needs one or more --negotiate-tip=*"));
|
||||
|
||||
if (deepen_relative) {
|
||||
if (deepen_relative < 0)
|
||||
die(_("Negative depth in --deepen is not supported"));
|
||||
|
@ -109,7 +109,8 @@ static int fsck_error_func(struct fsck_options *o,
|
||||
|
||||
static struct object_array pending;
|
||||
|
||||
static int mark_object(struct object *obj, int type, void *data, struct fsck_options *options)
|
||||
static int mark_object(struct object *obj, enum object_type type,
|
||||
void *data, struct fsck_options *options)
|
||||
{
|
||||
struct object *parent = data;
|
||||
|
||||
|
@ -704,10 +704,9 @@ static int grep_objects(struct grep_opt *opt, const struct pathspec *pathspec,
|
||||
static int grep_directory(struct grep_opt *opt, const struct pathspec *pathspec,
|
||||
int exc_std, int use_index)
|
||||
{
|
||||
struct dir_struct dir;
|
||||
struct dir_struct dir = DIR_INIT;
|
||||
int i, hit = 0;
|
||||
|
||||
dir_init(&dir);
|
||||
if (!use_index)
|
||||
dir.flags |= DIR_NO_GITLINKS;
|
||||
if (exc_std)
|
||||
|
@ -436,10 +436,9 @@ static void exec_viewer(const char *name, const char *page)
|
||||
warning(_("'%s': unknown man viewer."), name);
|
||||
}
|
||||
|
||||
static void show_man_page(const char *git_cmd)
|
||||
static void show_man_page(const char *page)
|
||||
{
|
||||
struct man_viewer_list *viewer;
|
||||
const char *page = cmd_to_page(git_cmd);
|
||||
const char *fallback = getenv("GIT_MAN_VIEWER");
|
||||
|
||||
setup_man_path();
|
||||
@ -453,9 +452,8 @@ static void show_man_page(const char *git_cmd)
|
||||
die(_("no man viewer handled the request"));
|
||||
}
|
||||
|
||||
static void show_info_page(const char *git_cmd)
|
||||
static void show_info_page(const char *page)
|
||||
{
|
||||
const char *page = cmd_to_page(git_cmd);
|
||||
setenv("INFOPATH", system_path(GIT_INFO_PATH), 1);
|
||||
execlp("info", "info", "gitman", page, (char *)NULL);
|
||||
die(_("no info viewer handled the request"));
|
||||
@ -486,9 +484,8 @@ static void open_html(const char *path)
|
||||
execl_git_cmd("web--browse", "-c", "help.browser", path, (char *)NULL);
|
||||
}
|
||||
|
||||
static void show_html_page(const char *git_cmd)
|
||||
static void show_html_page(const char *page)
|
||||
{
|
||||
const char *page = cmd_to_page(git_cmd);
|
||||
struct strbuf page_path; /* it leaks but we exec bellow */
|
||||
|
||||
get_html_page_path(&page_path, page);
|
||||
@ -548,6 +545,7 @@ int cmd_help(int argc, const char **argv, const char *prefix)
|
||||
{
|
||||
int nongit;
|
||||
enum help_format parsed_help_format;
|
||||
const char *page;
|
||||
|
||||
argc = parse_options(argc, argv, prefix, builtin_help_options,
|
||||
builtin_help_usage, 0);
|
||||
@ -606,16 +604,17 @@ int cmd_help(int argc, const char **argv, const char *prefix)
|
||||
|
||||
argv[0] = check_git_cmd(argv[0]);
|
||||
|
||||
page = cmd_to_page(argv[0]);
|
||||
switch (help_format) {
|
||||
case HELP_FORMAT_NONE:
|
||||
case HELP_FORMAT_MAN:
|
||||
show_man_page(argv[0]);
|
||||
show_man_page(page);
|
||||
break;
|
||||
case HELP_FORMAT_INFO:
|
||||
show_info_page(argv[0]);
|
||||
show_info_page(page);
|
||||
break;
|
||||
case HELP_FORMAT_WEB:
|
||||
show_html_page(argv[0]);
|
||||
show_html_page(page);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -369,9 +369,7 @@ static void parse_pack_header(void)
|
||||
use(sizeof(struct pack_header));
|
||||
}
|
||||
|
||||
static NORETURN void bad_object(off_t offset, const char *format,
|
||||
...) __attribute__((format (printf, 2, 3)));
|
||||
|
||||
__attribute__((format (printf, 2, 3)))
|
||||
static NORETURN void bad_object(off_t offset, const char *format, ...)
|
||||
{
|
||||
va_list params;
|
||||
|
@ -212,8 +212,9 @@ static int create_default_files(const char *template_path,
|
||||
* values (since we've just potentially changed what's available on
|
||||
* disk).
|
||||
*/
|
||||
git_config_get_value("init.templatedir", &init_template_dir);
|
||||
git_config_get_pathname("init.templatedir", &init_template_dir);
|
||||
copy_templates(template_path, init_template_dir);
|
||||
free((char *)init_template_dir);
|
||||
git_config_clear();
|
||||
reset_shared_repository();
|
||||
git_config(git_default_config, NULL);
|
||||
|
@ -245,6 +245,9 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
|
||||
rev->abbrev_commit = 0;
|
||||
}
|
||||
|
||||
if (rev->commit_format == CMIT_FMT_USERFORMAT && !w.decorate)
|
||||
decoration_style = 0;
|
||||
|
||||
if (decoration_style) {
|
||||
const struct string_list *config_exclude =
|
||||
repo_config_get_value_multi(the_repository,
|
||||
@ -1968,8 +1971,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
|
||||
} else if (rev.diffopt.close_file) {
|
||||
/*
|
||||
* The diff code parsed --output; it has already opened the
|
||||
* file, but but we must instruct it not to close after each
|
||||
* diff.
|
||||
* file, but we must instruct it not to close after each diff.
|
||||
*/
|
||||
rev.diffopt.no_free = 1;
|
||||
} else {
|
||||
|
@ -608,7 +608,7 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix)
|
||||
{
|
||||
int require_work_tree = 0, show_tag = 0, i;
|
||||
char *max_prefix;
|
||||
struct dir_struct dir;
|
||||
struct dir_struct dir = DIR_INIT;
|
||||
struct pattern_list *pl;
|
||||
struct string_list exclude_list = STRING_LIST_INIT_NODUP;
|
||||
struct option builtin_ls_files_options[] = {
|
||||
@ -678,7 +678,6 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix)
|
||||
if (argc == 2 && !strcmp(argv[1], "-h"))
|
||||
usage_with_options(ls_files_usage, builtin_ls_files_options);
|
||||
|
||||
dir_init(&dir);
|
||||
prefix = cmd_prefix;
|
||||
if (prefix)
|
||||
prefix_len = strlen(prefix);
|
||||
@ -752,6 +751,9 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix)
|
||||
if (pathspec.nr && error_unmatch)
|
||||
ps_matched = xcalloc(pathspec.nr, 1);
|
||||
|
||||
if ((dir.flags & DIR_SHOW_IGNORED) && !show_others && !show_cached)
|
||||
die("ls-files -i must be used with either -o or -c");
|
||||
|
||||
if ((dir.flags & DIR_SHOW_IGNORED) && !exc_given)
|
||||
die("ls-files --ignored needs some exclude pattern");
|
||||
|
||||
|
@ -28,6 +28,6 @@ int cmd_merge_ours(int argc, const char **argv, const char *prefix)
|
||||
if (read_cache() < 0)
|
||||
die_errno("read_cache failed");
|
||||
if (index_differs_from(the_repository, "HEAD", NULL, 0))
|
||||
exit(2);
|
||||
exit(0);
|
||||
return 2;
|
||||
return 0;
|
||||
}
|
||||
|
@ -107,15 +107,12 @@ static void show_diff(struct merge_list *entry)
|
||||
mmfile_t src, dst;
|
||||
xpparam_t xpp;
|
||||
xdemitconf_t xecfg;
|
||||
xdemitcb_t ecb;
|
||||
xdemitcb_t ecb = { .out_line = show_outf };
|
||||
|
||||
memset(&xpp, 0, sizeof(xpp));
|
||||
xpp.flags = 0;
|
||||
memset(&xecfg, 0, sizeof(xecfg));
|
||||
xecfg.ctxlen = 3;
|
||||
ecb.out_hunk = NULL;
|
||||
ecb.out_line = show_outf;
|
||||
ecb.priv = NULL;
|
||||
|
||||
src.ptr = origin(entry, &size);
|
||||
if (!src.ptr)
|
||||
|
@ -56,8 +56,8 @@ struct strategy {
|
||||
|
||||
static const char * const builtin_merge_usage[] = {
|
||||
N_("git merge [<options>] [<commit>...]"),
|
||||
N_("git merge --abort"),
|
||||
N_("git merge --continue"),
|
||||
"git merge --abort",
|
||||
"git merge --continue",
|
||||
NULL
|
||||
};
|
||||
|
||||
|
@ -189,5 +189,5 @@ int cmd_mktree(int ac, const char **av, const char *prefix)
|
||||
used=0; /* reset tree entry buffer for re-use in batch mode */
|
||||
}
|
||||
strbuf_release(&sb);
|
||||
exit(0);
|
||||
return 0;
|
||||
}
|
||||
|
@ -176,8 +176,8 @@ int cmd_multi_pack_index(int argc, const char **argv,
|
||||
else if (!strcmp(argv[0], "expire"))
|
||||
return cmd_multi_pack_index_expire(argc, argv);
|
||||
else {
|
||||
usage:
|
||||
error(_("unrecognized subcommand: %s"), argv[0]);
|
||||
usage:
|
||||
usage_with_options(builtin_multi_pack_index_usage,
|
||||
builtin_multi_pack_index_options);
|
||||
}
|
||||
|
@ -37,6 +37,134 @@
|
||||
#include "shallow.h"
|
||||
#include "promisor-remote.h"
|
||||
|
||||
/*
|
||||
* Objects we are going to pack are collected in the `to_pack` structure.
|
||||
* It contains an array (dynamically expanded) of the object data, and a map
|
||||
* that can resolve SHA1s to their position in the array.
|
||||
*/
|
||||
static struct packing_data to_pack;
|
||||
|
||||
static inline struct object_entry *oe_delta(
|
||||
const struct packing_data *pack,
|
||||
const struct object_entry *e)
|
||||
{
|
||||
if (!e->delta_idx)
|
||||
return NULL;
|
||||
if (e->ext_base)
|
||||
return &pack->ext_bases[e->delta_idx - 1];
|
||||
else
|
||||
return &pack->objects[e->delta_idx - 1];
|
||||
}
|
||||
|
||||
static inline unsigned long oe_delta_size(struct packing_data *pack,
|
||||
const struct object_entry *e)
|
||||
{
|
||||
if (e->delta_size_valid)
|
||||
return e->delta_size_;
|
||||
|
||||
/*
|
||||
* pack->delta_size[] can't be NULL because oe_set_delta_size()
|
||||
* must have been called when a new delta is saved with
|
||||
* oe_set_delta().
|
||||
* If oe_delta() returns NULL (i.e. default state, which means
|
||||
* delta_size_valid is also false), then the caller must never
|
||||
* call oe_delta_size().
|
||||
*/
|
||||
return pack->delta_size[e - pack->objects];
|
||||
}
|
||||
|
||||
unsigned long oe_get_size_slow(struct packing_data *pack,
|
||||
const struct object_entry *e);
|
||||
|
||||
static inline unsigned long oe_size(struct packing_data *pack,
|
||||
const struct object_entry *e)
|
||||
{
|
||||
if (e->size_valid)
|
||||
return e->size_;
|
||||
|
||||
return oe_get_size_slow(pack, e);
|
||||
}
|
||||
|
||||
static inline void oe_set_delta(struct packing_data *pack,
|
||||
struct object_entry *e,
|
||||
struct object_entry *delta)
|
||||
{
|
||||
if (delta)
|
||||
e->delta_idx = (delta - pack->objects) + 1;
|
||||
else
|
||||
e->delta_idx = 0;
|
||||
}
|
||||
|
||||
static inline struct object_entry *oe_delta_sibling(
|
||||
const struct packing_data *pack,
|
||||
const struct object_entry *e)
|
||||
{
|
||||
if (e->delta_sibling_idx)
|
||||
return &pack->objects[e->delta_sibling_idx - 1];
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct object_entry *oe_delta_child(
|
||||
const struct packing_data *pack,
|
||||
const struct object_entry *e)
|
||||
{
|
||||
if (e->delta_child_idx)
|
||||
return &pack->objects[e->delta_child_idx - 1];
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void oe_set_delta_child(struct packing_data *pack,
|
||||
struct object_entry *e,
|
||||
struct object_entry *delta)
|
||||
{
|
||||
if (delta)
|
||||
e->delta_child_idx = (delta - pack->objects) + 1;
|
||||
else
|
||||
e->delta_child_idx = 0;
|
||||
}
|
||||
|
||||
static inline void oe_set_delta_sibling(struct packing_data *pack,
|
||||
struct object_entry *e,
|
||||
struct object_entry *delta)
|
||||
{
|
||||
if (delta)
|
||||
e->delta_sibling_idx = (delta - pack->objects) + 1;
|
||||
else
|
||||
e->delta_sibling_idx = 0;
|
||||
}
|
||||
|
||||
static inline void oe_set_size(struct packing_data *pack,
|
||||
struct object_entry *e,
|
||||
unsigned long size)
|
||||
{
|
||||
if (size < pack->oe_size_limit) {
|
||||
e->size_ = size;
|
||||
e->size_valid = 1;
|
||||
} else {
|
||||
e->size_valid = 0;
|
||||
if (oe_get_size_slow(pack, e) != size)
|
||||
BUG("'size' is supposed to be the object size!");
|
||||
}
|
||||
}
|
||||
|
||||
static inline void oe_set_delta_size(struct packing_data *pack,
|
||||
struct object_entry *e,
|
||||
unsigned long size)
|
||||
{
|
||||
if (size < pack->oe_delta_size_limit) {
|
||||
e->delta_size_ = size;
|
||||
e->delta_size_valid = 1;
|
||||
} else {
|
||||
packing_data_lock(pack);
|
||||
if (!pack->delta_size)
|
||||
ALLOC_ARRAY(pack->delta_size, pack->nr_alloc);
|
||||
packing_data_unlock(pack);
|
||||
|
||||
pack->delta_size[e - pack->objects] = size;
|
||||
e->delta_size_valid = 0;
|
||||
}
|
||||
}
|
||||
|
||||
#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
|
||||
#define SIZE(obj) oe_size(&to_pack, obj)
|
||||
#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
|
||||
@ -56,13 +184,6 @@ static const char *pack_usage[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
/*
|
||||
* Objects we are going to pack are collected in the `to_pack` structure.
|
||||
* It contains an array (dynamically expanded) of the object data, and a map
|
||||
* that can resolve SHA1s to their position in the array.
|
||||
*/
|
||||
static struct packing_data to_pack;
|
||||
|
||||
static struct pack_idx_entry **written_list;
|
||||
static uint32_t nr_result, nr_written, nr_seen;
|
||||
static struct bitmap_index *bitmap_git;
|
||||
@ -301,6 +422,17 @@ static void copy_pack_data(struct hashfile *f,
|
||||
}
|
||||
}
|
||||
|
||||
static inline int oe_size_greater_than(struct packing_data *pack,
|
||||
const struct object_entry *lhs,
|
||||
unsigned long rhs)
|
||||
{
|
||||
if (lhs->size_valid)
|
||||
return lhs->size_ > rhs;
|
||||
if (rhs < pack->oe_size_limit) /* rhs < 2^x <= lhs ? */
|
||||
return 1;
|
||||
return oe_get_size_slow(pack, lhs) > rhs;
|
||||
}
|
||||
|
||||
/* Return 0 if we will bust the pack-size limit */
|
||||
static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
|
||||
unsigned long limit, int usable_delta)
|
||||
@ -642,6 +774,14 @@ static int mark_tagged(const char *path, const struct object_id *oid, int flag,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline unsigned char oe_layer(struct packing_data *pack,
|
||||
struct object_entry *e)
|
||||
{
|
||||
if (!pack->layer)
|
||||
return 0;
|
||||
return pack->layer[e - pack->objects];
|
||||
}
|
||||
|
||||
static inline void add_to_write_order(struct object_entry **wo,
|
||||
unsigned int *endp,
|
||||
struct object_entry *e)
|
||||
@ -2231,6 +2371,26 @@ static pthread_mutex_t progress_mutex;
|
||||
* progress_mutex for protection.
|
||||
*/
|
||||
|
||||
static inline int oe_size_less_than(struct packing_data *pack,
|
||||
const struct object_entry *lhs,
|
||||
unsigned long rhs)
|
||||
{
|
||||
if (lhs->size_valid)
|
||||
return lhs->size_ < rhs;
|
||||
if (rhs < pack->oe_size_limit) /* rhs < 2^x <= lhs ? */
|
||||
return 0;
|
||||
return oe_get_size_slow(pack, lhs) < rhs;
|
||||
}
|
||||
|
||||
static inline void oe_set_tree_depth(struct packing_data *pack,
|
||||
struct object_entry *e,
|
||||
unsigned int tree_depth)
|
||||
{
|
||||
if (!pack->tree_depth)
|
||||
CALLOC_ARRAY(pack->tree_depth, pack->nr_alloc);
|
||||
pack->tree_depth[e - pack->objects] = tree_depth;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the size of the object without doing any delta
|
||||
* reconstruction (so non-deltas are true object sizes, but deltas
|
||||
|
@ -126,9 +126,9 @@ static struct option pull_options[] = {
|
||||
/* Options passed to git-merge or git-rebase */
|
||||
OPT_GROUP(N_("Options related to merging")),
|
||||
OPT_CALLBACK_F('r', "rebase", &opt_rebase,
|
||||
"(false|true|merges|preserve|interactive)",
|
||||
N_("incorporate changes by rebasing rather than merging"),
|
||||
PARSE_OPT_OPTARG, parse_opt_rebase),
|
||||
"(false|true|merges|preserve|interactive)",
|
||||
N_("incorporate changes by rebasing rather than merging"),
|
||||
PARSE_OPT_OPTARG, parse_opt_rebase),
|
||||
OPT_PASSTHRU('n', NULL, &opt_diffstat, NULL,
|
||||
N_("do not show a diffstat at the end of the merge"),
|
||||
PARSE_OPT_NOARG | PARSE_OPT_NONEG),
|
||||
@ -947,7 +947,6 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
|
||||
struct oid_array merge_heads = OID_ARRAY_INIT;
|
||||
struct object_id orig_head, curr_head;
|
||||
struct object_id rebase_fork_point;
|
||||
int autostash;
|
||||
int rebase_unspecified = 0;
|
||||
int can_ff;
|
||||
|
||||
@ -982,8 +981,8 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
|
||||
if (get_oid("HEAD", &orig_head))
|
||||
oidclr(&orig_head);
|
||||
|
||||
autostash = config_autostash;
|
||||
if (opt_rebase) {
|
||||
int autostash = config_autostash;
|
||||
if (opt_autostash != -1)
|
||||
autostash = opt_autostash;
|
||||
|
||||
@ -1054,7 +1053,6 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
|
||||
|
||||
if (opt_rebase) {
|
||||
int ret = 0;
|
||||
int ran_ff = 0;
|
||||
|
||||
struct object_id newbase;
|
||||
struct object_id upstream;
|
||||
@ -1065,16 +1063,14 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
|
||||
recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND) &&
|
||||
submodule_touches_in_range(the_repository, &upstream, &curr_head))
|
||||
die(_("cannot rebase with locally recorded submodule modifications"));
|
||||
if (!autostash) {
|
||||
if (can_ff) {
|
||||
/* we can fast-forward this without invoking rebase */
|
||||
opt_ff = "--ff-only";
|
||||
ran_ff = 1;
|
||||
ret = run_merge();
|
||||
}
|
||||
}
|
||||
if (!ran_ff)
|
||||
|
||||
if (can_ff) {
|
||||
/* we can fast-forward this without invoking rebase */
|
||||
opt_ff = "--ff-only";
|
||||
ret = run_merge();
|
||||
} else {
|
||||
ret = run_rebase(&newbase, &upstream);
|
||||
}
|
||||
|
||||
if (!ret && (recurse_submodules == RECURSE_SUBMODULES_ON ||
|
||||
recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND))
|
||||
|
@ -185,82 +185,73 @@ static const char message_detached_head_die[] =
|
||||
"\n"
|
||||
" git push %s HEAD:<name-of-remote-branch>\n");
|
||||
|
||||
static void setup_push_upstream(struct remote *remote, struct branch *branch,
|
||||
int triangular, int simple)
|
||||
static const char *get_upstream_ref(struct branch *branch, const char *remote_name)
|
||||
{
|
||||
if (!branch)
|
||||
die(_(message_detached_head_die), remote->name);
|
||||
if (!branch->merge_nr || !branch->merge || !branch->remote_name)
|
||||
die(_("The current branch %s has no upstream branch.\n"
|
||||
"To push the current branch and set the remote as upstream, use\n"
|
||||
"\n"
|
||||
" git push --set-upstream %s %s\n"),
|
||||
branch->name,
|
||||
remote->name,
|
||||
remote_name,
|
||||
branch->name);
|
||||
if (branch->merge_nr != 1)
|
||||
die(_("The current branch %s has multiple upstream branches, "
|
||||
"refusing to push."), branch->name);
|
||||
if (triangular)
|
||||
die(_("You are pushing to remote '%s', which is not the upstream of\n"
|
||||
"your current branch '%s', without telling me what to push\n"
|
||||
"to update which remote branch."),
|
||||
remote->name, branch->name);
|
||||
|
||||
if (simple) {
|
||||
/* Additional safety */
|
||||
if (strcmp(branch->refname, branch->merge[0]->src))
|
||||
die_push_simple(branch, remote);
|
||||
}
|
||||
|
||||
refspec_appendf(&rs, "%s:%s", branch->refname, branch->merge[0]->src);
|
||||
}
|
||||
|
||||
static void setup_push_current(struct remote *remote, struct branch *branch)
|
||||
{
|
||||
if (!branch)
|
||||
die(_(message_detached_head_die), remote->name);
|
||||
refspec_appendf(&rs, "%s:%s", branch->refname, branch->refname);
|
||||
}
|
||||
|
||||
static int is_workflow_triangular(struct remote *remote)
|
||||
{
|
||||
struct remote *fetch_remote = remote_get(NULL);
|
||||
return (fetch_remote && fetch_remote != remote);
|
||||
return branch->merge[0]->src;
|
||||
}
|
||||
|
||||
static void setup_default_push_refspecs(struct remote *remote)
|
||||
{
|
||||
struct branch *branch = branch_get(NULL);
|
||||
int triangular = is_workflow_triangular(remote);
|
||||
struct branch *branch;
|
||||
const char *dst;
|
||||
int same_remote;
|
||||
|
||||
switch (push_default) {
|
||||
default:
|
||||
case PUSH_DEFAULT_MATCHING:
|
||||
refspec_append(&rs, ":");
|
||||
break;
|
||||
|
||||
case PUSH_DEFAULT_UNSPECIFIED:
|
||||
case PUSH_DEFAULT_SIMPLE:
|
||||
if (triangular)
|
||||
setup_push_current(remote, branch);
|
||||
else
|
||||
setup_push_upstream(remote, branch, triangular, 1);
|
||||
break;
|
||||
|
||||
case PUSH_DEFAULT_UPSTREAM:
|
||||
setup_push_upstream(remote, branch, triangular, 0);
|
||||
break;
|
||||
|
||||
case PUSH_DEFAULT_CURRENT:
|
||||
setup_push_current(remote, branch);
|
||||
break;
|
||||
return;
|
||||
|
||||
case PUSH_DEFAULT_NOTHING:
|
||||
die(_("You didn't specify any refspecs to push, and "
|
||||
"push.default is \"nothing\"."));
|
||||
return;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
branch = branch_get(NULL);
|
||||
if (!branch)
|
||||
die(_(message_detached_head_die), remote->name);
|
||||
|
||||
dst = branch->refname;
|
||||
same_remote = !strcmp(remote->name, remote_for_branch(branch, NULL));
|
||||
|
||||
switch (push_default) {
|
||||
default:
|
||||
case PUSH_DEFAULT_UNSPECIFIED:
|
||||
case PUSH_DEFAULT_SIMPLE:
|
||||
if (!same_remote)
|
||||
break;
|
||||
if (strcmp(branch->refname, get_upstream_ref(branch, remote->name)))
|
||||
die_push_simple(branch, remote);
|
||||
break;
|
||||
|
||||
case PUSH_DEFAULT_UPSTREAM:
|
||||
if (!same_remote)
|
||||
die(_("You are pushing to remote '%s', which is not the upstream of\n"
|
||||
"your current branch '%s', without telling me what to push\n"
|
||||
"to update which remote branch."),
|
||||
remote->name, branch->name);
|
||||
dst = get_upstream_ref(branch, remote->name);
|
||||
break;
|
||||
|
||||
case PUSH_DEFAULT_CURRENT:
|
||||
break;
|
||||
}
|
||||
|
||||
refspec_appendf(&rs, "%s:%s", branch->refname, dst);
|
||||
}
|
||||
|
||||
static const char message_advice_pull_before_push[] =
|
||||
|
@ -425,9 +425,6 @@ static int proc_receive_ref_matches(struct command *cmd)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void rp_error(const char *err, ...) __attribute__((format (printf, 1, 2)));
|
||||
static void rp_warning(const char *err, ...) __attribute__((format (printf, 1, 2)));
|
||||
|
||||
static void report_message(const char *prefix, const char *err, va_list params)
|
||||
{
|
||||
int sz;
|
||||
@ -445,6 +442,7 @@ static void report_message(const char *prefix, const char *err, va_list params)
|
||||
xwrite(2, msg, sz);
|
||||
}
|
||||
|
||||
__attribute__((format (printf, 1, 2)))
|
||||
static void rp_warning(const char *err, ...)
|
||||
{
|
||||
va_list params;
|
||||
@ -453,6 +451,7 @@ static void rp_warning(const char *err, ...)
|
||||
va_end(params);
|
||||
}
|
||||
|
||||
__attribute__((format (printf, 1, 2)))
|
||||
static void rp_error(const char *err, ...)
|
||||
{
|
||||
va_list params;
|
||||
|
@ -28,7 +28,7 @@ static int diff_two(const char *file1, const char *label1,
|
||||
{
|
||||
xpparam_t xpp;
|
||||
xdemitconf_t xecfg;
|
||||
xdemitcb_t ecb;
|
||||
xdemitcb_t ecb = { .out_line = outf };
|
||||
mmfile_t minus, plus;
|
||||
int ret;
|
||||
|
||||
@ -41,8 +41,6 @@ static int diff_two(const char *file1, const char *label1,
|
||||
xpp.flags = 0;
|
||||
memset(&xecfg, 0, sizeof(xecfg));
|
||||
xecfg.ctxlen = 3;
|
||||
ecb.out_hunk = NULL;
|
||||
ecb.out_line = outf;
|
||||
ret = xdi_diff(&minus, &plus, &xpp, &xecfg, &ecb);
|
||||
|
||||
free(minus.ptr);
|
||||
|
@ -127,13 +127,15 @@ static void show_commit(struct commit *commit, void *data)
|
||||
if (info->header_prefix)
|
||||
fputs(info->header_prefix, stdout);
|
||||
|
||||
if (!revs->graph)
|
||||
fputs(get_revision_mark(revs, commit), stdout);
|
||||
if (revs->abbrev_commit && revs->abbrev)
|
||||
fputs(find_unique_abbrev(&commit->object.oid, revs->abbrev),
|
||||
stdout);
|
||||
else
|
||||
fputs(oid_to_hex(&commit->object.oid), stdout);
|
||||
if (revs->include_header) {
|
||||
if (!revs->graph)
|
||||
fputs(get_revision_mark(revs, commit), stdout);
|
||||
if (revs->abbrev_commit && revs->abbrev)
|
||||
fputs(find_unique_abbrev(&commit->object.oid, revs->abbrev),
|
||||
stdout);
|
||||
else
|
||||
fputs(oid_to_hex(&commit->object.oid), stdout);
|
||||
}
|
||||
if (revs->print_parents) {
|
||||
struct commit_list *parents = commit->parents;
|
||||
while (parents) {
|
||||
@ -153,7 +155,7 @@ static void show_commit(struct commit *commit, void *data)
|
||||
show_decorations(revs, commit);
|
||||
if (revs->commit_format == CMIT_FMT_ONELINE)
|
||||
putchar(' ');
|
||||
else
|
||||
else if (revs->include_header)
|
||||
putchar('\n');
|
||||
|
||||
if (revs->verbose_header) {
|
||||
@ -512,6 +514,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
|
||||
repo_init_revisions(the_repository, &revs, prefix);
|
||||
revs.abbrev = DEFAULT_ABBREV;
|
||||
revs.commit_format = CMIT_FMT_UNSPECIFIED;
|
||||
revs.include_header = 1;
|
||||
|
||||
/*
|
||||
* Scan the argument list before invoking setup_revisions(), so that we
|
||||
@ -627,6 +630,16 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!strcmp(arg, ("--commit-header"))) {
|
||||
revs.include_header = 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!strcmp(arg, ("--no-commit-header"))) {
|
||||
revs.include_header = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!strcmp(arg, "--disk-usage")) {
|
||||
show_disk_usage = 1;
|
||||
info.flags |= REV_LIST_QUIET;
|
||||
@ -636,10 +649,12 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
|
||||
usage(rev_list_usage);
|
||||
|
||||
}
|
||||
if (revs.commit_format != CMIT_FMT_USERFORMAT)
|
||||
revs.include_header = 1;
|
||||
if (revs.commit_format != CMIT_FMT_UNSPECIFIED) {
|
||||
/* The command line has a --pretty */
|
||||
info.hdr_termination = '\n';
|
||||
if (revs.commit_format == CMIT_FMT_ONELINE)
|
||||
if (revs.commit_format == CMIT_FMT_ONELINE || !revs.include_header)
|
||||
info.header_prefix = "";
|
||||
else
|
||||
info.header_prefix = "commit ";
|
||||
|
@ -435,11 +435,11 @@ static int cmd_parseopt(int argc, const char **argv, const char *prefix)
|
||||
/* get the usage up to the first line with a -- on it */
|
||||
for (;;) {
|
||||
if (strbuf_getline(&sb, stdin) == EOF)
|
||||
die("premature end of input");
|
||||
die(_("premature end of input"));
|
||||
ALLOC_GROW(usage, unb + 1, usz);
|
||||
if (!strcmp("--", sb.buf)) {
|
||||
if (unb < 1)
|
||||
die("no usage string given before the `--' separator");
|
||||
die(_("no usage string given before the `--' separator"));
|
||||
usage[unb] = NULL;
|
||||
break;
|
||||
}
|
||||
@ -545,7 +545,7 @@ static void die_no_single_rev(int quiet)
|
||||
if (quiet)
|
||||
exit(1);
|
||||
else
|
||||
die("Needed a single revision");
|
||||
die(_("Needed a single revision"));
|
||||
}
|
||||
|
||||
static const char builtin_rev_parse_usage[] =
|
||||
@ -709,10 +709,10 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
|
||||
if (!strcmp(arg, "--resolve-git-dir")) {
|
||||
const char *gitdir = argv[++i];
|
||||
if (!gitdir)
|
||||
die("--resolve-git-dir requires an argument");
|
||||
die(_("--resolve-git-dir requires an argument"));
|
||||
gitdir = resolve_gitdir(gitdir);
|
||||
if (!gitdir)
|
||||
die("not a gitdir '%s'", argv[i]);
|
||||
die(_("not a gitdir '%s'"), argv[i]);
|
||||
puts(gitdir);
|
||||
continue;
|
||||
}
|
||||
@ -736,7 +736,7 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
|
||||
if (!seen_end_of_options && *arg == '-') {
|
||||
if (!strcmp(arg, "--git-path")) {
|
||||
if (!argv[i + 1])
|
||||
die("--git-path requires an argument");
|
||||
die(_("--git-path requires an argument"));
|
||||
strbuf_reset(&buf);
|
||||
print_path(git_path("%s", argv[i + 1]), prefix,
|
||||
format,
|
||||
@ -746,7 +746,7 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
|
||||
}
|
||||
if (!strcmp(arg,"-n")) {
|
||||
if (++i >= argc)
|
||||
die("-n requires an argument");
|
||||
die(_("-n requires an argument"));
|
||||
if ((filter & DO_FLAGS) && (filter & DO_REVS)) {
|
||||
show(arg);
|
||||
show(argv[i]);
|
||||
@ -759,25 +759,27 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
|
||||
continue;
|
||||
}
|
||||
if (opt_with_value(arg, "--path-format", &arg)) {
|
||||
if (!arg)
|
||||
die(_("--path-format requires an argument"));
|
||||
if (!strcmp(arg, "absolute")) {
|
||||
format = FORMAT_CANONICAL;
|
||||
} else if (!strcmp(arg, "relative")) {
|
||||
format = FORMAT_RELATIVE;
|
||||
} else {
|
||||
die("unknown argument to --path-format: %s", arg);
|
||||
die(_("unknown argument to --path-format: %s"), arg);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (!strcmp(arg, "--default")) {
|
||||
def = argv[++i];
|
||||
if (!def)
|
||||
die("--default requires an argument");
|
||||
die(_("--default requires an argument"));
|
||||
continue;
|
||||
}
|
||||
if (!strcmp(arg, "--prefix")) {
|
||||
prefix = argv[++i];
|
||||
if (!prefix)
|
||||
die("--prefix requires an argument");
|
||||
die(_("--prefix requires an argument"));
|
||||
startup_info->prefix = prefix;
|
||||
output_prefix = 1;
|
||||
continue;
|
||||
@ -846,7 +848,7 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
|
||||
else if (!strcmp(arg, "loose"))
|
||||
abbrev_ref_strict = 0;
|
||||
else
|
||||
die("unknown mode for --abbrev-ref: %s",
|
||||
die(_("unknown mode for --abbrev-ref: %s"),
|
||||
arg);
|
||||
}
|
||||
continue;
|
||||
@ -890,7 +892,7 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
|
||||
if (work_tree)
|
||||
print_path(work_tree, prefix, format, DEFAULT_UNMODIFIED);
|
||||
else
|
||||
die("this operation must be run in a work tree");
|
||||
die(_("this operation must be run in a work tree"));
|
||||
continue;
|
||||
}
|
||||
if (!strcmp(arg, "--show-superproject-working-tree")) {
|
||||
@ -1018,7 +1020,7 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
|
||||
if (strcmp(val, "storage") &&
|
||||
strcmp(val, "input") &&
|
||||
strcmp(val, "output"))
|
||||
die("unknown mode for --show-object-format: %s",
|
||||
die(_("unknown mode for --show-object-format: %s"),
|
||||
arg);
|
||||
puts(the_hash_algo->name);
|
||||
continue;
|
||||
@ -1056,7 +1058,7 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
|
||||
if (verify)
|
||||
die_no_single_rev(quiet);
|
||||
if (has_dashdash)
|
||||
die("bad revision '%s'", arg);
|
||||
die(_("bad revision '%s'"), arg);
|
||||
as_is = 1;
|
||||
if (!show_file(arg, output_prefix))
|
||||
continue;
|
||||
|
@ -939,9 +939,12 @@ int cmd_show_branch(int ac, const char **av, const char *prefix)
|
||||
mark = '*';
|
||||
else
|
||||
mark = '+';
|
||||
printf("%s%c%s",
|
||||
get_color_code(i),
|
||||
mark, get_color_reset_code());
|
||||
if (mark == ' ')
|
||||
putchar(mark);
|
||||
else
|
||||
printf("%s%c%s",
|
||||
get_color_code(i),
|
||||
mark, get_color_reset_code());
|
||||
}
|
||||
putchar(' ');
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ static const char * const git_stash_usage[] = {
N_("git stash drop [-q|--quiet] [<stash>]"),
N_("git stash ( pop | apply ) [--index] [-q|--quiet] [<stash>]"),
N_("git stash branch <branchname> [<stash>]"),
N_("git stash clear"),
"git stash clear",
N_("git stash [push [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet]\n"
" [-u|--include-untracked] [-a|--all] [-m|--message <message>]\n"
" [--pathspec-from-file=<file> [--pathspec-file-nul]]\n"
@ -67,7 +67,7 @@ static const char * const git_stash_branch_usage[] = {
};

static const char * const git_stash_clear_usage[] = {
N_("git stash clear"),
"git stash clear",
NULL
};

@ -761,7 +761,7 @@ static int list_stash(int argc, const char **argv, const char *prefix)

cp.git_cmd = 1;
strvec_pushl(&cp.args, "log", "--format=%gd: %gs", "-g",
"--first-parent", "-m", NULL);
"--first-parent", NULL);
strvec_pushv(&cp.args, argv);
strvec_push(&cp.args, ref_stash);
strvec_push(&cp.args, "--");
@ -833,7 +833,7 @@ static int show_stash(int argc, const char **argv, const char *prefix)
UNTRACKED_NONE,
UNTRACKED_INCLUDE,
UNTRACKED_ONLY
} show_untracked = UNTRACKED_NONE;
} show_untracked = show_include_untracked ? UNTRACKED_INCLUDE : UNTRACKED_NONE;
struct option options[] = {
OPT_SET_INT('u', "include-untracked", &show_untracked,
N_("include untracked files in the stash"),
@ -876,9 +876,6 @@ static int show_stash(int argc, const char **argv, const char *prefix)
if (show_patch)
rev.diffopt.output_format |= DIFF_FORMAT_PATCH;

if (show_include_untracked)
show_untracked = UNTRACKED_INCLUDE;

if (!show_stat && !show_patch) {
free_stash_info(&info);
return 0;
@ -994,9 +991,8 @@ static int get_untracked_files(const struct pathspec *ps, int include_untracked,
{
int i;
int found = 0;
struct dir_struct dir;
struct dir_struct dir = DIR_INIT;

dir_init(&dir);
if (include_untracked != INCLUDE_ALL_FILES)
setup_standard_excludes(&dir);
@ -19,7 +19,6 @@
#include "diffcore.h"
#include "diff.h"
#include "object-store.h"
#include "dir.h"
#include "advice.h"

#define OPT_QUIET (1 << 0)
@ -1300,7 +1299,7 @@ static int module_summary(int argc, const char **argv, const char *prefix)
OPT_BOOL(0, "cached", &cached,
N_("use the commit stored in the index instead of the submodule HEAD")),
OPT_BOOL(0, "files", &files,
N_("to compare the commit in the index with that in the submodule HEAD")),
N_("compare the commit in the index with that in the submodule HEAD")),
OPT_BOOL(0, "for-status", &for_status,
N_("skip submodules with 'ignore_config' value set to 'all'")),
OPT_INTEGER('n', "summary-limit", &summary_limit,
@ -30,7 +30,7 @@ struct add_opts {
int detach;
int quiet;
int checkout;
int keep_locked;
const char *keep_locked;
};

static int show_only;
@ -118,10 +118,8 @@ static void prune_worktrees(void)
struct dirent *d;
if (!dir)
return;
while ((d = readdir(dir)) != NULL) {
while ((d = readdir_skip_dot_and_dotdot(dir)) != NULL) {
char *path;
if (is_dot_or_dotdot(d->d_name))
continue;
strbuf_reset(&reason);
if (should_prune_worktree(d->d_name, &reason, &path, expire))
prune_worktree(d->d_name, reason.buf);
@ -304,10 +302,10 @@ static int add_worktree(const char *path, const char *refname,
* after the preparation is over.
*/
strbuf_addf(&sb, "%s/locked", sb_repo.buf);
if (!opts->keep_locked)
write_file(sb.buf, "initializing");
if (opts->keep_locked)
write_file(sb.buf, "%s", opts->keep_locked);
else
write_file(sb.buf, "added with --lock");
write_file(sb.buf, _("initializing"));

strbuf_addf(&sb_git, "%s/.git", path);
if (safe_create_leading_directories_const(sb_git.buf))
@ -477,6 +475,8 @@ static int add(int ac, const char **av, const char *prefix)
const char *branch;
const char *new_branch = NULL;
const char *opt_track = NULL;
const char *lock_reason = NULL;
int keep_locked = 0;
struct option options[] = {
OPT__FORCE(&opts.force,
N_("checkout <branch> even if already checked out in other worktree"),
@ -487,7 +487,9 @@ static int add(int ac, const char **av, const char *prefix)
N_("create or reset a branch")),
OPT_BOOL('d', "detach", &opts.detach, N_("detach HEAD at named commit")),
OPT_BOOL(0, "checkout", &opts.checkout, N_("populate the new working tree")),
OPT_BOOL(0, "lock", &opts.keep_locked, N_("keep the new working tree locked")),
OPT_BOOL(0, "lock", &keep_locked, N_("keep the new working tree locked")),
OPT_STRING(0, "reason", &lock_reason, N_("string"),
N_("reason for locking")),
OPT__QUIET(&opts.quiet, N_("suppress progress reporting")),
OPT_PASSTHRU(0, "track", &opt_track, NULL,
N_("set up tracking mode (see git-branch(1))"),
@ -502,6 +504,13 @@ static int add(int ac, const char **av, const char *prefix)
ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
if (!!opts.detach + !!new_branch + !!new_branch_force > 1)
die(_("-b, -B, and --detach are mutually exclusive"));
if (lock_reason && !keep_locked)
die(_("--reason requires --lock"));
if (lock_reason)
opts.keep_locked = lock_reason;
else if (keep_locked)
opts.keep_locked = _("added with --lock");

if (ac < 1 || ac > 2)
usage_with_options(worktree_usage, options);
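Read together, these worktree.c hunks turn keep_locked from a boolean into the lock reason itself: git worktree add --lock --reason <string> <path> stores <string> in the new worktree's "locked" file, a bare --lock stores the translated "added with --lock" text, and --reason without --lock is rejected. The temporary lock written while the worktree is still being set up is now the translatable _("initializing") as well.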
@ -100,6 +100,7 @@ static int stream_to_pack(struct bulk_checkin_state *state,
const char *path, unsigned flags)
{
git_zstream s;
unsigned char ibuf[16384];
unsigned char obuf[16384];
unsigned hdrlen;
int status = Z_OK;
@ -113,8 +114,6 @@ static int stream_to_pack(struct bulk_checkin_state *state,
s.avail_out = sizeof(obuf) - hdrlen;

while (status != Z_STREAM_END) {
unsigned char ibuf[16384];

if (size && !s.avail_in) {
ssize_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
ssize_t read_result = read_in_full(fd, ibuf, rsize);
64 bundle.c
@ -23,13 +23,16 @@ static struct {
{ 3, v3_bundle_signature },
};

static void add_to_ref_list(const struct object_id *oid, const char *name,
struct ref_list *list)
void bundle_header_init(struct bundle_header *header)
{
ALLOC_GROW(list->list, list->nr + 1, list->alloc);
oidcpy(&list->list[list->nr].oid, oid);
list->list[list->nr].name = xstrdup(name);
list->nr++;
struct bundle_header blank = BUNDLE_HEADER_INIT;
memcpy(header, &blank, sizeof(*header));
}

void bundle_header_release(struct bundle_header *header)
{
string_list_clear(&header->prerequisites, 1);
string_list_clear(&header->references, 1);
}

static int parse_capability(struct bundle_header *header, const char *capability)
@ -112,10 +115,11 @@ static int parse_bundle_header(int fd, struct bundle_header *header,
status = -1;
break;
} else {
struct object_id *dup = oiddup(&oid);
if (is_prereq)
add_to_ref_list(&oid, "", &header->prerequisites);
string_list_append(&header->prerequisites, "")->util = dup;
else
add_to_ref_list(&oid, p + 1, &header->references);
string_list_append(&header->references, p + 1)->util = dup;
}
}

@ -139,33 +143,38 @@ int read_bundle_header(const char *path, struct bundle_header *header)

int is_bundle(const char *path, int quiet)
{
struct bundle_header header;
struct bundle_header header = BUNDLE_HEADER_INIT;
int fd = open(path, O_RDONLY);

if (fd < 0)
return 0;
memset(&header, 0, sizeof(header));
fd = parse_bundle_header(fd, &header, quiet ? NULL : path);
if (fd >= 0)
close(fd);
bundle_header_release(&header);
return (fd >= 0);
}

static int list_refs(struct ref_list *r, int argc, const char **argv)
static int list_refs(struct string_list *r, int argc, const char **argv)
{
int i;

for (i = 0; i < r->nr; i++) {
struct object_id *oid;
const char *name;

if (argc > 1) {
int j;
for (j = 1; j < argc; j++)
if (!strcmp(r->list[i].name, argv[j]))
if (!strcmp(r->items[i].string, argv[j]))
break;
if (j == argc)
continue;
}
printf("%s %s\n", oid_to_hex(&r->list[i].oid),
r->list[i].name);

oid = r->items[i].util;
name = r->items[i].string;
printf("%s %s\n", oid_to_hex(oid), name);
}
return 0;
}
@ -181,7 +190,7 @@ int verify_bundle(struct repository *r,
* Do fast check, then if any prereqs are missing then go line by line
* to be verbose about the errors
*/
struct ref_list *p = &header->prerequisites;
struct string_list *p = &header->prerequisites;
struct rev_info revs;
const char *argv[] = {NULL, "--all", NULL};
struct commit *commit;
@ -193,16 +202,18 @@ int verify_bundle(struct repository *r,

repo_init_revisions(r, &revs, NULL);
for (i = 0; i < p->nr; i++) {
struct ref_list_entry *e = p->list + i;
struct object *o = parse_object(r, &e->oid);
struct string_list_item *e = p->items + i;
const char *name = e->string;
struct object_id *oid = e->util;
struct object *o = parse_object(r, oid);
if (o) {
o->flags |= PREREQ_MARK;
add_pending_object(&revs, o, e->name);
add_pending_object(&revs, o, name);
continue;
}
if (++ret == 1)
error("%s", message);
error("%s %s", oid_to_hex(&e->oid), e->name);
error("%s %s", oid_to_hex(oid), name);
}
if (revs.pending.nr != p->nr)
return ret;
@ -218,26 +229,29 @@ int verify_bundle(struct repository *r,
i--;

for (i = 0; i < p->nr; i++) {
struct ref_list_entry *e = p->list + i;
struct object *o = parse_object(r, &e->oid);
struct string_list_item *e = p->items + i;
const char *name = e->string;
const struct object_id *oid = e->util;
struct object *o = parse_object(r, oid);
assert(o); /* otherwise we'd have returned early */
if (o->flags & SHOWN)
continue;
if (++ret == 1)
error("%s", message);
error("%s %s", oid_to_hex(&e->oid), e->name);
error("%s %s", oid_to_hex(oid), name);
}

/* Clean up objects used, as they will be reused. */
for (i = 0; i < p->nr; i++) {
struct ref_list_entry *e = p->list + i;
commit = lookup_commit_reference_gently(r, &e->oid, 1);
struct string_list_item *e = p->items + i;
struct object_id *oid = e->util;
commit = lookup_commit_reference_gently(r, oid, 1);
if (commit)
clear_commit_marks(commit, ALL_REV_FLAGS);
}

if (verbose) {
struct ref_list *r;
struct string_list *r;

r = &header->references;
printf_ln(Q_("The bundle contains this ref:",
21 bundle.h
@ -3,22 +3,23 @@

#include "strvec.h"
#include "cache.h"

struct ref_list {
unsigned int nr, alloc;
struct ref_list_entry {
struct object_id oid;
char *name;
} *list;
};
#include "string-list.h"

struct bundle_header {
unsigned version;
struct ref_list prerequisites;
struct ref_list references;
struct string_list prerequisites;
struct string_list references;
const struct git_hash_algo *hash_algo;
};

#define BUNDLE_HEADER_INIT \
{ \
.prerequisites = STRING_LIST_INIT_DUP, \
.references = STRING_LIST_INIT_DUP, \
}
void bundle_header_init(struct bundle_header *header);
void bundle_header_release(struct bundle_header *header);

int is_bundle(const char *path, int quiet);
int read_bundle_header(const char *path, struct bundle_header *header);
int create_bundle(struct repository *r, const char *path,
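With struct ref_list gone, both lists in struct bundle_header are ordinary string_lists: the ref name lives in item->string and the object ID in item->util, exactly as list_refs() and verify_bundle() above now read them. A small consumer sketch under that layout (print_bundle_refs() and its error message are illustrative, not part of the diff):

#include "cache.h"
#include "bundle.h"

static int print_bundle_refs(const char *path)
{
	struct bundle_header header = BUNDLE_HEADER_INIT;
	int i, fd = read_bundle_header(path, &header);

	if (fd < 0)
		return error("'%s' does not look like a bundle", path);
	close(fd);

	for (i = 0; i < header.references.nr; i++) {
		struct string_list_item *e = &header.references.items[i];
		struct object_id *oid = e->util;	/* stored by parse_bundle_header() */

		printf("%s %s\n", oid_to_hex(oid), e->string);
	}
	bundle_header_release(&header);	/* frees the strings and the oid copies */
	return 0;
}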
11 cache-tree.c
@ -237,6 +237,11 @@ int cache_tree_fully_valid(struct cache_tree *it)
return 1;
}

static int must_check_existence(const struct cache_entry *ce)
{
return !(has_promisor_remote() && ce_skip_worktree(ce));
}

static int update_one(struct cache_tree *it,
struct cache_entry **cache,
int entries,
@ -378,8 +383,7 @@ static int update_one(struct cache_tree *it,
}

ce_missing_ok = mode == S_IFGITLINK || missing_ok ||
(has_promisor_remote() &&
ce_skip_worktree(ce));
!must_check_existence(ce);
if (is_null_oid(oid) ||
(!ce_missing_ok && !has_object_file(oid))) {
strbuf_release(&buffer);
@ -466,6 +470,9 @@ int cache_tree_update(struct index_state *istate, int flags)
if (!istate->cache_tree)
istate->cache_tree = cache_tree();

if (!(flags & WRITE_TREE_MISSING_OK) && has_promisor_remote())
prefetch_cache_entries(istate, must_check_existence);

trace_performance_enter();
trace2_region_enter("cache_tree", "update", the_repository);
i = update_one(istate->cache_tree, istate->cache, istate->cache_nr,
10 cache.h
@ -410,6 +410,15 @@ struct cache_entry *dup_cache_entry(const struct cache_entry *ce, struct index_s
*/
void validate_cache_entries(const struct index_state *istate);

/*
* Bulk prefetch all missing cache entries that are not GITLINKs and that match
* the given predicate. This function should only be called if
* has_promisor_remote() returns true.
*/
typedef int (*must_prefetch_predicate)(const struct cache_entry *);
void prefetch_cache_entries(const struct index_state *istate,
must_prefetch_predicate must_prefetch);

#ifdef USE_THE_INDEX_COMPATIBILITY_MACROS
extern struct index_state the_index;

@ -1385,6 +1394,7 @@ enum get_oid_result {
};

int repo_get_oid(struct repository *r, const char *str, struct object_id *oid);
__attribute__((format (printf, 2, 3)))
int get_oidf(struct object_id *oid, const char *fmt, ...);
int repo_get_oid_commit(struct repository *r, const char *str, struct object_id *oid);
int repo_get_oid_committish(struct repository *r, const char *str, struct object_id *oid);
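The new prefetch_cache_entries() declaration lets a caller batch-fetch all the missing, non-gitlink blobs it is about to inspect from the promisor remote in one round trip instead of faulting them in one by one; cache-tree.c above passes must_check_existence() as the predicate. A sketch of another caller (want_all_missing() is a made-up predicate used only for illustration):

#include "cache.h"
#include "promisor-remote.h"

/* made-up predicate: consider every missing, non-gitlink entry */
static int want_all_missing(const struct cache_entry *ce)
{
	return 1;
}

static void prefetch_everything(struct index_state *istate)
{
	/* the helper may only be called when a promisor remote is configured */
	if (has_promisor_remote())
		prefetch_cache_entries(istate, want_all_missing);
}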
167 cbtree.c (new file)
@ -0,0 +1,167 @@
/*
* crit-bit tree implementation, does no allocations internally
* For more information on crit-bit trees: https://cr.yp.to/critbit.html
* Based on Adam Langley's adaptation of Dan Bernstein's public domain code
* git clone https://github.com/agl/critbit.git
*/
#include "cbtree.h"

static struct cb_node *cb_node_of(const void *p)
{
return (struct cb_node *)((uintptr_t)p - 1);
}

/* locate the best match, does not do a final comparision */
static struct cb_node *cb_internal_best_match(struct cb_node *p,
const uint8_t *k, size_t klen)
{
while (1 & (uintptr_t)p) {
struct cb_node *q = cb_node_of(p);
uint8_t c = q->byte < klen ? k[q->byte] : 0;
size_t direction = (1 + (q->otherbits | c)) >> 8;

p = q->child[direction];
}
return p;
}

/* returns NULL if successful, existing cb_node if duplicate */
struct cb_node *cb_insert(struct cb_tree *t, struct cb_node *node, size_t klen)
{
size_t newbyte, newotherbits;
uint8_t c;
int newdirection;
struct cb_node **wherep, *p;

assert(!((uintptr_t)node & 1)); /* allocations must be aligned */

if (!t->root) { /* insert into empty tree */
t->root = node;
return NULL; /* success */
}

/* see if a node already exists */
p = cb_internal_best_match(t->root, node->k, klen);

/* find first differing byte */
for (newbyte = 0; newbyte < klen; newbyte++) {
if (p->k[newbyte] != node->k[newbyte])
goto different_byte_found;
}
return p; /* element exists, let user deal with it */

different_byte_found:
newotherbits = p->k[newbyte] ^ node->k[newbyte];
newotherbits |= newotherbits >> 1;
newotherbits |= newotherbits >> 2;
newotherbits |= newotherbits >> 4;
newotherbits = (newotherbits & ~(newotherbits >> 1)) ^ 255;
c = p->k[newbyte];
newdirection = (1 + (newotherbits | c)) >> 8;

node->byte = newbyte;
node->otherbits = newotherbits;
node->child[1 - newdirection] = node;

/* find a place to insert it */
wherep = &t->root;
for (;;) {
struct cb_node *q;
size_t direction;

p = *wherep;
if (!(1 & (uintptr_t)p))
break;
q = cb_node_of(p);
if (q->byte > newbyte)
break;
if (q->byte == newbyte && q->otherbits > newotherbits)
break;
c = q->byte < klen ? node->k[q->byte] : 0;
direction = (1 + (q->otherbits | c)) >> 8;
wherep = q->child + direction;
}

node->child[newdirection] = *wherep;
*wherep = (struct cb_node *)(1 + (uintptr_t)node);

return NULL; /* success */
}

struct cb_node *cb_lookup(struct cb_tree *t, const uint8_t *k, size_t klen)
{
struct cb_node *p = cb_internal_best_match(t->root, k, klen);

return p && !memcmp(p->k, k, klen) ? p : NULL;
}

struct cb_node *cb_unlink(struct cb_tree *t, const uint8_t *k, size_t klen)
{
struct cb_node **wherep = &t->root;
struct cb_node **whereq = NULL;
struct cb_node *q = NULL;
size_t direction = 0;
uint8_t c;
struct cb_node *p = t->root;

if (!p) return NULL; /* empty tree, nothing to delete */

/* traverse to find best match, keeping link to parent */
while (1 & (uintptr_t)p) {
whereq = wherep;
q = cb_node_of(p);
c = q->byte < klen ? k[q->byte] : 0;
direction = (1 + (q->otherbits | c)) >> 8;
wherep = q->child + direction;
p = *wherep;
}

if (memcmp(p->k, k, klen))
return NULL; /* no match, nothing unlinked */

/* found an exact match */
if (whereq) /* update parent */
*whereq = q->child[1 - direction];
else
t->root = NULL;
return p;
}

static enum cb_next cb_descend(struct cb_node *p, cb_iter fn, void *arg)
{
if (1 & (uintptr_t)p) {
struct cb_node *q = cb_node_of(p);
enum cb_next n = cb_descend(q->child[0], fn, arg);

return n == CB_BREAK ? n : cb_descend(q->child[1], fn, arg);
} else {
return fn(p, arg);
}
}

void cb_each(struct cb_tree *t, const uint8_t *kpfx, size_t klen,
cb_iter fn, void *arg)
{
struct cb_node *p = t->root;
struct cb_node *top = p;
size_t i = 0;

if (!p) return; /* empty tree */

/* Walk tree, maintaining top pointer */
while (1 & (uintptr_t)p) {
struct cb_node *q = cb_node_of(p);
uint8_t c = q->byte < klen ? kpfx[q->byte] : 0;
size_t direction = (1 + (q->otherbits | c)) >> 8;

p = q->child[direction];
if (q->byte < klen)
top = p;
}

for (i = 0; i < klen; i++) {
if (p->k[i] != kpfx[i])
return; /* "best" match failed */
}
cb_descend(top, fn, arg);
}
56 cbtree.h (new file)
@ -0,0 +1,56 @@
/*
* crit-bit tree implementation, does no allocations internally
* For more information on crit-bit trees: https://cr.yp.to/critbit.html
* Based on Adam Langley's adaptation of Dan Bernstein's public domain code
* git clone https://github.com/agl/critbit.git
*
* This is adapted to store arbitrary data (not just NUL-terminated C strings
* and allocates no memory internally. The user needs to allocate
* "struct cb_node" and fill cb_node.k[] with arbitrary match data
* for memcmp.
* If "klen" is variable, then it should be embedded into "c_node.k[]"
* Recursion is bound by the maximum value of "klen" used.
*/
#ifndef CBTREE_H
#define CBTREE_H

#include "git-compat-util.h"

struct cb_node;
struct cb_node {
struct cb_node *child[2];
/*
* n.b. uint32_t for `byte' is excessive for OIDs,
* we may consider shorter variants if nothing else gets stored.
*/
uint32_t byte;
uint8_t otherbits;
uint8_t k[FLEX_ARRAY]; /* arbitrary data */
};

struct cb_tree {
struct cb_node *root;
};

enum cb_next {
CB_CONTINUE = 0,
CB_BREAK = 1
};

#define CBTREE_INIT { .root = NULL }

static inline void cb_init(struct cb_tree *t)
{
t->root = NULL;
}

struct cb_node *cb_lookup(struct cb_tree *, const uint8_t *k, size_t klen);
struct cb_node *cb_insert(struct cb_tree *, struct cb_node *, size_t klen);
struct cb_node *cb_unlink(struct cb_tree *t, const uint8_t *k, size_t klen);

typedef enum cb_next (*cb_iter)(struct cb_node *, void *arg);

void cb_each(struct cb_tree *, const uint8_t *kpfx, size_t klen,
cb_iter, void *arg);

#endif /* CBTREE_H */
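Because the tree never allocates, the caller supplies each node: the key bytes go straight into the k[] flex array, and the allocation must be at least 2-byte aligned since cb_insert() tags internal-node pointers in their low bit. A stand-alone usage sketch assuming git's build environment (make_node() and the example key are invented for illustration):

#include "cbtree.h"

static struct cb_node *make_node(const char *key, size_t klen)
{
	/* caller-owned storage; xmalloc() returns suitably aligned memory */
	struct cb_node *n = xmalloc(st_add(sizeof(*n), klen));

	memcpy(n->k, key, klen);
	return n;
}

static void cbtree_demo(void)
{
	struct cb_tree t = CBTREE_INIT;
	const char *key = "refs/heads/main";
	size_t klen = strlen(key);

	if (cb_insert(&t, make_node(key, klen), klen))
		warning("key already present");	/* cb_insert() returns the existing node */

	if (cb_lookup(&t, (const uint8_t *)key, klen))
		printf("found %s\n", key);
}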
@ -58,9 +58,11 @@ void add_chunk(struct chunkfile *cf,

int write_chunkfile(struct chunkfile *cf, void *data)
{
int i;
int i, result = 0;
uint64_t cur_offset = hashfile_total(cf->f);

trace2_region_enter("chunkfile", "write", the_repository);

/* Add the table of contents to the current offset */
cur_offset += (cf->chunks_nr + 1) * CHUNK_TOC_ENTRY_SIZE;

@ -77,10 +79,10 @@ int write_chunkfile(struct chunkfile *cf, void *data)

for (i = 0; i < cf->chunks_nr; i++) {
off_t start_offset = hashfile_total(cf->f);
int result = cf->chunks[i].write_fn(cf->f, data);
result = cf->chunks[i].write_fn(cf->f, data);

if (result)
return result;
goto cleanup;

if (hashfile_total(cf->f) - start_offset != cf->chunks[i].size)
BUG("expected to write %"PRId64" bytes to chunk %"PRIx32", but wrote %"PRId64" instead",
@ -88,7 +90,9 @@ int write_chunkfile(struct chunkfile *cf, void *data)
hashfile_total(cf->f) - start_offset);
}

return 0;
cleanup:
trace2_region_leave("chunkfile", "write", the_repository);
return result;
}

int read_table_of_contents(struct chunkfile *cf,
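The write_chunkfile() hunks wrap the chunk-writing loop in a trace2 region; to keep the region balanced, the early "return result" on a failed write_fn() becomes "goto cleanup" so that trace2_region_leave() always runs before the function returns.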
Some files were not shown because too many files have changed in this diff.