#!/bin/sh
#
# Copyright (c) 2005 Junio C Hamano
#

test_description='Test the very basics part #1.

The rest of the test suite does not check the basic operation of git
plumbing commands to work very carefully. Their job is to concentrate
on tricky features that caused bugs in the past to detect regression.

This test runs very basic features, like registering things in cache,
writing tree, etc.

Note that this test *deliberately* hard-codes many expected object
IDs. When object ID computation changes, like in the previous case of
swapping compression and hashing order, the person who is making the
modification *should* take notice and update the test vectors here.
'

. ./test-lib.sh
# Helper for the "local" self-check below: declares two variables with
# the non-POSIX (but widely supported) "local" keyword and prints them.
# If "local" works, the caller's x/y must remain untouched.
try_local_xy () {
	local x="local" y="alsolocal" &&
	echo "$x $y"
}
# Check whether the shell supports the "local" keyword. "local" is not
# POSIX-standard, but it is very widely supported by POSIX-compliant
# shells, and we rely on it within Git's test framework.
#
# If your shell fails this test, the results of other tests may be
# unreliable. You may wish to report the problem to the Git mailing
# list <git@vger.kernel.org>, as it could cause us to reconsider
# relying on "local".
test_expect_success 'verify that the running shell supports "local"' '
	x="notlocal" &&
	y="alsonotlocal" &&
	echo "local alsolocal" >expected1 &&
	try_local_xy >actual1 &&
	test_cmp expected1 actual1 &&
	echo "notlocal alsonotlocal" >expected2 &&
	echo "$x $y" >actual2 &&
	test_cmp expected2 actual2
'
################################################################
# git init has been done in an empty repository.
# make sure it is empty.

test_expect_success '.git/objects should be empty after git init in an empty repo' '
	find .git/objects -type f -print >should-be-empty &&
	test_line_count = 0 should-be-empty
'
# also it should have 2 subdirectories; no fan-out anymore, pack, and info.
# 3 is counting "objects" itself
test_expect_success '.git/objects should have 3 subdirectories' '
	find .git/objects -type d -print >full-of-directories &&
	test_line_count = 3 full-of-directories
'
################################################################
# Test harness
test_expect_success 'success is reported like this' '
	:
'
# Run a test script inside a nested, isolated instance of test-lib.sh,
# so that its pass/fail counts do not pollute this suite's results.
#
# $1 - "!" if the sub-test is expected to exit non-zero, "" otherwise
# $2 - name of the sub-test (used as its directory and script name)
# $3 - description for the sub-test's test_description
# remaining arguments are forwarded to the generated script; stdin
# supplies the body of the sub-test.  stdout/stderr of the sub-test
# are captured in "$name/out" and "$name/err" for later checking.
_run_sub_test_lib_test_common () {
	neg="$1" name="$2" descr="$3" # stdin is the body of the test code
	shift 3
	mkdir "$name" &&
	(
		# Pretend we're not running under a test harness, whether we
		# are or not. The test-lib output depends on the setting of
		# this variable, so we need a stable setting under which to run
		# the sub-test.
		sane_unset HARNESS_ACTIVE &&
		cd "$name" &&
		write_script "$name.sh" "$TEST_SHELL_PATH" <<-EOF &&
		test_description='$descr (run in sub test-lib)

		This is run in a sub test-lib so that we do not get incorrect
		passing metrics
		'

		# Tell the framework that we are self-testing to make sure
		# it yields a stable result.
		GIT_TEST_FRAMEWORK_SELFTEST=t &&

		# Point to the t/test-lib.sh, which isn't in ../ as usual
		. "\$TEST_DIRECTORY"/test-lib.sh
		EOF
		cat >>"$name.sh" &&
		export TEST_DIRECTORY &&
		TEST_OUTPUT_DIRECTORY=$(pwd) &&
		export TEST_OUTPUT_DIRECTORY &&
		sane_unset GIT_TEST_FAIL_PREREQS &&
		if test -z "$neg"
		then
			./"$name.sh" "$@" >out 2>err
		else
			# the sub-test is expected to fail; invert its status
			! ./"$name.sh" "$@" >out 2>err
		fi
	)
}
# Run a sub-test that is expected to succeed; see
# _run_sub_test_lib_test_common for the argument convention.
run_sub_test_lib_test () {
	_run_sub_test_lib_test_common '' "$@"
}
# Run a sub-test that is expected to exit with a non-zero status; see
# _run_sub_test_lib_test_common for the argument convention.
run_sub_test_lib_test_err () {
	_run_sub_test_lib_test_common '!' "$@"
}
# Verify the captured stdout of a previously run sub-test against the
# expected output supplied on stdin, and insist that stderr was empty.
# Expected lines are prefixed with "> " and may end in "Z" to protect
# trailing whitespace; both decorations are stripped before comparing.
check_sub_test_lib_test () {
	name="$1" # stdin is the expected output from the test
	(
		cd "$name" &&
		test_must_be_empty err &&
		sed -e 's/^> //' -e 's/Z$//' >expect &&
		test_cmp expect out
	)
}
# Like check_sub_test_lib_test, but also verify the captured stderr.
# Expected stdout comes from stdin; expected stderr is read from file
# descriptor 3.  Both use the same "> "/"Z" decoration convention.
check_sub_test_lib_test_err () {
	name="$1" # stdin is the expected output from the test
	# expected error output is in descriptor 3
	(
		cd "$name" &&
		sed -e 's/^> //' -e 's/Z$//' >expect.out &&
		test_cmp expect.out out &&
		sed -e 's/^> //' -e 's/Z$//' <&3 >expect.err &&
		test_cmp expect.err err
	)
}
# Sanity-check the self-test machinery with an all-passing sub-suite.
test_expect_success 'pretend we have a fully passing test suite' "
	run_sub_test_lib_test full-pass '3 passing tests' <<-\\EOF &&
	for i in 1 2 3
	do
		test_expect_success \"passing test #\$i\" 'true'
	done
	test_done
	EOF
	check_sub_test_lib_test full-pass <<-\\EOF
	> ok 1 - passing test #1
	> ok 2 - passing test #2
	> ok 3 - passing test #3
	> # passed all 3 test(s)
	> 1..3
	EOF
"
# A suite with a genuine failure must make the sub-test exit non-zero.
test_expect_success 'pretend we have a partially passing test suite' "
	run_sub_test_lib_test_err \
		partial-pass '2/3 tests passing' <<-\\EOF &&
	test_expect_success 'passing test #1' 'true'
	test_expect_success 'failing test #2' 'false'
	test_expect_success 'passing test #3' 'true'
	test_done
	EOF
	check_sub_test_lib_test partial-pass <<-\\EOF
	> ok 1 - passing test #1
	> not ok 2 - failing test #2
	#	false
	> ok 3 - passing test #3
	> # failed 1 among 3 test(s)
	> 1..3
	EOF
"
# A failing test_expect_failure counts as a known breakage, not a failure.
test_expect_success 'pretend we have a known breakage' "
	run_sub_test_lib_test failing-todo 'A failing TODO test' <<-\\EOF &&
	test_expect_success 'passing test' 'true'
	test_expect_failure 'pretend we have a known breakage' 'false'
	test_done
	EOF
	check_sub_test_lib_test failing-todo <<-\\EOF
	> ok 1 - passing test
	> not ok 2 - pretend we have a known breakage # TODO known breakage
	> # still have 1 known breakage(s)
	> # passed all remaining 1 test(s)
	> 1..2
	EOF
"
# A passing test_expect_failure is reported as a vanished breakage.
test_expect_success 'pretend we have fixed a known breakage' "
	run_sub_test_lib_test passing-todo 'A passing TODO test' <<-\\EOF &&
	test_expect_failure 'pretend we have fixed a known breakage' 'true'
	test_done
	EOF
	check_sub_test_lib_test passing-todo <<-\\EOF
	> ok 1 - pretend we have fixed a known breakage # TODO known breakage vanished
	> # 1 known breakage(s) vanished; please update test(s)
	> 1..1
	EOF
"
# Vanished and remaining breakages must both be reported in the summary.
test_expect_success 'pretend we have fixed one of two known breakages (run in sub test-lib)' "
	run_sub_test_lib_test partially-passing-todos \
		'2 TODO tests, one passing' <<-\\EOF &&
	test_expect_failure 'pretend we have a known breakage' 'false'
	test_expect_success 'pretend we have a passing test' 'true'
	test_expect_failure 'pretend we have fixed another known breakage' 'true'
	test_done
	EOF
	check_sub_test_lib_test partially-passing-todos <<-\\EOF
	> not ok 1 - pretend we have a known breakage # TODO known breakage
	> ok 2 - pretend we have a passing test
	> ok 3 - pretend we have fixed another known breakage # TODO known breakage vanished
	> # 1 known breakage(s) vanished; please update test(s)
	> # still have 1 known breakage(s)
	> # passed all remaining 1 test(s)
	> 1..3
	EOF
"
# Mixing a pass, a real failure, and a known breakage: the failure makes
# the sub-test exit non-zero, while the breakage is reported separately.
test_expect_success 'pretend we have a pass, fail, and known breakage' "
	run_sub_test_lib_test_err \
		mixed-results1 'mixed results #1' <<-\\EOF &&
	test_expect_success 'passing test' 'true'
	test_expect_success 'failing test' 'false'
	test_expect_failure 'pretend we have a known breakage' 'false'
	test_done
	EOF
	check_sub_test_lib_test mixed-results1 <<-\\EOF
	> ok 1 - passing test
	> not ok 2 - failing test
	> #	false
	> not ok 3 - pretend we have a known breakage # TODO known breakage
	> # still have 1 known breakage(s)
	> # failed 1 among remaining 2 test(s)
	> 1..3
	EOF
"
# Every possible outcome at once: passes, failures, known breakages,
# and a vanished breakage — check the full summary accounting.
test_expect_success 'pretend we have a mix of all possible results' "
	run_sub_test_lib_test_err \
		mixed-results2 'mixed results #2' <<-\\EOF &&
	test_expect_success 'passing test' 'true'
	test_expect_success 'passing test' 'true'
	test_expect_success 'passing test' 'true'
	test_expect_success 'passing test' 'true'
	test_expect_success 'failing test' 'false'
	test_expect_success 'failing test' 'false'
	test_expect_success 'failing test' 'false'
	test_expect_failure 'pretend we have a known breakage' 'false'
	test_expect_failure 'pretend we have a known breakage' 'false'
	test_expect_failure 'pretend we have fixed a known breakage' 'true'
	test_done
	EOF
	check_sub_test_lib_test mixed-results2 <<-\\EOF
	> ok 1 - passing test
	> ok 2 - passing test
	> ok 3 - passing test
	> ok 4 - passing test
	> not ok 5 - failing test
	> #	false
	> not ok 6 - failing test
	> #	false
	> not ok 7 - failing test
	> #	false
	> not ok 8 - pretend we have a known breakage # TODO known breakage
	> not ok 9 - pretend we have a known breakage # TODO known breakage
	> ok 10 - pretend we have fixed a known breakage # TODO known breakage vanished
	> # 1 known breakage(s) vanished; please update test(s)
	> # still have 2 known breakage(s)
	> # failed 3 among remaining 7 test(s)
	> 1..10
	EOF
"
# --verbose should interleave each test's command and output with the
# TAP result lines.  The "Initialized empty" line is filtered out since
# its presence depends on repository setup chatter.
test_expect_success C_LOCALE_OUTPUT 'test --verbose' '
	run_sub_test_lib_test_err \
		t1234-verbose "test verbose" --verbose <<-\EOF &&
	test_expect_success "passing test" true
	test_expect_success "test with output" "echo foo"
	test_expect_success "failing test" false
	test_done
	EOF
	mv t1234-verbose/out t1234-verbose/out+ &&
	grep -v "^Initialized empty" t1234-verbose/out+ >t1234-verbose/out &&
	check_sub_test_lib_test t1234-verbose <<-\EOF
	> expecting success of 1234.1 '\''passing test'\'': true
	> ok 1 - passing test
	> Z
	> expecting success of 1234.2 '\''test with output'\'': echo foo
	> foo
	> ok 2 - test with output
	> Z
	> expecting success of 1234.3 '\''failing test'\'': false
	> not ok 3 - failing test
	> #	false
	> Z
	> # failed 1 among 3 test(s)
	> 1..3
	EOF
'
# --verbose-only=2 should show verbose detail for test 2 only.
test_expect_success 'test --verbose-only' '
	run_sub_test_lib_test_err \
		t2345-verbose-only-2 "test verbose-only=2" \
		--verbose-only=2 <<-\EOF &&
	test_expect_success "passing test" true
	test_expect_success "test with output" "echo foo"
	test_expect_success "failing test" false
	test_done
	EOF
	check_sub_test_lib_test t2345-verbose-only-2 <<-\EOF
	> ok 1 - passing test
	> Z
	> expecting success of 2345.2 '\''test with output'\'': echo foo
	> foo
	> ok 2 - test with output
	> Z
	> not ok 3 - failing test
	> #	false
	> # failed 1 among 3 test(s)
	> 1..3
	EOF
'
# GIT_SKIP_TESTS with a single "<suite>.<number>" entry skips that test.
# The export happens in a subshell so it does not leak into later tests.
test_expect_success 'GIT_SKIP_TESTS' "
	(
		GIT_SKIP_TESTS='git.2' && export GIT_SKIP_TESTS &&
		run_sub_test_lib_test git-skip-tests-basic \
			'GIT_SKIP_TESTS' <<-\\EOF &&
		for i in 1 2 3
		do
			test_expect_success \"passing test #\$i\" 'true'
		done
		test_done
		EOF
		check_sub_test_lib_test git-skip-tests-basic <<-\\EOF
		> ok 1 - passing test #1
		> ok 2 # skip passing test #2 (GIT_SKIP_TESTS)
		> ok 3 - passing test #3
		> # passed all 3 test(s)
		> 1..3
		EOF
	)
"
# GIT_SKIP_TESTS accepts a whitespace-separated list of entries.
test_expect_success 'GIT_SKIP_TESTS several tests' "
	(
		GIT_SKIP_TESTS='git.2 git.5' && export GIT_SKIP_TESTS &&
		run_sub_test_lib_test git-skip-tests-several \
			'GIT_SKIP_TESTS several tests' <<-\\EOF &&
		for i in 1 2 3 4 5 6
		do
			test_expect_success \"passing test #\$i\" 'true'
		done
		test_done
		EOF
		check_sub_test_lib_test git-skip-tests-several <<-\\EOF
		> ok 1 - passing test #1
		> ok 2 # skip passing test #2 (GIT_SKIP_TESTS)
		> ok 3 - passing test #3
		> ok 4 - passing test #4
		> ok 5 # skip passing test #5 (GIT_SKIP_TESTS)
		> ok 6 - passing test #6
		> # passed all 6 test(s)
		> 1..6
		EOF
	)
"
# GIT_SKIP_TESTS entries are shell glob patterns, so ranges work too.
test_expect_success 'GIT_SKIP_TESTS sh pattern' "
	(
		GIT_SKIP_TESTS='git.[2-5]' && export GIT_SKIP_TESTS &&
		run_sub_test_lib_test git-skip-tests-sh-pattern \
			'GIT_SKIP_TESTS sh pattern' <<-\\EOF &&
		for i in 1 2 3 4 5 6
		do
			test_expect_success \"passing test #\$i\" 'true'
		done
		test_done
		EOF
		check_sub_test_lib_test git-skip-tests-sh-pattern <<-\\EOF
		> ok 1 - passing test #1
		> ok 2 # skip passing test #2 (GIT_SKIP_TESTS)
		> ok 3 # skip passing test #3 (GIT_SKIP_TESTS)
		> ok 4 # skip passing test #4 (GIT_SKIP_TESTS)
		> ok 5 # skip passing test #5 (GIT_SKIP_TESTS)
		> ok 6 - passing test #6
		> # passed all 6 test(s)
		> 1..6
		EOF
	)
"
# Naming the suite itself skips every test and emits a TAP SKIP plan.
test_expect_success 'GIT_SKIP_TESTS entire suite' "
	(
		GIT_SKIP_TESTS='git' && export GIT_SKIP_TESTS &&
		run_sub_test_lib_test git-skip-tests-entire-suite \
			'GIT_SKIP_TESTS entire suite' <<-\\EOF &&
		for i in 1 2 3
		do
			test_expect_success \"passing test #\$i\" 'true'
		done
		test_done
		EOF
		check_sub_test_lib_test git-skip-tests-entire-suite <<-\\EOF
		> 1..0 # SKIP skip all tests in git
		EOF
	)
"
# A GIT_SKIP_TESTS entry for a different suite must have no effect here.
test_expect_success 'GIT_SKIP_TESTS does not skip unmatched suite' "
	(
		GIT_SKIP_TESTS='notgit' && export GIT_SKIP_TESTS &&
		run_sub_test_lib_test git-skip-tests-unmatched-suite \
			'GIT_SKIP_TESTS does not skip unmatched suite' <<-\\EOF &&
		for i in 1 2 3
		do
			test_expect_success \"passing test #\$i\" 'true'
		done
		test_done
		EOF
		check_sub_test_lib_test git-skip-tests-unmatched-suite <<-\\EOF
		> ok 1 - passing test #1
		> ok 2 - passing test #2
		> ok 3 - passing test #3
		> # passed all 3 test(s)
		> 1..3
		EOF
	)
"
# --run with a comma-separated list of test numbers runs only those.
test_expect_success '--run basic' "
	run_sub_test_lib_test run-basic \
		'--run basic' --run='1,3,5' <<-\\EOF &&
	for i in 1 2 3 4 5 6
	do
		test_expect_success \"passing test #\$i\" 'true'
	done
	test_done
	EOF
	check_sub_test_lib_test run-basic <<-\\EOF
	> ok 1 - passing test #1
	> ok 2 # skip passing test #2 (--run)
	> ok 3 - passing test #3
	> ok 4 # skip passing test #4 (--run)
	> ok 5 - passing test #5
	> ok 6 # skip passing test #6 (--run)
	> # passed all 6 test(s)
	> 1..6
	EOF
"
# --run accepts closed ranges like "1-3".
test_expect_success '--run with a range' "
	run_sub_test_lib_test run-range \
		'--run with a range' --run='1-3' <<-\\EOF &&
	for i in 1 2 3 4 5 6
	do
		test_expect_success \"passing test #\$i\" 'true'
	done
	test_done
	EOF
	check_sub_test_lib_test run-range <<-\\EOF
	> ok 1 - passing test #1
	> ok 2 - passing test #2
	> ok 3 - passing test #3
	> ok 4 # skip passing test #4 (--run)
	> ok 5 # skip passing test #5 (--run)
	> ok 6 # skip passing test #6 (--run)
	> # passed all 6 test(s)
	> 1..6
	EOF
"
# Multiple ranges may be combined with commas.
test_expect_success '--run with two ranges' "
	run_sub_test_lib_test run-two-ranges \
		'--run with two ranges' --run='1-2,5-6' <<-\\EOF &&
	for i in 1 2 3 4 5 6
	do
		test_expect_success \"passing test #\$i\" 'true'
	done
	test_done
	EOF
	check_sub_test_lib_test run-two-ranges <<-\\EOF
	> ok 1 - passing test #1
	> ok 2 - passing test #2
	> ok 3 # skip passing test #3 (--run)
	> ok 4 # skip passing test #4 (--run)
	> ok 5 - passing test #5
	> ok 6 - passing test #6
	> # passed all 6 test(s)
	> 1..6
	EOF
"
# A range with no lower bound ("-3") starts from the first test.
test_expect_success '--run with a left open range' "
	run_sub_test_lib_test run-left-open-range \
		'--run with a left open range' --run='-3' <<-\\EOF &&
	for i in 1 2 3 4 5 6
	do
		test_expect_success \"passing test #\$i\" 'true'
	done
	test_done
	EOF
	check_sub_test_lib_test run-left-open-range <<-\\EOF
	> ok 1 - passing test #1
	> ok 2 - passing test #2
	> ok 3 - passing test #3
	> ok 4 # skip passing test #4 (--run)
	> ok 5 # skip passing test #5 (--run)
	> ok 6 # skip passing test #6 (--run)
	> # passed all 6 test(s)
	> 1..6
	EOF
"
# A range with no upper bound ("4-") runs through the last test.
test_expect_success '--run with a right open range' "
	run_sub_test_lib_test run-right-open-range \
		'--run with a right open range' --run='4-' <<-\\EOF &&
	for i in 1 2 3 4 5 6
	do
		test_expect_success \"passing test #\$i\" 'true'
	done
	test_done
	EOF
	check_sub_test_lib_test run-right-open-range <<-\\EOF
	> ok 1 # skip passing test #1 (--run)
	> ok 2 # skip passing test #2 (--run)
	> ok 3 # skip passing test #3 (--run)
	> ok 4 - passing test #4
	> ok 5 - passing test #5
	> ok 6 - passing test #6
	> # passed all 6 test(s)
	> 1..6
	EOF
"
# A leading "!" negates a selector: run everything except that test.
# The '"'!3'"' dance keeps the ! from history expansion in the outer
# double-quoted test body.
test_expect_success '--run with basic negation' "
	run_sub_test_lib_test run-basic-neg \
		'--run with basic negation' --run='"'!3'"' <<-\\EOF &&
	for i in 1 2 3 4 5 6
	do
		test_expect_success \"passing test #\$i\" 'true'
	done
	test_done
	EOF
	check_sub_test_lib_test run-basic-neg <<-\\EOF
	> ok 1 - passing test #1
	> ok 2 - passing test #2
	> ok 3 # skip passing test #3 (--run)
	> ok 4 - passing test #4
	> ok 5 - passing test #5
	> ok 6 - passing test #6
	> # passed all 6 test(s)
	> 1..6
	EOF
"
# Multiple negated selectors exclude each named test.
test_expect_success '--run with two negations' "
	run_sub_test_lib_test run-two-neg \
		'--run with two negations' --run='"'!3,!6'"' <<-\\EOF &&
	for i in 1 2 3 4 5 6
	do
		test_expect_success \"passing test #\$i\" 'true'
	done
	test_done
	EOF
	check_sub_test_lib_test run-two-neg <<-\\EOF
	> ok 1 - passing test #1
	> ok 2 - passing test #2
	> ok 3 # skip passing test #3 (--run)
	> ok 4 - passing test #4
	> ok 5 - passing test #5
	> ok 6 # skip passing test #6 (--run)
	> # passed all 6 test(s)
	> 1..6
	EOF
"
# An inclusion range followed by a negation: "-4,!2" runs 1,3,4.
test_expect_success '--run a range and negation' "
	run_sub_test_lib_test run-range-and-neg \
		'--run a range and negation' --run='"'-4,!2'"' <<-\\EOF &&
	for i in 1 2 3 4 5 6
	do
		test_expect_success \"passing test #\$i\" 'true'
	done
	test_done
	EOF
	check_sub_test_lib_test run-range-and-neg <<-\\EOF
	> ok 1 - passing test #1
	> ok 2 # skip passing test #2 (--run)
	> ok 3 - passing test #3
	> ok 4 - passing test #4
	> ok 5 # skip passing test #5 (--run)
	> ok 6 # skip passing test #6 (--run)
	> # passed all 6 test(s)
	> 1..6
	EOF
"
# Negating a range ("!1-3") runs everything outside it.
test_expect_success '--run range negation' "
	run_sub_test_lib_test run-range-neg \
		'--run range negation' --run='"'!1-3'"' <<-\\EOF &&
	for i in 1 2 3 4 5 6
	do
		test_expect_success \"passing test #\$i\" 'true'
	done
	test_done
	EOF
	check_sub_test_lib_test run-range-neg <<-\\EOF
	> ok 1 # skip passing test #1 (--run)
	> ok 2 # skip passing test #2 (--run)
	> ok 3 # skip passing test #3 (--run)
	> ok 4 - passing test #4
	> ok 5 - passing test #5
	> ok 6 - passing test #6
	> # passed all 6 test(s)
	> 1..6
	EOF
"
# Selectors apply left to right: include 1-5, drop 1-3, re-add 2.
test_expect_success '--run include, exclude and include' "
	run_sub_test_lib_test run-inc-neg-inc \
		'--run include, exclude and include' \
		--run='"'1-5,!1-3,2'"' <<-\\EOF &&
	for i in 1 2 3 4 5 6
	do
		test_expect_success \"passing test #\$i\" 'true'
	done
	test_done
	EOF
	check_sub_test_lib_test run-inc-neg-inc <<-\\EOF
	> ok 1 # skip passing test #1 (--run)
	> ok 2 - passing test #2
	> ok 3 # skip passing test #3 (--run)
	> ok 4 - passing test #4
	> ok 5 - passing test #5
	> ok 6 # skip passing test #6 (--run)
	> # passed all 6 test(s)
	> 1..6
	EOF
"
|
|
|
|
|
|
|
test_expect_success '--run include, exclude and include, comma separated' "
|
|
|
|
run_sub_test_lib_test run-inc-neg-inc-comma \
|
|
|
|
'--run include, exclude and include, comma separated' \
|
|
|
|
--run=1-5,\!1-3,2 <<-\\EOF &&
|
|
|
|
for i in 1 2 3 4 5 6
|
|
|
|
do
|
|
|
|
test_expect_success \"passing test #\$i\" 'true'
|
|
|
|
done
|
|
|
|
test_done
|
|
|
|
EOF
|
|
|
|
check_sub_test_lib_test run-inc-neg-inc-comma <<-\\EOF
|
|
|
|
> ok 1 # skip passing test #1 (--run)
|
|
|
|
> ok 2 - passing test #2
|
|
|
|
> ok 3 # skip passing test #3 (--run)
|
|
|
|
> ok 4 - passing test #4
|
|
|
|
> ok 5 - passing test #5
|
|
|
|
> ok 6 # skip passing test #6 (--run)
|
|
|
|
> # passed all 6 test(s)
|
|
|
|
> 1..6
|
|
|
|
EOF
|
|
|
|
"
|
|
|
|
|
|
|
|
test_expect_success '--run exclude and include' "
|
|
|
|
run_sub_test_lib_test run-neg-inc \
|
|
|
|
'--run exclude and include' \
|
2020-10-18 02:23:45 +02:00
|
|
|
--run='"'!3-,5'"' <<-\\EOF &&
|
2014-04-30 11:50:44 +02:00
|
|
|
for i in 1 2 3 4 5 6
|
|
|
|
do
|
|
|
|
test_expect_success \"passing test #\$i\" 'true'
|
|
|
|
done
|
|
|
|
test_done
|
|
|
|
EOF
|
|
|
|
check_sub_test_lib_test run-neg-inc <<-\\EOF
|
|
|
|
> ok 1 - passing test #1
|
|
|
|
> ok 2 - passing test #2
|
|
|
|
> ok 3 # skip passing test #3 (--run)
|
|
|
|
> ok 4 # skip passing test #4 (--run)
|
|
|
|
> ok 5 - passing test #5
|
|
|
|
> ok 6 # skip passing test #6 (--run)
|
|
|
|
> # passed all 6 test(s)
|
|
|
|
> 1..6
|
|
|
|
EOF
|
|
|
|
"
|
|
|
|
|
|
|
|
test_expect_success '--run empty selectors' "
|
|
|
|
run_sub_test_lib_test run-empty-sel \
|
|
|
|
'--run empty selectors' \
|
|
|
|
--run='1,,3,,,5' <<-\\EOF &&
|
|
|
|
for i in 1 2 3 4 5 6
|
|
|
|
do
|
|
|
|
test_expect_success \"passing test #\$i\" 'true'
|
|
|
|
done
|
|
|
|
test_done
|
|
|
|
EOF
|
|
|
|
check_sub_test_lib_test run-empty-sel <<-\\EOF
|
|
|
|
> ok 1 - passing test #1
|
|
|
|
> ok 2 # skip passing test #2 (--run)
|
|
|
|
> ok 3 - passing test #3
|
|
|
|
> ok 4 # skip passing test #4 (--run)
|
|
|
|
> ok 5 - passing test #5
|
|
|
|
> ok 6 # skip passing test #6 (--run)
|
|
|
|
> # passed all 6 test(s)
|
|
|
|
> 1..6
|
|
|
|
EOF
|
|
|
|
"
|
|
|
|
|
2020-10-18 02:23:45 +02:00
|
|
|
test_expect_success '--run substring selector' "
|
|
|
|
run_sub_test_lib_test run-substring-selector \
|
|
|
|
'--run empty selectors' \
|
|
|
|
--run='relevant' <<-\\EOF &&
|
|
|
|
test_expect_success \"relevant test\" 'true'
|
|
|
|
for i in 1 2 3 4 5 6
|
|
|
|
do
|
|
|
|
test_expect_success \"other test #\$i\" 'true'
|
|
|
|
done
|
|
|
|
test_done
|
|
|
|
EOF
|
|
|
|
check_sub_test_lib_test run-substring-selector <<-\\EOF
|
|
|
|
> ok 1 - relevant test
|
|
|
|
> ok 2 # skip other test #1 (--run)
|
|
|
|
> ok 3 # skip other test #2 (--run)
|
|
|
|
> ok 4 # skip other test #3 (--run)
|
|
|
|
> ok 5 # skip other test #4 (--run)
|
|
|
|
> ok 6 # skip other test #5 (--run)
|
|
|
|
> ok 7 # skip other test #6 (--run)
|
|
|
|
> # passed all 7 test(s)
|
|
|
|
> 1..7
|
|
|
|
EOF
|
|
|
|
"
|
|
|
|
|
|
|
|
test_expect_success '--run keyword selection' "
|
2014-04-30 11:50:44 +02:00
|
|
|
run_sub_test_lib_test_err run-inv-range-start \
|
|
|
|
'--run invalid range start' \
|
|
|
|
--run='a-5' <<-\\EOF &&
|
|
|
|
test_expect_success \"passing test #1\" 'true'
|
|
|
|
test_done
|
|
|
|
EOF
|
|
|
|
check_sub_test_lib_test_err run-inv-range-start \
|
|
|
|
<<-\\EOF_OUT 3<<-\\EOF_ERR
|
|
|
|
> FATAL: Unexpected exit with code 1
|
|
|
|
EOF_OUT
|
|
|
|
> error: --run: invalid non-numeric in range start: 'a-5'
|
|
|
|
EOF_ERR
|
|
|
|
"
|
|
|
|
|
|
|
|
test_expect_success '--run invalid range end' "
|
|
|
|
run_sub_test_lib_test_err run-inv-range-end \
|
|
|
|
'--run invalid range end' \
|
|
|
|
--run='1-z' <<-\\EOF &&
|
|
|
|
test_expect_success \"passing test #1\" 'true'
|
|
|
|
test_done
|
|
|
|
EOF
|
|
|
|
check_sub_test_lib_test_err run-inv-range-end \
|
|
|
|
<<-\\EOF_OUT 3<<-\\EOF_ERR
|
|
|
|
> FATAL: Unexpected exit with code 1
|
|
|
|
EOF_OUT
|
|
|
|
> error: --run: invalid non-numeric in range end: '1-z'
|
|
|
|
EOF_ERR
|
|
|
|
"
|
|
|
|
|
t0000: run prereq tests inside sub-test
We test the behavior of prerequisites in t0000 by setting up fake ones
in the main test script, trying to run some tests, and then seeing if
those tests impacted the environment correctly. If they didn't, then we
write a message and manually call exit.
Instead, let's push these down into a sub-test, like many of the other
tests covering the framework itself. This has a few advantages:
- it does not pollute the test output with mention of skipped tests
(that we know are uninteresting -- the point of the test was to see
that these are skipped).
- when running in a TAP harness, we get a useful test failure message
(whereas when the script exits early, a tool like "prove" simply
says "Dubious, test returned 1").
- we do not have to worry about different test environments, such as
when GIT_TEST_FAIL_PREREQS_INTERNAL is set. Our sub-test helpers
already give us a known environment.
- the tests themselves are a bit easier to read, as we can just check
the test-framework output to see what happened (and get the usual
test_cmp diff if it failed)
A few notes on the implementation:
- we could do one sub-test per each individual test_expect_success. I
broke it up here into a few logical groups, as I think this makes it
more readable
- the original tests modified environment variables inside the test
bodies. Instead, I've used "true" as the body of a test we expect to
run and "false" otherwise. Technically this does not confirm that
the body of the "true" test actually ran. We are trusting the
framework output to believe that it truly ran, which is sufficient
for these tests. And I think the end result is much simpler to
follow.
- the nested_prereq test uses a few bare "test -f" calls; I converted
these to our usual test_path_is_* helpers while moving the code
around.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-28 07:32:32 +01:00
|
|
|
test_expect_success 'tests respect prerequisites' '
|
|
|
|
run_sub_test_lib_test prereqs "tests respect prereqs" <<-\EOF &&
|
2014-04-30 11:50:44 +02:00
|
|
|
|
t0000: run prereq tests inside sub-test
We test the behavior of prerequisites in t0000 by setting up fake ones
in the main test script, trying to run some tests, and then seeing if
those tests impacted the environment correctly. If they didn't, then we
write a message and manually call exit.
Instead, let's push these down into a sub-test, like many of the other
tests covering the framework itself. This has a few advantages:
- it does not pollute the test output with mention of skipped tests
(that we know are uninteresting -- the point of the test was to see
that these are skipped).
- when running in a TAP harness, we get a useful test failure message
(whereas when the script exits early, a tool like "prove" simply
says "Dubious, test returned 1").
- we do not have to worry about different test environments, such as
when GIT_TEST_FAIL_PREREQS_INTERNAL is set. Our sub-test helpers
already give us a known environment.
- the tests themselves are a bit easier to read, as we can just check
the test-framework output to see what happened (and get the usual
test_cmp diff if it failed)
A few notes on the implementation:
- we could do one sub-test per each individual test_expect_success. I
broke it up here into a few logical groups, as I think this makes it
more readable
- the original tests modified environment variables inside the test
bodies. Instead, I've used "true" as the body of a test we expect to
run and "false" otherwise. Technically this does not confirm that
the body of the "true" test actually ran. We are trusting the
framework output to believe that it truly ran, which is sufficient
for these tests. And I think the end result is much simpler to
follow.
- the nested_prereq test uses a few bare "test -f" calls; I converted
these to our usual test_path_is_* helpers while moving the code
around.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-28 07:32:32 +01:00
|
|
|
test_set_prereq HAVEIT
|
|
|
|
test_expect_success HAVEIT "prereq is satisfied" "true"
|
|
|
|
test_expect_success "have_prereq works" "
|
|
|
|
test_have_prereq HAVEIT
|
|
|
|
"
|
|
|
|
test_expect_success DONTHAVEIT "prereq not satisfied" "false"
|
2012-11-15 01:33:25 +01:00
|
|
|
|
t0000: run prereq tests inside sub-test
We test the behavior of prerequisites in t0000 by setting up fake ones
in the main test script, trying to run some tests, and then seeing if
those tests impacted the environment correctly. If they didn't, then we
write a message and manually call exit.
Instead, let's push these down into a sub-test, like many of the other
tests covering the framework itself. This has a few advantages:
- it does not pollute the test output with mention of skipped tests
(that we know are uninteresting -- the point of the test was to see
that these are skipped).
- when running in a TAP harness, we get a useful test failure message
(whereas when the script exits early, a tool like "prove" simply
says "Dubious, test returned 1").
- we do not have to worry about different test environments, such as
when GIT_TEST_FAIL_PREREQS_INTERNAL is set. Our sub-test helpers
already give us a known environment.
- the tests themselves are a bit easier to read, as we can just check
the test-framework output to see what happened (and get the usual
test_cmp diff if it failed)
A few notes on the implementation:
- we could do one sub-test per each individual test_expect_success. I
broke it up here into a few logical groups, as I think this makes it
more readable
- the original tests modified environment variables inside the test
bodies. Instead, I've used "true" as the body of a test we expect to
run and "false" otherwise. Technically this does not confirm that
the body of the "true" test actually ran. We are trusting the
framework output to believe that it truly ran, which is sufficient
for these tests. And I think the end result is much simpler to
follow.
- the nested_prereq test uses a few bare "test -f" calls; I converted
these to our usual test_path_is_* helpers while moving the code
around.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-28 07:32:32 +01:00
|
|
|
test_set_prereq HAVETHIS
|
|
|
|
test_expect_success HAVETHIS,HAVEIT "multiple prereqs" "true"
|
|
|
|
test_expect_success HAVEIT,DONTHAVEIT "mixed prereqs (yes,no)" "false"
|
|
|
|
test_expect_success DONTHAVEIT,HAVEIT "mixed prereqs (no,yes)" "false"
|
2012-11-15 01:33:25 +01:00
|
|
|
|
t0000: run prereq tests inside sub-test
We test the behavior of prerequisites in t0000 by setting up fake ones
in the main test script, trying to run some tests, and then seeing if
those tests impacted the environment correctly. If they didn't, then we
write a message and manually call exit.
Instead, let's push these down into a sub-test, like many of the other
tests covering the framework itself. This has a few advantages:
- it does not pollute the test output with mention of skipped tests
(that we know are uninteresting -- the point of the test was to see
that these are skipped).
- when running in a TAP harness, we get a useful test failure message
(whereas when the script exits early, a tool like "prove" simply
says "Dubious, test returned 1").
- we do not have to worry about different test environments, such as
when GIT_TEST_FAIL_PREREQS_INTERNAL is set. Our sub-test helpers
already give us a known environment.
- the tests themselves are a bit easier to read, as we can just check
the test-framework output to see what happened (and get the usual
test_cmp diff if it failed)
A few notes on the implementation:
- we could do one sub-test per each individual test_expect_success. I
broke it up here into a few logical groups, as I think this makes it
more readable
- the original tests modified environment variables inside the test
bodies. Instead, I've used "true" as the body of a test we expect to
run and "false" otherwise. Technically this does not confirm that
the body of the "true" test actually ran. We are trusting the
framework output to believe that it truly ran, which is sufficient
for these tests. And I think the end result is much simpler to
follow.
- the nested_prereq test uses a few bare "test -f" calls; I converted
these to our usual test_path_is_* helpers while moving the code
around.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-28 07:32:32 +01:00
|
|
|
test_done
|
|
|
|
EOF
|
|
|
|
|
|
|
|
check_sub_test_lib_test prereqs <<-\EOF
|
|
|
|
ok 1 - prereq is satisfied
|
|
|
|
ok 2 - have_prereq works
|
|
|
|
ok 3 # skip prereq not satisfied (missing DONTHAVEIT)
|
|
|
|
ok 4 - multiple prereqs
|
|
|
|
ok 5 # skip mixed prereqs (yes,no) (missing DONTHAVEIT of HAVEIT,DONTHAVEIT)
|
|
|
|
ok 6 # skip mixed prereqs (no,yes) (missing DONTHAVEIT of DONTHAVEIT,HAVEIT)
|
|
|
|
# passed all 6 test(s)
|
|
|
|
1..6
|
|
|
|
EOF
|
2012-11-15 01:33:25 +01:00
|
|
|
'
|
|
|
|
|
t0000: run prereq tests inside sub-test
We test the behavior of prerequisites in t0000 by setting up fake ones
in the main test script, trying to run some tests, and then seeing if
those tests impacted the environment correctly. If they didn't, then we
write a message and manually call exit.
Instead, let's push these down into a sub-test, like many of the other
tests covering the framework itself. This has a few advantages:
- it does not pollute the test output with mention of skipped tests
(that we know are uninteresting -- the point of the test was to see
that these are skipped).
- when running in a TAP harness, we get a useful test failure message
(whereas when the script exits early, a tool like "prove" simply
says "Dubious, test returned 1").
- we do not have to worry about different test environments, such as
when GIT_TEST_FAIL_PREREQS_INTERNAL is set. Our sub-test helpers
already give us a known environment.
- the tests themselves are a bit easier to read, as we can just check
the test-framework output to see what happened (and get the usual
test_cmp diff if it failed)
A few notes on the implementation:
- we could do one sub-test per each individual test_expect_success. I
broke it up here into a few logical groups, as I think this makes it
more readable
- the original tests modified environment variables inside the test
bodies. Instead, I've used "true" as the body of a test we expect to
run and "false" otherwise. Technically this does not confirm that
the body of the "true" test actually ran. We are trusting the
framework output to believe that it truly ran, which is sufficient
for these tests. And I think the end result is much simpler to
follow.
- the nested_prereq test uses a few bare "test -f" calls; I converted
these to our usual test_path_is_* helpers while moving the code
around.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-28 07:32:32 +01:00
|
|
|
test_expect_success 'tests respect lazy prerequisites' '
|
|
|
|
run_sub_test_lib_test lazy-prereqs "respect lazy prereqs" <<-\EOF &&
|
2012-11-15 01:33:25 +01:00
|
|
|
|
t0000: run prereq tests inside sub-test
We test the behavior of prerequisites in t0000 by setting up fake ones
in the main test script, trying to run some tests, and then seeing if
those tests impacted the environment correctly. If they didn't, then we
write a message and manually call exit.
Instead, let's push these down into a sub-test, like many of the other
tests covering the framework itself. This has a few advantages:
- it does not pollute the test output with mention of skipped tests
(that we know are uninteresting -- the point of the test was to see
that these are skipped).
- when running in a TAP harness, we get a useful test failure message
(whereas when the script exits early, a tool like "prove" simply
says "Dubious, test returned 1").
- we do not have to worry about different test environments, such as
when GIT_TEST_FAIL_PREREQS_INTERNAL is set. Our sub-test helpers
already give us a known environment.
- the tests themselves are a bit easier to read, as we can just check
the test-framework output to see what happened (and get the usual
test_cmp diff if it failed)
A few notes on the implementation:
- we could do one sub-test per each individual test_expect_success. I
broke it up here into a few logical groups, as I think this makes it
more readable
- the original tests modified environment variables inside the test
bodies. Instead, I've used "true" as the body of a test we expect to
run and "false" otherwise. Technically this does not confirm that
the body of the "true" test actually ran. We are trusting the
framework output to believe that it truly ran, which is sufficient
for these tests. And I think the end result is much simpler to
follow.
- the nested_prereq test uses a few bare "test -f" calls; I converted
these to our usual test_path_is_* helpers while moving the code
around.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-28 07:32:32 +01:00
|
|
|
test_lazy_prereq LAZY_TRUE true
|
|
|
|
test_expect_success LAZY_TRUE "lazy prereq is satisifed" "true"
|
|
|
|
test_expect_success !LAZY_TRUE "negative lazy prereq" "false"
|
|
|
|
|
|
|
|
test_lazy_prereq LAZY_FALSE false
|
|
|
|
test_expect_success LAZY_FALSE "lazy prereq not satisfied" "false"
|
|
|
|
test_expect_success !LAZY_FALSE "negative false prereq" "true"
|
|
|
|
|
|
|
|
test_done
|
|
|
|
EOF
|
|
|
|
|
|
|
|
check_sub_test_lib_test lazy-prereqs <<-\EOF
|
|
|
|
ok 1 - lazy prereq is satisifed
|
|
|
|
ok 2 # skip negative lazy prereq (missing !LAZY_TRUE)
|
|
|
|
ok 3 # skip lazy prereq not satisfied (missing LAZY_FALSE)
|
|
|
|
ok 4 - negative false prereq
|
|
|
|
# passed all 4 test(s)
|
|
|
|
1..4
|
|
|
|
EOF
|
tests: make sure nested lazy prereqs work reliably
Some test prereqs depend on other prereqs, so in a couple of cases we
have nested prereqs that look something like this:
test_lazy_prereq FOO '
test_have_prereq BAR &&
check-foo
'
This can be problematic, because lazy prereqs are evaluated in the
'$TRASH_DIRECTORY/prereq-test-dir' directory, which is the same for
every prereq, and which is automatically removed after the prereq has
been evaluated. So if the inner prereq (BAR above) is a lazy prereq
that hasn't been evaluated yet, then after its evaluation the
'prereq-test-dir' shared with the outer prereq will be removed.
Consequently, 'check-foo' will find itself in a non-existing
directory, and won't be able to create/access any files in its cwd,
which could result in an unfulfilled outer prereq.
Luckily, this doesn't affect any of our current nested prereqs, either
because the inner prereq is not a lazy prereq (e.g. MINGW, CYGWIN or
PERL), or because the outer prereq happens to be checked without
touching any paths in its cwd (GPGSM and RFC1991 in 'lib-gpg.sh').
So to prevent nested prereqs from interfering with each other let's
evaluate each prereq in its own dedicated directory by appending the
prereq's name to the directory name, e.g. 'prereq-test-dir-SYMLINKS'.
In the test we check not only that the prereq test dir is still there,
but also that the inner prereq can't mess with the outer prereq's
files.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-11-18 20:04:13 +01:00
|
|
|
'
|
|
|
|
|
t0000: run prereq tests inside sub-test
We test the behavior of prerequisites in t0000 by setting up fake ones
in the main test script, trying to run some tests, and then seeing if
those tests impacted the environment correctly. If they didn't, then we
write a message and manually call exit.
Instead, let's push these down into a sub-test, like many of the other
tests covering the framework itself. This has a few advantages:
- it does not pollute the test output with mention of skipped tests
(that we know are uninteresting -- the point of the test was to see
that these are skipped).
- when running in a TAP harness, we get a useful test failure message
(whereas when the script exits early, a tool like "prove" simply
says "Dubious, test returned 1").
- we do not have to worry about different test environments, such as
when GIT_TEST_FAIL_PREREQS_INTERNAL is set. Our sub-test helpers
already give us a known environment.
- the tests themselves are a bit easier to read, as we can just check
the test-framework output to see what happened (and get the usual
test_cmp diff if it failed)
A few notes on the implementation:
- we could do one sub-test per each individual test_expect_success. I
broke it up here into a few logical groups, as I think this makes it
more readable
- the original tests modified environment variables inside the test
bodies. Instead, I've used "true" as the body of a test we expect to
run and "false" otherwise. Technically this does not confirm that
the body of the "true" test actually ran. We are trusting the
framework output to believe that it truly ran, which is sufficient
for these tests. And I think the end result is much simpler to
follow.
- the nested_prereq test uses a few bare "test -f" calls; I converted
these to our usual test_path_is_* helpers while moving the code
around.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-28 07:32:32 +01:00
|
|
|
test_expect_success 'nested lazy prerequisites' '
|
|
|
|
run_sub_test_lib_test nested-lazy "nested lazy prereqs" <<-\EOF &&
|
|
|
|
|
|
|
|
test_lazy_prereq NESTED_INNER "
|
|
|
|
>inner &&
|
|
|
|
rm -f outer
|
|
|
|
"
|
|
|
|
test_lazy_prereq NESTED_PREREQ "
|
|
|
|
>outer &&
|
|
|
|
test_have_prereq NESTED_INNER &&
|
|
|
|
echo can create new file in cwd >file &&
|
|
|
|
test_path_is_file outer &&
|
|
|
|
test_path_is_missing inner
|
|
|
|
"
|
|
|
|
test_expect_success NESTED_PREREQ "evaluate nested prereq" "true"
|
|
|
|
|
|
|
|
test_done
|
|
|
|
EOF
|
|
|
|
|
|
|
|
check_sub_test_lib_test nested-lazy <<-\EOF
|
|
|
|
ok 1 - evaluate nested prereq
|
|
|
|
# passed all 1 test(s)
|
|
|
|
1..1
|
|
|
|
EOF
|
|
|
|
'
|
tests: make sure nested lazy prereqs work reliably
Some test prereqs depend on other prereqs, so in a couple of cases we
have nested prereqs that look something like this:
test_lazy_prereq FOO '
test_have_prereq BAR &&
check-foo
'
This can be problematic, because lazy prereqs are evaluated in the
'$TRASH_DIRECTORY/prereq-test-dir' directory, which is the same for
every prereq, and which is automatically removed after the prereq has
been evaluated. So if the inner prereq (BAR above) is a lazy prereq
that hasn't been evaluated yet, then after its evaluation the
'prereq-test-dir' shared with the outer prereq will be removed.
Consequently, 'check-foo' will find itself in a non-existing
directory, and won't be able to create/access any files in its cwd,
which could result in an unfulfilled outer prereq.
Luckily, this doesn't affect any of our current nested prereqs, either
because the inner prereq is not a lazy prereq (e.g. MINGW, CYGWIN or
PERL), or because the outer prereq happens to be checked without
touching any paths in its cwd (GPGSM and RFC1991 in 'lib-gpg.sh').
So to prevent nested prereqs from interfering with each other let's
evaluate each prereq in its own dedicated directory by appending the
prereq's name to the directory name, e.g. 'prereq-test-dir-SYMLINKS'.
In the test we check not only that the prereq test dir is still there,
but also that the inner prereq can't mess with the outer prereq's
files.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-11-18 20:04:13 +01:00
|
|
|
|
tests: do not let lazy prereqs inside `test_expect_*` turn off tracing
The `test_expect_*` functions use `test_eval_` and so does
`test_run_lazy_prereq_`. If tracing is enabled via the `-x` option,
`test_eval_` turns on tracing while evaluating the code block, and turns
it off directly after it.
This is unwanted for nested invocations.
One somewhat surprising example of this is when running a test that
calls `test_i18ngrep`: that function requires the `C_LOCALE_OUTPUT`
prereq, and that prereq is a lazy one, so it is evaluated via
`test_eval_`, the command tracing is turned off, and the test case
continues to run _without tracing the commands_.
Another somewhat surprising example is when one lazy prereq depends on
another lazy prereq: the former will call `test_have_prereq` with the
latter one, which in turn calls `test_eval_` and -- you guessed it --
tracing (if enabled) will be turned off _before_ returning to evaluating
the other lazy prereq.
As we will introduce just such a scenario with the GPG, GPGSM and
RFC1991 prereqs, let's fix that by introducing a variable that keeps
track of the current trace level: nested `test_eval_` calls will
increment and then decrement the level, and only when it reaches 0, the
tracing will _actually_ be turned off.
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-03-26 16:35:26 +01:00
|
|
|
test_expect_success 'lazy prereqs do not turn off tracing' "
|
|
|
|
run_sub_test_lib_test lazy-prereq-and-tracing \
|
|
|
|
'lazy prereqs and -x' -v -x <<-\\EOF &&
|
|
|
|
test_lazy_prereq LAZY true
|
|
|
|
|
|
|
|
test_expect_success lazy 'test_have_prereq LAZY && echo trace'
|
|
|
|
|
|
|
|
test_done
|
|
|
|
EOF
|
|
|
|
|
|
|
|
grep 'echo trace' lazy-prereq-and-tracing/err
|
|
|
|
"
|
|
|
|
|
2021-01-28 07:32:28 +01:00
|
|
|
test_expect_success 'tests clean up after themselves' '
|
t0000: run cleaning test inside sub-test
Our check of test_when_finished is done directly in the main script, and
if we failed to clean, we complain and exit immediately. It's nicer to
signal a test failure here, for a few reasons:
- this gives better output to the user when run under a TAP harness
like "prove"
- constency; it's the only test left in the file that behaves this way
- half of its "if" conditional is nonsense anyway; it picked up a
reference to GIT_TEST_FAIL_PREREQS_INTERNAL in dfe1a17df9 (tests:
add a special setup where prerequisites fail, 2019-05-13) along with
its neighbors, even though it has nothing to do with that flag
We could actually do this without a sub-test at all, and just put our
two tests (one to do cleanup, and one to check that it happened) in the
main script. But doing it in a subtest is conceptually cleaner (from the
perspective of the main test script, we are checking only one thing),
and it remains consistent with the "cleanup when failing" test directly
after it, which has to happen in a sub-test (to avoid the main script
complaining of the failed test).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-28 07:32:35 +01:00
|
|
|
run_sub_test_lib_test cleanup "test with cleanup" <<-\EOF &&
|
|
|
|
clean=no
|
|
|
|
test_expect_success "do cleanup" "
|
|
|
|
test_when_finished clean=yes
|
|
|
|
"
|
|
|
|
test_expect_success "cleanup happened" "
|
|
|
|
test $clean = yes
|
|
|
|
"
|
|
|
|
test_done
|
|
|
|
EOF
|
2021-01-28 07:32:28 +01:00
|
|
|
|
t0000: run cleaning test inside sub-test
Our check of test_when_finished is done directly in the main script, and
if we failed to clean, we complain and exit immediately. It's nicer to
signal a test failure here, for a few reasons:
- this gives better output to the user when run under a TAP harness
like "prove"
- constency; it's the only test left in the file that behaves this way
- half of its "if" conditional is nonsense anyway; it picked up a
reference to GIT_TEST_FAIL_PREREQS_INTERNAL in dfe1a17df9 (tests:
add a special setup where prerequisites fail, 2019-05-13) along with
its neighbors, even though it has nothing to do with that flag
We could actually do this without a sub-test at all, and just put our
two tests (one to do cleanup, and one to check that it happened) in the
main script. But doing it in a subtest is conceptually cleaner (from the
perspective of the main test script, we are checking only one thing),
and it remains consistent with the "cleanup when failing" test directly
after it, which has to happen in a sub-test (to avoid the main script
complaining of the failed test).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-01-28 07:32:35 +01:00
|
|
|
check_sub_test_lib_test cleanup <<-\EOF
|
|
|
|
ok 1 - do cleanup
|
|
|
|
ok 2 - cleanup happened
|
|
|
|
# passed all 2 test(s)
|
|
|
|
1..2
|
|
|
|
EOF
|
|
|
|
'
|
2021-01-28 07:32:28 +01:00
|
|
|
|
2010-10-03 21:59:59 +02:00
|
|
|
test_expect_success 'tests clean up even on failures' "
|
2019-12-20 19:15:49 +01:00
|
|
|
run_sub_test_lib_test_err \
|
2012-12-16 19:28:13 +01:00
|
|
|
failing-cleanup 'Failing tests with cleanup commands' <<-\\EOF &&
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'tests clean up even after a failure' '
|
|
|
|
touch clean-after-failure &&
|
|
|
|
test_when_finished rm clean-after-failure &&
|
|
|
|
(exit 1)
|
|
|
|
'
|
|
|
|
test_expect_success 'failure to clean up causes the test to fail' '
|
|
|
|
test_when_finished \"(exit 2)\"
|
|
|
|
'
|
|
|
|
test_done
|
|
|
|
EOF
|
2012-12-16 19:28:13 +01:00
|
|
|
check_sub_test_lib_test failing-cleanup <<-\\EOF
|
2012-12-16 19:28:09 +01:00
|
|
|
> not ok 1 - tests clean up even after a failure
|
2012-03-02 10:08:28 +01:00
|
|
|
> # Z
|
|
|
|
> # touch clean-after-failure &&
|
|
|
|
> # test_when_finished rm clean-after-failure &&
|
|
|
|
> # (exit 1)
|
|
|
|
> # Z
|
2012-12-16 19:28:09 +01:00
|
|
|
> not ok 2 - failure to clean up causes the test to fail
|
2012-03-02 10:08:28 +01:00
|
|
|
> # Z
|
|
|
|
> # test_when_finished \"(exit 2)\"
|
|
|
|
> # Z
|
|
|
|
> # failed 2 among 2 test(s)
|
|
|
|
> 1..2
|
|
|
|
EOF
|
2010-10-03 21:59:59 +02:00
|
|
|
"
|
|
|
|
|
test-lib: introduce 'test_atexit'
When running Apache, 'git daemon', or p4d, we want to kill them at the
end of the test script, otherwise a leftover daemon process will keep
its port open indefinitely, and thus will interfere with subsequent
executions of the same test script.
So far, we stop these daemon processes "manually", i.e.:
- by registering functions or commands in the trap on EXIT to stop
the daemon while preserving the last seen exit code before the
trap (to deal with a failure when run with '--immediate' or with
interrupts by ctrl-C),
- and by invoking these functions/commands last thing before
'test_done' (and sometimes restoring the test framework's default
trap on EXIT, to prevent the daemons from being killed twice).
On one hand, we do this inconsistently, e.g. 'git p4' tests invoke
different functions in the trap on EXIT and in the last test before
'test_done', and they neither restore the test framework's default trap
on EXIT nor preserve the last seen exit code. On the other hand, this
is error prone, because, as shown in a previous patch in this series,
any output from the cleanup commands in the trap on EXIT can prevent a
proper cleanup when a test script run with '--verbose-log' and certain
shells, notably 'dash', is interrupted.
Let's introduce 'test_atexit', which is loosely modeled after
'test_when_finished', but has a broader scope: rather than running the
commands after the current test case, run them when the test script
finishes, and also run them when the test is interrupted, or exits
early in case of a failure while the '--immediate' option is in
effect.
When running the cleanup commands at the end of a successful test,
then they will be run in 'test_done' before it removes the trash
directory, i.e. the cleanup commands will still be able to access any
pidfiles or socket files in there. When running the cleanup commands
after an interrupt or failure with '--immediate', then they will be
run in the trap on EXIT. In both cases they will be run in
'test_eval_', i.e. both standard error and output of all cleanup
commands will go where they should according to the '-v' or
'--verbose-log' options, and thus won't cause any troubles when
interrupting a test script run with '--verbose-log'.
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-03-13 13:24:11 +01:00
|
|
|
test_expect_success 'test_atexit is run' "
|
2019-12-20 19:15:49 +01:00
|
|
|
run_sub_test_lib_test_err \
|
test-lib: introduce 'test_atexit'
When running Apache, 'git daemon', or p4d, we want to kill them at the
end of the test script, otherwise a leftover daemon process will keep
its port open indefinitely, and thus will interfere with subsequent
executions of the same test script.
So far, we stop these daemon processes "manually", i.e.:
- by registering functions or commands in the trap on EXIT to stop
the daemon while preserving the last seen exit code before the
trap (to deal with a failure when run with '--immediate' or with
interrupts by ctrl-C),
- and by invoking these functions/commands last thing before
'test_done' (and sometimes restoring the test framework's default
trap on EXIT, to prevent the daemons from being killed twice).
On one hand, we do this inconsistently, e.g. 'git p4' tests invoke
different functions in the trap on EXIT and in the last test before
'test_done', and they neither restore the test framework's default trap
on EXIT nor preserve the last seen exit code. On the other hand, this
is error prone, because, as shown in a previous patch in this series,
any output from the cleanup commands in the trap on EXIT can prevent a
proper cleanup when a test script run with '--verbose-log' and certain
shells, notably 'dash', is interrupted.
Let's introduce 'test_atexit', which is loosely modeled after
'test_when_finished', but has a broader scope: rather than running the
commands after the current test case, run them when the test script
finishes, and also run them when the test is interrupted, or exits
early in case of a failure while the '--immediate' option is in
effect.
When running the cleanup commands at the end of a successful test,
then they will be run in 'test_done' before it removes the trash
directory, i.e. the cleanup commands will still be able to access any
pidfiles or socket files in there. When running the cleanup commands
after an interrupt or failure with '--immediate', then they will be
run in the trap on EXIT. In both cases they will be run in
'test_eval_', i.e. both standard error and output of all cleanup
commands will go where they should according to the '-v' or
'--verbose-log' options, and thus won't cause any troubles when
interrupting a test script run with '--verbose-log'.
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-03-13 13:24:11 +01:00
|
|
|
atexit-cleanup 'Run atexit commands' -i <<-\\EOF &&
|
|
|
|
test_expect_success 'tests clean up even after a failure' '
|
|
|
|
> ../../clean-atexit &&
|
|
|
|
test_atexit rm ../../clean-atexit &&
|
|
|
|
> ../../also-clean-atexit &&
|
|
|
|
test_atexit rm ../../also-clean-atexit &&
|
|
|
|
> ../../dont-clean-atexit &&
|
|
|
|
(exit 1)
|
|
|
|
'
|
|
|
|
test_done
|
|
|
|
EOF
|
|
|
|
test_path_is_file dont-clean-atexit &&
|
|
|
|
test_path_is_missing clean-atexit &&
|
|
|
|
test_path_is_missing also-clean-atexit
|
|
|
|
"
|
|
|
|
|
t: add test functions to translate hash-related values
Add several test functions to make working with various hash-related
values easier.
Add test_oid_init, which loads common hash-related constants and
placeholder object IDs from the newly added files in t/oid-info.
Provide values for these constants for both SHA-1 and SHA-256.
Add test_oid_cache, which accepts data on standard input in the form of
hash-specific key-value pairs that can be looked up later, using the
same format as the files in t/oid-info. Document this format in a
t/oid-info/README directory so that it's easier to use in the future.
Add test_oid, which is used to specify look up a per-hash value
(produced on standard output) based on the key specified as its
argument. Usually the data to be looked up will be a hash-related
constant (such as the size of the hash in binary or hexadecimal), a
well-known or placeholder object ID (such as the all-zeros object ID or
one consisting of "deadbeef" repeated), or something similar. For these
reasons, test_oid will usually be used within a command substitution.
Consequently, redirect the error output to standard error, since
otherwise it will not be displayed.
Add test_detect_hash, which currently only detects SHA-1, and
test_set_hash, which can be used to set a different hash algorithm for
test purposes. In the future, test_detect_hash will learn to actually
detect the hash depending on how the testsuite is to be run.
Use the local keyword within these functions to avoid overwriting other
shell variables. We have had a test balloon in place for a couple of
releases to catch shells that don't have this keyword and have not
received any reports of failure. Note that the varying usages of local
used here are supported by all common open-source shells supporting the
local keyword.
Test these new functions as part of t0000, which also serves to
demonstrate basic usage of them. In addition, add documentation on how
to format the lookup data and how to use the test functions.
Implement two basic lookup charts, one for common invalid or synthesized
object IDs, and one for various facts about the hash function in use.
Provide versions of the data for both SHA-1 and SHA-256.
Since we use shell variables for storage, names used for lookup can
currently consist only of shell identifier characters. If this is a
problem in the future, we can hash the names before use.
Improved-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-09-13 07:17:31 +02:00
|
|
|
test_expect_success 'test_oid provides sane info by default' '
|
|
|
|
test_oid zero >actual &&
|
|
|
|
grep "^00*\$" actual &&
|
|
|
|
rawsz="$(test_oid rawsz)" &&
|
|
|
|
hexsz="$(test_oid hexsz)" &&
|
|
|
|
test "$hexsz" -eq $(wc -c <actual) &&
|
|
|
|
test $(( $rawsz * 2)) -eq "$hexsz"
|
|
|
|
'
|
|
|
|
|
|
|
|
test_expect_success 'test_oid can look up data for SHA-1' '
|
|
|
|
test_when_finished "test_detect_hash" &&
|
|
|
|
test_set_hash sha1 &&
|
|
|
|
test_oid zero >actual &&
|
|
|
|
grep "^00*\$" actual &&
|
|
|
|
rawsz="$(test_oid rawsz)" &&
|
|
|
|
hexsz="$(test_oid hexsz)" &&
|
|
|
|
test $(wc -c <actual) -eq 40 &&
|
|
|
|
test "$rawsz" -eq 20 &&
|
|
|
|
test "$hexsz" -eq 40
|
|
|
|
'
|
|
|
|
|
|
|
|
test_expect_success 'test_oid can look up data for SHA-256' '
|
|
|
|
test_when_finished "test_detect_hash" &&
|
|
|
|
test_set_hash sha256 &&
|
|
|
|
test_oid zero >actual &&
|
|
|
|
grep "^00*\$" actual &&
|
|
|
|
rawsz="$(test_oid rawsz)" &&
|
|
|
|
hexsz="$(test_oid hexsz)" &&
|
|
|
|
test $(wc -c <actual) -eq 64 &&
|
|
|
|
test "$rawsz" -eq 32 &&
|
|
|
|
test "$hexsz" -eq 64
|
|
|
|
'
|
|
|
|
|
2020-07-30 01:14:23 +02:00
|
|
|
test_expect_success 'test_oid can look up data for a specified algorithm' '
|
|
|
|
rawsz="$(test_oid --hash=sha1 rawsz)" &&
|
|
|
|
hexsz="$(test_oid --hash=sha1 hexsz)" &&
|
|
|
|
test "$rawsz" -eq 20 &&
|
|
|
|
test "$hexsz" -eq 40 &&
|
|
|
|
rawsz="$(test_oid --hash=sha256 rawsz)" &&
|
|
|
|
hexsz="$(test_oid --hash=sha256 hexsz)" &&
|
|
|
|
test "$rawsz" -eq 32 &&
|
|
|
|
test "$hexsz" -eq 64
|
|
|
|
'
|
|
|
|
|
tests: add 'test_bool_env' to catch non-bool GIT_TEST_* values
Since 3b072c577b (tests: replace test_tristate with "git env--helper",
2019-06-21) we get the normalized bool values of various GIT_TEST_*
environment variables via 'git env--helper'. Now, while the 'git
env--helper' command itself does catch invalid values in the
environment variable or in the given --default and exits with error
(exit code 128 or 129, respectively), it's invoked in conditions like
'if ! git env--helper ...', which means that all invalid bool values
are interpreted the same as the ordinary 'false' (exit code 1). This
has led to inadvertently skipped httpd tests in our CI builds for a
couple of weeks, see 3960290675 (ci: restore running httpd tests,
2019-09-06).
Let's be more careful about what the test suite accepts as bool values
in GIT_TEST_* environment variables, and error out loud and clear on
invalid values instead of simply skipping tests. Add the
'test_bool_env' helper function to encapsulate the invocation of 'git
env--helper' and the verification of its exit code, and replace all
invocations of that command in our test framework and test suite with
a call to this new helper (except in 't0017-env-helper.sh', of
course).
$ GIT_TEST_GIT_DAEMON=YesPlease ./t5570-git-daemon.sh
fatal: bad numeric config value 'YesPlease' for 'GIT_TEST_GIT_DAEMON': invalid unit
error: test_bool_env requires bool values both for $GIT_TEST_GIT_DAEMON and for the default fallback
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-11-22 14:14:36 +01:00
|
|
|
test_expect_success 'test_bool_env' '
|
|
|
|
(
|
|
|
|
sane_unset envvar &&
|
|
|
|
|
|
|
|
test_bool_env envvar true &&
|
|
|
|
! test_bool_env envvar false &&
|
|
|
|
|
|
|
|
envvar= &&
|
|
|
|
export envvar &&
|
|
|
|
! test_bool_env envvar true &&
|
|
|
|
! test_bool_env envvar false &&
|
|
|
|
|
|
|
|
envvar=true &&
|
|
|
|
test_bool_env envvar true &&
|
|
|
|
test_bool_env envvar false &&
|
|
|
|
|
|
|
|
envvar=false &&
|
|
|
|
! test_bool_env envvar true &&
|
|
|
|
! test_bool_env envvar false &&
|
|
|
|
|
|
|
|
envvar=invalid &&
|
|
|
|
# When encountering an invalid bool value, test_bool_env
|
|
|
|
# prints its error message to the original stderr of the
|
|
|
|
# test script, hence the redirection of fd 7, and aborts
|
|
|
|
# with "exit 1", hence the subshell.
|
|
|
|
! ( test_bool_env envvar true ) 7>err &&
|
|
|
|
grep "error: test_bool_env requires bool values" err &&
|
|
|
|
|
|
|
|
envvar=true &&
|
|
|
|
! ( test_bool_env envvar invalid ) 7>err &&
|
|
|
|
grep "error: test_bool_env requires bool values" err
|
|
|
|
)
|
|
|
|
'
|
|
|
|
|
2005-05-14 07:50:32 +02:00
|
|
|
################################################################
|
|
|
|
# Basics of the basics
|
|
|
|
|
2018-09-13 07:17:33 +02:00
|
|
|
test_oid_cache <<\EOF
|
|
|
|
path0f sha1:f87290f8eb2cbbea7857214459a0739927eab154
|
|
|
|
path0f sha256:638106af7c38be056f3212cbd7ac65bc1bac74f420ca5a436ff006a9d025d17d
|
|
|
|
|
|
|
|
path0s sha1:15a98433ae33114b085f3eb3bb03b832b3180a01
|
|
|
|
path0s sha256:3a24cc53cf68edddac490bbf94a418a52932130541361f685df685e41dd6c363
|
|
|
|
|
|
|
|
path2f sha1:3feff949ed00a62d9f7af97c15cd8a30595e7ac7
|
|
|
|
path2f sha256:2a7f36571c6fdbaf0e3f62751a0b25a3f4c54d2d1137b3f4af9cb794bb498e5f
|
|
|
|
|
|
|
|
path2s sha1:d8ce161addc5173867a3c3c730924388daedbc38
|
|
|
|
path2s sha256:18fd611b787c2e938ddcc248fabe4d66a150f9364763e9ec133dd01d5bb7c65a
|
|
|
|
|
|
|
|
path2d sha1:58a09c23e2ca152193f2786e06986b7b6712bdbe
|
|
|
|
path2d sha256:00e4b32b96e7e3d65d79112dcbea53238a22715f896933a62b811377e2650c17
|
|
|
|
|
|
|
|
path3f sha1:0aa34cae68d0878578ad119c86ca2b5ed5b28376
|
|
|
|
path3f sha256:09f58616b951bd571b8cb9dc76d372fbb09ab99db2393f5ab3189d26c45099ad
|
|
|
|
|
|
|
|
path3s sha1:8599103969b43aff7e430efea79ca4636466794f
|
|
|
|
path3s sha256:fce1aed087c053306f3f74c32c1a838c662bbc4551a7ac2420f5d6eb061374d0
|
|
|
|
|
|
|
|
path3d sha1:21ae8269cacbe57ae09138dcc3a2887f904d02b3
|
|
|
|
path3d sha256:9b60497be959cb830bf3f0dc82bcc9ad9e925a24e480837ade46b2295e47efe1
|
|
|
|
|
|
|
|
subp3f sha1:00fb5908cb97c2564a9783c0c64087333b3b464f
|
|
|
|
subp3f sha256:a1a9e16998c988453f18313d10375ee1d0ddefe757e710dcae0d66aa1e0c58b3
|
|
|
|
|
|
|
|
subp3s sha1:6649a1ebe9e9f1c553b66f5a6e74136a07ccc57c
|
|
|
|
subp3s sha256:81759d9f5e93c6546ecfcadb560c1ff057314b09f93fe8ec06e2d8610d34ef10
|
|
|
|
|
|
|
|
subp3d sha1:3c5e5399f3a333eddecce7a9b9465b63f65f51e2
|
|
|
|
subp3d sha256:76b4ef482d4fa1c754390344cf3851c7f883b27cf9bc999c6547928c46aeafb7
|
|
|
|
|
|
|
|
root sha1:087704a96baf1c2d1c869a8b084481e121c88b5b
|
|
|
|
root sha256:9481b52abab1b2ffeedbf9de63ce422b929f179c1b98ff7bee5f8f1bc0710751
|
|
|
|
|
|
|
|
simpletree sha1:7bb943559a305bdd6bdee2cef6e5df2413c3d30a
|
|
|
|
simpletree sha256:1710c07a6c86f9a3c7376364df04c47ee39e5a5e221fcdd84b743bc9bb7e2bc5
|
|
|
|
EOF
|
|
|
|
|
2005-05-14 07:50:32 +02:00
|
|
|
# updating a new file without --add should fail.
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'git update-index without --add should fail adding' '
|
|
|
|
test_must_fail git update-index should-be-empty
|
2008-02-01 10:50:53 +01:00
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
|
|
|
# and with --add it should succeed, even if it is empty (it used to fail).
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'git update-index with --add should succeed' '
|
|
|
|
git update-index --add should-be-empty
|
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'writing tree out with git write-tree' '
|
|
|
|
tree=$(git write-tree)
|
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
|
|
|
# we know the shape and contents of the tree and know the object ID for it.
|
2018-09-13 07:17:33 +02:00
|
|
|
test_expect_success 'validate object ID of a known tree' '
|
|
|
|
test "$tree" = "$(test_oid simpletree)"
|
2012-03-02 10:08:28 +01:00
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
|
|
|
# Removing paths.
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'git update-index without --remove should fail removing' '
|
|
|
|
rm -f should-be-empty full-of-directories &&
|
|
|
|
test_must_fail git update-index should-be-empty
|
2008-02-01 10:50:53 +01:00
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'git update-index with --remove should be able to remove' '
|
|
|
|
git update-index --remove should-be-empty
|
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
|
|
|
# Empty tree can be written with recent write-tree.
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'git write-tree should be able to write an empty tree' '
|
|
|
|
tree=$(git write-tree)
|
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'validate object ID of a known tree' '
|
2016-07-16 07:06:24 +02:00
|
|
|
test "$tree" = $EMPTY_TREE
|
2012-03-02 10:08:28 +01:00
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
|
|
|
# Various types of objects
|
2012-03-02 10:08:28 +01:00
|
|
|
|
|
|
|
test_expect_success 'adding various types of objects with git update-index --add' '
|
|
|
|
mkdir path2 path3 path3/subp3 &&
|
|
|
|
paths="path0 path2/file2 path3/file3 path3/subp3/file3" &&
|
|
|
|
(
|
|
|
|
for p in $paths
|
|
|
|
do
|
|
|
|
echo "hello $p" >$p || exit 1
|
2013-06-07 22:53:29 +02:00
|
|
|
test_ln_s_add "hello $p" ${p}sym || exit 1
|
2012-03-02 10:08:28 +01:00
|
|
|
done
|
|
|
|
) &&
|
|
|
|
find path* ! -type d -print | xargs git update-index --add
|
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
|
|
|
# Show them and see that matches what we expect.
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'showing stage with git ls-files --stage' '
|
|
|
|
git ls-files --stage >current
|
|
|
|
'
|
|
|
|
|
2018-09-13 07:17:33 +02:00
|
|
|
test_expect_success 'validate git ls-files output for a known tree' '
|
|
|
|
cat >expected <<-EOF &&
|
|
|
|
100644 $(test_oid path0f) 0 path0
|
|
|
|
120000 $(test_oid path0s) 0 path0sym
|
|
|
|
100644 $(test_oid path2f) 0 path2/file2
|
|
|
|
120000 $(test_oid path2s) 0 path2/file2sym
|
|
|
|
100644 $(test_oid path3f) 0 path3/file3
|
|
|
|
120000 $(test_oid path3s) 0 path3/file3sym
|
|
|
|
100644 $(test_oid subp3f) 0 path3/subp3/file3
|
|
|
|
120000 $(test_oid subp3s) 0 path3/subp3/file3sym
|
2012-03-02 10:08:28 +01:00
|
|
|
EOF
|
|
|
|
test_cmp expected current
|
|
|
|
'
|
|
|
|
|
|
|
|
test_expect_success 'writing tree out with git write-tree' '
|
|
|
|
tree=$(git write-tree)
|
|
|
|
'
|
|
|
|
|
2018-09-13 07:17:33 +02:00
|
|
|
test_expect_success 'validate object ID for a known tree' '
|
|
|
|
test "$tree" = "$(test_oid root)"
|
2012-03-02 10:08:28 +01:00
|
|
|
'
|
|
|
|
|
|
|
|
test_expect_success 'showing tree with git ls-tree' '
|
|
|
|
git ls-tree $tree >current
|
|
|
|
'
|
|
|
|
|
2018-09-13 07:17:33 +02:00
|
|
|
test_expect_success 'git ls-tree output for a known tree' '
|
|
|
|
cat >expected <<-EOF &&
|
|
|
|
100644 blob $(test_oid path0f) path0
|
|
|
|
120000 blob $(test_oid path0s) path0sym
|
|
|
|
040000 tree $(test_oid path2d) path2
|
|
|
|
040000 tree $(test_oid path3d) path3
|
2012-03-02 10:08:28 +01:00
|
|
|
EOF
|
|
|
|
test_cmp expected current
|
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
2005-11-28 11:32:42 +01:00
|
|
|
# This changed in ls-tree pathspec change -- recursive does
|
|
|
|
# not show tree nodes anymore.
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'showing tree with git ls-tree -r' '
|
|
|
|
git ls-tree -r $tree >current
|
|
|
|
'
|
|
|
|
|
2018-09-13 07:17:33 +02:00
|
|
|
test_expect_success 'git ls-tree -r output for a known tree' '
|
|
|
|
cat >expected <<-EOF &&
|
|
|
|
100644 blob $(test_oid path0f) path0
|
|
|
|
120000 blob $(test_oid path0s) path0sym
|
|
|
|
100644 blob $(test_oid path2f) path2/file2
|
|
|
|
120000 blob $(test_oid path2s) path2/file2sym
|
|
|
|
100644 blob $(test_oid path3f) path3/file3
|
|
|
|
120000 blob $(test_oid path3s) path3/file3sym
|
|
|
|
100644 blob $(test_oid subp3f) path3/subp3/file3
|
|
|
|
120000 blob $(test_oid subp3s) path3/subp3/file3sym
|
2012-03-02 10:08:28 +01:00
|
|
|
EOF
|
|
|
|
test_cmp expected current
|
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
2006-04-26 11:27:59 +02:00
|
|
|
# But with -r -t we can have both.
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'showing tree with git ls-tree -r -t' '
|
|
|
|
git ls-tree -r -t $tree >current
|
|
|
|
'
|
2006-11-13 14:50:00 +01:00
|
|
|
|
2018-09-13 07:17:33 +02:00
|
|
|
test_expect_success 'git ls-tree -r output for a known tree' '
|
|
|
|
cat >expected <<-EOF &&
|
|
|
|
100644 blob $(test_oid path0f) path0
|
|
|
|
120000 blob $(test_oid path0s) path0sym
|
|
|
|
040000 tree $(test_oid path2d) path2
|
|
|
|
100644 blob $(test_oid path2f) path2/file2
|
|
|
|
120000 blob $(test_oid path2s) path2/file2sym
|
|
|
|
040000 tree $(test_oid path3d) path3
|
|
|
|
100644 blob $(test_oid path3f) path3/file3
|
|
|
|
120000 blob $(test_oid path3s) path3/file3sym
|
|
|
|
040000 tree $(test_oid subp3d) path3/subp3
|
|
|
|
100644 blob $(test_oid subp3f) path3/subp3/file3
|
|
|
|
120000 blob $(test_oid subp3s) path3/subp3/file3sym
|
2012-03-02 10:08:28 +01:00
|
|
|
EOF
|
|
|
|
test_cmp expected current
|
|
|
|
'
|
2006-11-13 14:50:00 +01:00
|
|
|
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'writing partial tree out with git write-tree --prefix' '
|
|
|
|
ptree=$(git write-tree --prefix=path3)
|
2008-02-01 10:50:53 +01:00
|
|
|
'
|
2006-11-13 14:50:00 +01:00
|
|
|
|
2018-09-13 07:17:33 +02:00
|
|
|
test_expect_success 'validate object ID for a known tree' '
|
|
|
|
test "$ptree" = $(test_oid path3d)
|
2012-03-02 10:08:28 +01:00
|
|
|
'
|
|
|
|
|
|
|
|
test_expect_success 'writing partial tree out with git write-tree --prefix' '
|
|
|
|
ptree=$(git write-tree --prefix=path3/subp3)
|
|
|
|
'
|
|
|
|
|
2018-09-13 07:17:33 +02:00
|
|
|
test_expect_success 'validate object ID for a known tree' '
|
|
|
|
test "$ptree" = $(test_oid subp3d)
|
2012-03-02 10:08:28 +01:00
|
|
|
'
|
|
|
|
|
|
|
|
test_expect_success 'put invalid objects into the index' '
|
|
|
|
rm -f .git/index &&
|
2018-09-13 07:17:32 +02:00
|
|
|
suffix=$(echo $ZERO_OID | sed -e "s/^.//") &&
|
|
|
|
cat >badobjects <<-EOF &&
|
|
|
|
100644 blob $(test_oid 001) dir/file1
|
|
|
|
100644 blob $(test_oid 002) dir/file2
|
|
|
|
100644 blob $(test_oid 003) dir/file3
|
|
|
|
100644 blob $(test_oid 004) dir/file4
|
|
|
|
100644 blob $(test_oid 005) dir/file5
|
2012-03-02 10:08:28 +01:00
|
|
|
EOF
|
|
|
|
git update-index --index-info <badobjects
|
|
|
|
'
|
|
|
|
|
|
|
|
test_expect_success 'writing this tree without --missing-ok' '
|
|
|
|
test_must_fail git write-tree
|
|
|
|
'
|
|
|
|
|
|
|
|
test_expect_success 'writing this tree with --missing-ok' '
|
|
|
|
git write-tree --missing-ok
|
|
|
|
'
|
2006-11-13 14:50:00 +01:00
|
|
|
|
|
|
|
|
2005-05-14 07:50:32 +02:00
|
|
|
################################################################
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'git read-tree followed by write-tree should be idempotent' '
|
2015-03-20 11:07:15 +01:00
|
|
|
rm -f .git/index &&
|
2012-03-02 10:08:28 +01:00
|
|
|
git read-tree $tree &&
|
2020-10-17 04:43:53 +02:00
|
|
|
test_path_is_file .git/index &&
|
2012-03-02 10:08:28 +01:00
|
|
|
newtree=$(git write-tree) &&
|
|
|
|
test "$newtree" = "$tree"
|
|
|
|
'
|
|
|
|
|
2018-09-13 07:17:33 +02:00
|
|
|
test_expect_success 'validate git diff-files output for a know cache/work tree state' '
|
|
|
|
cat >expected <<EOF &&
|
|
|
|
:100644 100644 $(test_oid path0f) $ZERO_OID M path0
|
|
|
|
:120000 120000 $(test_oid path0s) $ZERO_OID M path0sym
|
|
|
|
:100644 100644 $(test_oid path2f) $ZERO_OID M path2/file2
|
|
|
|
:120000 120000 $(test_oid path2s) $ZERO_OID M path2/file2sym
|
|
|
|
:100644 100644 $(test_oid path3f) $ZERO_OID M path3/file3
|
|
|
|
:120000 120000 $(test_oid path3s) $ZERO_OID M path3/file3sym
|
|
|
|
:100644 100644 $(test_oid subp3f) $ZERO_OID M path3/subp3/file3
|
|
|
|
:120000 120000 $(test_oid subp3s) $ZERO_OID M path3/subp3/file3sym
|
2005-05-14 07:50:32 +02:00
|
|
|
EOF
|
2012-03-02 10:08:28 +01:00
|
|
|
git diff-files >current &&
|
2018-10-05 23:54:04 +02:00
|
|
|
test_cmp expected current
|
2012-03-02 10:08:28 +01:00
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'git update-index --refresh should succeed' '
|
|
|
|
git update-index --refresh
|
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
2012-03-02 10:08:28 +01:00
|
|
|
test_expect_success 'no diff after checkout and git update-index --refresh' '
|
|
|
|
git diff-files >current &&
|
|
|
|
cmp -s current /dev/null
|
|
|
|
'
|
2005-05-14 07:50:32 +02:00
|
|
|
|
2006-04-27 03:25:15 +02:00
|
|
|
################################################################
|
2018-09-13 07:17:33 +02:00
|
|
|
P=$(test_oid root)
|
2012-03-02 10:08:28 +01:00
|
|
|
|
2018-09-13 07:17:33 +02:00
|
|
|
test_expect_success 'git commit-tree records the correct tree in a commit' '
|
2012-03-02 10:08:28 +01:00
|
|
|
commit0=$(echo NO | git commit-tree $P) &&
|
|
|
|
tree=$(git show --pretty=raw $commit0 |
|
|
|
|
sed -n -e "s/^tree //p" -e "/^author /q") &&
|
|
|
|
test "z$tree" = "z$P"
|
|
|
|
'
|
|
|
|
|
2018-09-13 07:17:33 +02:00
|
|
|
test_expect_success 'git commit-tree records the correct parent in a commit' '
|
2012-03-02 10:08:28 +01:00
|
|
|
commit1=$(echo NO | git commit-tree $P -p $commit0) &&
|
|
|
|
parent=$(git show --pretty=raw $commit1 |
|
|
|
|
sed -n -e "s/^parent //p" -e "/^author /q") &&
|
|
|
|
test "z$commit0" = "z$parent"
|
|
|
|
'
|
|
|
|
|
2018-09-13 07:17:33 +02:00
|
|
|
test_expect_success 'git commit-tree omits duplicated parent in a commit' '
|
2012-03-02 10:08:28 +01:00
|
|
|
commit2=$(echo NO | git commit-tree $P -p $commit0 -p $commit0) &&
|
|
|
|
parent=$(git show --pretty=raw $commit2 |
|
|
|
|
sed -n -e "s/^parent //p" -e "/^author /q" |
|
|
|
|
sort -u) &&
|
|
|
|
test "z$commit0" = "z$parent" &&
|
|
|
|
numparent=$(git show --pretty=raw $commit2 |
|
|
|
|
sed -n -e "s/^parent //p" -e "/^author /q" |
|
|
|
|
wc -l) &&
|
|
|
|
test $numparent = 1
|
|
|
|
'
|
2006-04-27 03:25:15 +02:00
|
|
|
|
2006-12-17 02:39:06 +01:00
|
|
|
test_expect_success 'update-index D/F conflict' '
|
|
|
|
mv path0 tmp &&
|
|
|
|
mv path2 path0 &&
|
|
|
|
mv tmp path2 &&
|
|
|
|
git update-index --add --replace path2 path0/file2 &&
|
|
|
|
numpath0=$(git ls-files path0 | wc -l) &&
|
|
|
|
test $numpath0 = 1
|
|
|
|
'
|
|
|
|
|
2008-01-19 08:42:00 +01:00
|
|
|
test_expect_success 'very long name in the index handled sanely' '
|
|
|
|
|
|
|
|
a=a && # 1
|
|
|
|
a=$a$a$a$a$a$a$a$a$a$a$a$a$a$a$a$a && # 16
|
|
|
|
a=$a$a$a$a$a$a$a$a$a$a$a$a$a$a$a$a && # 256
|
|
|
|
a=$a$a$a$a$a$a$a$a$a$a$a$a$a$a$a$a && # 4096
|
|
|
|
a=${a}q &&
|
|
|
|
|
|
|
|
>path4 &&
|
|
|
|
git update-index --add path4 &&
|
|
|
|
(
|
|
|
|
git ls-files -s path4 |
|
|
|
|
sed -e "s/ .*/ /" |
|
2018-07-02 02:23:55 +02:00
|
|
|
tr -d "\012" &&
|
2008-01-19 08:42:00 +01:00
|
|
|
echo "$a"
|
|
|
|
) | git update-index --index-info &&
|
|
|
|
len=$(git ls-files "a*" | wc -c) &&
|
|
|
|
test $len = 4098
|
|
|
|
'
|
|
|
|
|
2020-07-07 08:04:38 +02:00
|
|
|
test_expect_success 'test_must_fail on a failing git command' '
|
|
|
|
test_must_fail git notacommand
|
|
|
|
'
|
|
|
|
|
|
|
|
test_expect_success 'test_must_fail on a failing git command with env' '
|
|
|
|
test_must_fail env var1=a var2=b git notacommand
|
|
|
|
'
|
|
|
|
|
|
|
|
test_expect_success 'test_must_fail rejects a non-git command' '
|
|
|
|
! test_must_fail grep ^$ notafile 2>err &&
|
|
|
|
grep -F "test_must_fail: only '"'"'git'"'"' is allowed" err
|
|
|
|
'
|
|
|
|
|
|
|
|
test_expect_success 'test_must_fail rejects a non-git command with env' '
|
|
|
|
! test_must_fail env var1=a var2=b grep ^$ notafile 2>err &&
|
|
|
|
grep -F "test_must_fail: only '"'"'git'"'"' is allowed" err
|
|
|
|
'
|
|
|
|
|
2005-05-14 07:50:32 +02:00
|
|
|
test_done
|