# Library of functions shared by all test scripts, included by
# test-lib.sh.
#
# Copyright (c) 2005 Junio C Hamano
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/ .

# The semantics of the editor variables are that of invoking
# sh -c "$EDITOR \"$@\"" files ...
#
# If our trash directory contains shell metacharacters, they will be
# interpreted if we just set $EDITOR directly, so do a little dance with
# environment variables to work around this.
#
# In particular, quoting isn't enough, as the path may contain the same quote
# that we're using.
test_set_editor () {
	FAKE_EDITOR="$1"
	export FAKE_EDITOR
	EDITOR='"$FAKE_EDITOR"'
	export EDITOR
}
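
# A sketch of typical usage (the helper script name is made up): create a
# fake editor with write_script, then point the test at it.
#
#	write_script fake-editor.sh <<-\EOF &&
#	echo "edited by fake editor" >"$1"
#	EOF
#	test_set_editor "$PWD/fake-editor.sh" &&
#	git commit --amend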

test_set_index_version () {
	GIT_INDEX_VERSION="$1"
	export GIT_INDEX_VERSION
}

test_decode_color () {
	awk '
		function name(n) {
			if (n == 0) return "RESET";
			if (n == 1) return "BOLD";
			if (n == 30) return "BLACK";
			if (n == 31) return "RED";
			if (n == 32) return "GREEN";
			if (n == 33) return "YELLOW";
			if (n == 34) return "BLUE";
			if (n == 35) return "MAGENTA";
			if (n == 36) return "CYAN";
			if (n == 37) return "WHITE";
			if (n == 40) return "BLACK";
			if (n == 41) return "BRED";
			if (n == 42) return "BGREEN";
			if (n == 43) return "BYELLOW";
			if (n == 44) return "BBLUE";
			if (n == 45) return "BMAGENTA";
			if (n == 46) return "BCYAN";
			if (n == 47) return "BWHITE";
		}
		{
			while (match($0, /\033\[[0-9;]*m/) != 0) {
				printf "%s<", substr($0, 1, RSTART-1);
				codes = substr($0, RSTART+2, RLENGTH-3);
				if (length(codes) == 0)
					printf "%s", name(0)
				else {
					n = split(codes, ary, ";");
					sep = "";
					for (i = 1; i <= n; i++) {
						printf "%s%s", sep, name(ary[i]);
						sep = ";"
					}
				}
				printf ">";
				$0 = substr($0, RSTART + RLENGTH, length($0) - RSTART - RLENGTH + 1);
			}
			print
		}
	'
}
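
# A sketch of typical usage; the exact output shown here is made up, but
# escape sequences come out as readable <NAME> markers:
#
#	git log -1 --oneline --color=always | test_decode_color
#	# might print something like: <YELLOW>1234abc<RESET> initial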

lf_to_nul () {
	perl -pe 'y/\012/\000/'
}

nul_to_q () {
	perl -pe 'y/\000/Q/'
}

q_to_nul () {
	perl -pe 'y/Q/\000/'
}

q_to_cr () {
	tr Q '\015'
}

q_to_tab () {
	tr Q '\011'
}

qz_to_tab_space () {
	tr QZ '\011\040'
}

append_cr () {
	sed -e 's/$/Q/' | tr Q '\015'
}

remove_cr () {
	tr '\015' Q | sed -e 's/Q$//'
}
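
# A sketch of how the Q/NUL helpers are typically used, assuming the only
# tracked file is "file.t": write the expected output with "Q" standing in
# for NUL bytes, then convert before comparing.
#
#	printf "file.tQ" >expect.q &&
#	q_to_nul <expect.q >expect &&
#	git ls-files -z >actual &&
#	test_cmp expect actual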

# In some bourne shell implementations, the "unset" builtin returns
# nonzero status when a variable to be unset was not set in the first
# place.
#
# Use sane_unset when that should not be considered an error.

sane_unset () {
	unset "$@"
	return 0
}

test_tick () {
	if test -z "${test_tick+set}"
	then
		test_tick=1112911993
	else
		test_tick=$(($test_tick + 60))
	fi
	GIT_COMMITTER_DATE="$test_tick -0700"
	GIT_AUTHOR_DATE="$test_tick -0700"
	export GIT_COMMITTER_DATE GIT_AUTHOR_DATE
}
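
# A sketch of typical usage: advance the fake clock so that each commit
# gets a distinct, deterministic timestamp.
#
#	test_tick &&
#	git commit -m "first" &&
#	test_tick &&
#	git commit -m "second"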

# Stop execution and start a shell. This is useful for debugging tests.
#
# Be sure to remove all invocations of this command before submitting.

test_pause () {
	"$SHELL_PATH" <&6 >&5 2>&7
}

# Wrap git in gdb. Adding this to a command can make it easier to
# understand what is going on in a failing test.
#
# Example: "debug git checkout master".
#
# The redirections reconnect the gdb session to the test's original
# stdin, stdout and stderr, so the session is interactive even though the
# test's own stdin is normally redirected from /dev/null.
debug () {
	GIT_TEST_GDB=1 "$@" <&6 >&5 2>&7
}

# Call test_commit with the arguments
# "[-C <directory>] <message> [<file> [<contents> [<tag>]]]"
#
# This will commit a file with the given contents and the given commit
# message, and tag the resulting commit with the given tag name.
#
# <file>, <contents>, and <tag> all default to <message>.
#
# If the first argument is "-C", the second argument is used as a path for
# the git invocations.

test_commit () {
	notick= &&
	signoff= &&
	indir= &&
	while test $# != 0
	do
		case "$1" in
		--notick)
			notick=yes
			;;
		--signoff)
			signoff="$1"
			;;
		-C)
			indir="$2"
			shift
			;;
		*)
			break
			;;
		esac
		shift
	done &&
	indir=${indir:+"$indir"/} &&
	file=${2:-"$1.t"} &&
	echo "${3-$1}" > "$indir$file" &&
	git ${indir:+ -C "$indir"} add "$file" &&
	if test -z "$notick"
	then
		test_tick
	fi &&
	git ${indir:+ -C "$indir"} commit $signoff -m "$1" &&
	git ${indir:+ -C "$indir"} tag "${4:-$1}"
}
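
# A sketch of typical usage: each call creates one commit and a tag of the
# same name, so later tests can refer to history by those tags.
#
#	test_commit first &&
#	test_commit second file.t "file contents" tag-two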

# Call test_merge with the arguments "<message> <commit>", where <commit>
# can be a tag pointing to the commit-to-merge.

test_merge () {
	test_tick &&
	git merge -m "$1" "$2" &&
	git tag "$1"
}

# This function helps systems where core.filemode=false is set.
# Use it instead of plain 'chmod +x' to set or unset the executable bit
# of a file in the working directory and add it to the index.

test_chmod () {
	chmod "$@" &&
	git update-index --add "--chmod=$@"
}

# Get the modebits from a file.
test_modebits () {
	ls -l "$1" | sed -e 's|^\(..........\).*|\1|'
}

# Unset a configuration variable, but don't fail if it doesn't exist.
test_unconfig () {
	config_dir=
	if test "$1" = -C
	then
		shift
		config_dir=$1
		shift
	fi
	git ${config_dir:+-C "$config_dir"} config --unset-all "$@"
	config_status=$?
	case "$config_status" in
	5) # ok, nothing to unset
		config_status=0
		;;
	esac
	return $config_status
}

# Set git config, automatically unsetting it after the test is over.
test_config () {
	config_dir=
	if test "$1" = -C
	then
		shift
		config_dir=$1
		shift
	fi
	test_when_finished "test_unconfig ${config_dir:+-C '$config_dir'} '$1'" &&
	git ${config_dir:+-C "$config_dir"} config "$@"
}

test_config_global () {
	test_when_finished "test_unconfig --global '$1'" &&
	git config --global "$@"
}
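
# A sketch of typical usage (the variable and file name are illustrative):
# the setting is scoped to the current test and removed automatically when
# the test finishes.
#
#	test_expect_success 'respects core.autocrlf' '
#		test_config core.autocrlf true &&
#		git add file.t
#	'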

write_script () {
	{
		echo "#!${2-"$SHELL_PATH"}" &&
		cat
	} >"$1" &&
	chmod +x "$1"
}
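
# A sketch of typical usage (the script name is illustrative): the body is
# read from stdin and the interpreter defaults to $SHELL_PATH.
#
#	write_script check-args.sh <<-\EOF &&
#	echo "$*" >args.out
#	EOF
#	./check-args.sh one two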

# Use test_set_prereq to tell that a particular prerequisite is available.
# The prerequisite can later be checked for in two ways:
#
# - Explicitly using test_have_prereq.
#
# - Implicitly by specifying the prerequisite tag in the calls to
#   test_expect_{success,failure,code}.
#
# The single parameter is the prerequisite tag (a simple word, in all
# capital letters by convention).

test_set_prereq () {
	satisfied_prereq="$satisfied_prereq$1 "
}
satisfied_prereq=" "
lazily_testable_prereq= lazily_tested_prereq=

# Usage: test_lazy_prereq PREREQ 'script'
test_lazy_prereq () {
	lazily_testable_prereq="$lazily_testable_prereq$1 "
	eval test_prereq_lazily_$1=\$2
}
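
# A sketch of how a lazy prerequisite might be declared (the tag name here
# is made up): the script runs in a throwaway directory the first time the
# prerequisite is checked, and its exit status decides the result.
#
#	test_lazy_prereq CASE_SENSITIVE_FS '
#		echo lower >file &&
#		! test -e FILE
#	'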

test_run_lazy_prereq_ () {
	script='
mkdir -p "$TRASH_DIRECTORY/prereq-test-dir" &&
(
	cd "$TRASH_DIRECTORY/prereq-test-dir" &&'"$2"'
)'
	say >&3 "checking prerequisite: $1"
	say >&3 "$script"
	test_eval_ "$script"
	eval_ret=$?
	rm -rf "$TRASH_DIRECTORY/prereq-test-dir"
	if test "$eval_ret" = 0; then
		say >&3 "prerequisite $1 ok"
	else
		say >&3 "prerequisite $1 not satisfied"
	fi
	return $eval_ret
}

test_have_prereq () {
	# prerequisites can be concatenated with ','
	save_IFS=$IFS
	IFS=,
	set -- $*
	IFS=$save_IFS

	total_prereq=0
	ok_prereq=0
	missing_prereq=

	for prerequisite
	do
		case "$prerequisite" in
		!*)
			negative_prereq=t
			prerequisite=${prerequisite#!}
			;;
		*)
			negative_prereq=
		esac

		case " $lazily_tested_prereq " in
		*" $prerequisite "*)
			;;
		*)
			case " $lazily_testable_prereq " in
			*" $prerequisite "*)
				eval "script=\$test_prereq_lazily_$prerequisite" &&
				if test_run_lazy_prereq_ "$prerequisite" "$script"
				then
					test_set_prereq $prerequisite
				fi
				lazily_tested_prereq="$lazily_tested_prereq$prerequisite "
			esac
			;;
		esac

		total_prereq=$(($total_prereq + 1))
		case "$satisfied_prereq" in
		*" $prerequisite "*)
			satisfied_this_prereq=t
			;;
		*)
			satisfied_this_prereq=
		esac

		case "$satisfied_this_prereq,$negative_prereq" in
		t,|,t)
			ok_prereq=$(($ok_prereq + 1))
			;;
		*)
			# Keep a list of missing prerequisites; restore
			# the negative marker if necessary.
			prerequisite=${negative_prereq:+!}$prerequisite
			if test -z "$missing_prereq"
			then
				missing_prereq=$prerequisite
			else
				missing_prereq="$prerequisite,$missing_prereq"
			fi
		esac
	done

	test $total_prereq = $ok_prereq
}

test_declared_prereq () {
	case ",$test_prereq," in
	*,$1,*)
		return 0
		;;
	esac
	return 1
}

test_verify_prereq () {
	test -z "$test_prereq" ||
	expr >/dev/null "$test_prereq" : '[A-Z0-9_,!]*$' ||
	error "bug in the test script: '$test_prereq' does not look like a prereq"
}

test_expect_failure () {
	test_start_
	test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
	test "$#" = 2 ||
	error "bug in the test script: not 2 or 3 parameters to test-expect-failure"
	test_verify_prereq
	export test_prereq
	if ! test_skip "$@"
	then
		say >&3 "checking known breakage: $2"
		if test_run_ "$2" expecting_failure
		then
			test_known_broken_ok_ "$1"
		else
			test_known_broken_failure_ "$1"
		fi
	fi
	test_finish_
}

test_expect_success () {
	test_start_
	test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
	test "$#" = 2 ||
	error "bug in the test script: not 2 or 3 parameters to test-expect-success"
	test_verify_prereq
	export test_prereq
	if ! test_skip "$@"
	then
		say >&3 "expecting success: $2"
		if test_run_ "$2"
		then
			test_ok_ "$1"
		else
			test_failure_ "$@"
		fi
	fi
	test_finish_
}
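
# A sketch of how a prerequisite tag is passed as the optional first
# argument (the test body is illustrative):
#
#	test_expect_success SYMLINKS 'symlink is preserved' '
#		ln -s target link &&
#		git add link
#	'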

# test_external runs external test scripts that provide continuous
# test output about their progress, and succeeds/fails on
# zero/non-zero exit code. It outputs the test output on stdout even
# in non-verbose mode, and announces the external script with "# run
# <n>: ..." before running it. When providing relative paths, keep in
# mind that all scripts run in "trash directory".
# Usage: test_external description command arguments...
# Example: test_external 'Perl API' perl ../path/to/test.pl
test_external () {
	test "$#" = 4 && { test_prereq=$1; shift; } || test_prereq=
	test "$#" = 3 ||
	error >&5 "bug in the test script: not 3 or 4 parameters to test_external"
	descr="$1"
	shift
	test_verify_prereq
	export test_prereq
	if ! test_skip "$descr" "$@"
	then
		# Announce the script to reduce confusion about the
		# test output that follows.
		say_color "" "# run $test_count: $descr ($*)"
		# Export TEST_DIRECTORY, TRASH_DIRECTORY and GIT_TEST_LONG
		# to be able to use them in script
		export TEST_DIRECTORY TRASH_DIRECTORY GIT_TEST_LONG
		# Run command; redirect its stderr to &4 as in
		# test_run_, but keep its stdout on our stdout even in
		# non-verbose mode.
		"$@" 2>&4
		if test "$?" = 0
		then
			if test $test_external_has_tap -eq 0; then
				test_ok_ "$descr"
			else
				say_color "" "# test_external test $descr was ok"
				test_success=$(($test_success + 1))
			fi
		else
			if test $test_external_has_tap -eq 0; then
				test_failure_ "$descr" "$@"
			else
				say_color error "# test_external test $descr failed: $@"
				test_failure=$(($test_failure + 1))
			fi
		fi
	fi
}

# Like test_external, but in addition tests that the command generated
# no output on stderr.
test_external_without_stderr () {
	# The temporary file has no (and must have no) security
	# implications.
	tmp=${TMPDIR:-/tmp}
	stderr="$tmp/git-external-stderr.$$.tmp"
	test_external "$@" 4> "$stderr"
	test -f "$stderr" || error "Internal error: $stderr disappeared."
	descr="no stderr: $1"
	shift
	say >&3 "# expecting no stderr from previous command"
	if test ! -s "$stderr"
	then
		rm "$stderr"

		if test $test_external_has_tap -eq 0; then
			test_ok_ "$descr"
		else
			say_color "" "# test_external_without_stderr test $descr was ok"
			test_success=$(($test_success + 1))
		fi
	else
		if test "$verbose" = t
		then
			output=$(echo; echo "# Stderr is:"; cat "$stderr")
		else
			output=
		fi
		# rm first in case test_failure exits.
		rm "$stderr"
		if test $test_external_has_tap -eq 0; then
			test_failure_ "$descr" "$@" "$output"
		else
			say_color error "# test_external_without_stderr test $descr failed: $@: $output"
			test_failure=$(($test_failure + 1))
		fi
	fi
}

# debugging-friendly alternatives to "test [-f|-d|-e]"
# The commands test the existence or non-existence of $1. $2 can be
# given to provide a more precise diagnosis.
test_path_is_file () {
	if ! test -f "$1"
	then
		echo "File $1 doesn't exist. $2"
		false
	fi
}

test_path_is_dir () {
	if ! test -d "$1"
	then
		echo "Directory $1 doesn't exist. $2"
		false
	fi
}

# Check if the directory exists and is empty as expected, barf otherwise.
test_dir_is_empty () {
	test_path_is_dir "$1" &&
	if test -n "$(ls -a1 "$1" | egrep -v '^\.\.?$')"
	then
		echo "Directory '$1' is not empty, it contains:"
		ls -la "$1"
		return 1
	fi
}

test_path_is_missing () {
	if test -e "$1"
	then
		echo "Path exists:"
		ls -ld "$1"
		if test $# -ge 1
		then
			echo "$*"
		fi
		false
	fi
}

# test_line_count checks that a file has the number of lines it
# ought to. For example:
#
#	test_expect_success 'produce exactly one line of output' '
#		do something >output &&
#		test_line_count = 1 output
#	'
#
# is like "test $(wc -l <output) = 1" except that it passes the
# output through when the number of lines is wrong.

test_line_count () {
	if test $# != 3
	then
		error "bug in the test script: not 3 parameters to test_line_count"
	elif ! test $(wc -l <"$3") "$1" "$2"
	then
		echo "test_line_count: line count for $3 !$1 $2"
		cat "$3"
		return 1
	fi
}

# Returns success if a comma separated string of keywords ($1) contains a
# given keyword ($2).
# Examples:
# `list_contains "foo,bar" bar` returns 0
# `list_contains "foo" bar` returns 1

list_contains () {
	case ",$1," in
	*,$2,*)
		return 0
		;;
	esac
	return 1
}

# This is not among top-level (test_expect_success | test_expect_failure)
# but is a prefix that can be used in the test script, like:
#
#	test_expect_success 'complain and die' '
#		do something &&
#		do something else &&
#		test_must_fail git checkout ../outerspace
#	'
#
# Writing this as "! git checkout ../outerspace" is wrong, because
# the failure could be due to a segv. We want a controlled failure.

test_must_fail () {
	case "$1" in
	ok=*)
		_test_ok=${1#ok=}
		shift
		;;
	*)
		_test_ok=
		;;
	esac
	"$@"
	exit_code=$?
	if test $exit_code -eq 0 && ! list_contains "$_test_ok" success
	then
		echo >&2 "test_must_fail: command succeeded: $*"
		return 1
	elif test_match_signal 13 $exit_code && list_contains "$_test_ok" sigpipe
	then
		return 0
	elif test $exit_code -gt 129 && test $exit_code -le 192
	then
		echo >&2 "test_must_fail: died by signal $(($exit_code - 128)): $*"
		return 1
	elif test $exit_code -eq 127
	then
		echo >&2 "test_must_fail: command not found: $*"
		return 1
	elif test $exit_code -eq 126
	then
		echo >&2 "test_must_fail: valgrind error: $*"
		return 1
	fi
	return 0
}

# Similar to test_must_fail, but tolerates success, too. This is
# meant to be used in contexts like:
#
#	test_expect_success 'some command works without configuration' '
#		test_might_fail git config --unset all.configuration &&
#		do something
#	'
#
# Writing "git config --unset all.configuration || :" would be wrong,
# because we want to notice if it fails due to segv.

test_might_fail () {
	test_must_fail ok=success "$@"
}

# Similar to test_must_fail and test_might_fail, but check that a
# given command exited with a given exit code. Meant to be used as:
#
#	test_expect_success 'Merge with d/f conflicts' '
#		test_expect_code 1 git merge "merge msg" B master
#	'

test_expect_code () {
	want_code=$1
	shift
	"$@"
	exit_code=$?
	if test $exit_code = $want_code
	then
		return 0
	fi

	echo >&2 "test_expect_code: command exited with $exit_code, we wanted $want_code $*"
	return 1
}

# test_cmp is a helper function to compare actual and expected output.
# You can use it like:
#
#	test_expect_success 'foo works' '
#		echo expected >expected &&
#		foo >actual &&
#		test_cmp expected actual
#	'
#
# This could be written as either "cmp" or "diff -u", but:
# - cmp's output is not nearly as easy to read as diff -u
# - not all diff versions understand "-u"

test_cmp() {
	$GIT_TEST_CMP "$@"
}

# test_cmp_bin - helper to compare binary files

test_cmp_bin() {
	cmp "$@"
}

# Call any command "$@" but be more verbose about its
# failure. This is handy for commands like "test" which do
# not output anything when they fail.
verbose () {
	"$@" && return 0
	echo >&2 "command failed: $(git rev-parse --sq-quote "$@")"
	return 1
}

# Check if the file expected to be empty is indeed empty, and barf
# otherwise.

test_must_be_empty () {
	if test -s "$1"
	then
		echo "'$1' is not empty, it contains:"
		cat "$1"
		return 1
	fi
}

# Tests that its two parameters refer to the same revision
test_cmp_rev () {
	git rev-parse --verify "$1" >expect.rev &&
	git rev-parse --verify "$2" >actual.rev &&
	test_cmp expect.rev actual.rev
}
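
# A sketch of typical usage, assuming history was created earlier with
# "test_commit first && test_commit second":
#
#	git reset --hard HEAD^ &&
#	test_cmp_rev HEAD first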

# Print a sequence of integers in increasing order, either with
# two arguments (start and end):
#
#	test_seq 1 5 -- outputs 1 2 3 4 5 one line at a time
#
# or with one argument (end), in which case it starts counting
# from 1.

test_seq () {
	case $# in
	1) set 1 "$@" ;;
	2) ;;
	*) error "bug in the test script: not 1 or 2 parameters to test_seq" ;;
	esac
	test_seq_counter__=$1
	while test "$test_seq_counter__" -le "$2"
	do
		echo "$test_seq_counter__"
		test_seq_counter__=$(( $test_seq_counter__ + 1 ))
	done
}

# This function can be used to schedule some commands to be run
# unconditionally at the end of the test to restore sanity:
#
#	test_expect_success 'test core.capslock' '
#		git config core.capslock true &&
#		test_when_finished "git config --unset core.capslock" &&
#		hello world
#	'
#
# That would be roughly equivalent to
#
#	test_expect_success 'test core.capslock' '
#		git config core.capslock true &&
#		hello world
#		git config --unset core.capslock
#	'
#
# except that the greeting and config --unset must both succeed for
# the test to pass.
#
# Note that under --immediate mode, no clean-up is done to help diagnose
# what went wrong.

test_when_finished () {
	# We cannot detect when we are in a subshell in general, but by
	# doing so on Bash is better than nothing (the test will
	# silently pass on other shells).
	test "${BASH_SUBSHELL-0}" = 0 ||
	error "bug in test script: test_when_finished does nothing in a subshell"
	test_cleanup="{ $*
		} && (exit \"\$eval_ret\"); eval_ret=\$?; $test_cleanup"
}

# Most tests can use the created repository, but some may need to create more.
# Usage: test_create_repo <directory>
test_create_repo () {
	test "$#" = 1 ||
	error "bug in the test script: not 1 parameter to test-create-repo"
	repo="$1"
	mkdir -p "$repo"
	(
		cd "$repo" || error "Cannot setup test environment"
		"$GIT_EXEC_PATH/git-init" "--template=$GIT_BUILD_DIR/templates/blt/" >&3 2>&4 ||
		error "cannot run git init -- have you built things yet?"
		mv .git/hooks .git/hooks-disabled
	) || exit
}

# This function helps on symlink challenged file systems when it is not
# important that the file system entry is a symbolic link.
# Use test_ln_s_add instead of "ln -s x y && git add y" to add a
# symbolic link entry y to the index.

test_ln_s_add () {
	if test_have_prereq SYMLINKS
	then
		ln -s "$1" "$2" &&
		git update-index --add "$2"
	else
		printf '%s' "$1" >"$2" &&
		ln_s_obj=$(git hash-object -w "$2") &&
		git update-index --add --cacheinfo 120000 $ln_s_obj "$2" &&
		# pick up stat info from the file
		git update-index "$2"
	fi
}
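
# A sketch of typical usage: record a symlink "link" pointing at "target"
# in the index, whether or not the filesystem supports symbolic links.
#
#	test_ln_s_add target link &&
#	git commit -m "add link"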

# This function writes out its parameters, one per line
test_write_lines () {
	printf "%s\n" "$@"
}
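
# A sketch of typical usage: build an expected file without a series of
# echo calls.
#
#	test_write_lines one two three >expect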

# Some systems need the perl pointed to by $PERL_PATH rather than whatever
# "perl" happens to be first in PATH; this wrapper makes bare "perl"
# invocations in the tests use $PERL_PATH transparently.
perl () {
	command "$PERL_PATH" "$@"
}

# Is the value one of the various ways to spell a boolean true/false?
test_normalize_bool () {
	git -c magic.variable="$1" config --bool magic.variable 2>/dev/null
}

# Given a variable $1, normalize the value of it to one of "true",
# "false", or "auto" and store the result to it.
#
#	test_tristate GIT_TEST_HTTPD
#
# A variable set to an empty string is set to 'false'.
# A variable set to 'false' or 'auto' keeps its value.
# Anything else is set to 'true'.
# An unset variable defaults to 'auto'.
#
# The last rule is to allow people to set the variable to an empty
# string and export it to decline testing the particular feature
# for versions both before and after this change. We used to treat
# both unset and empty variable as a signal for "do not test" and
# took any non-empty string as "please test".

test_tristate () {
	if eval "test x\"\${$1+isset}\" = xisset"
	then
		# explicitly set
		eval "
			case \"\$$1\" in
			'') $1=false ;;
			auto) ;;
			*) $1=\$(test_normalize_bool \$$1 || echo true) ;;
			esac
		"
	else
		eval "$1=auto"
	fi
}

# Exit the test suite, either by skipping all remaining tests or by
# exiting with an error. If "$1" is "auto", then we assume we were
# opportunistically trying to set up some tests and we skip. If it is
# "true", then we report a failure.
#
# The error/skip message should be given by $2.
#
test_skip_or_die () {
	case "$1" in
	auto)
		skip_all=$2
		test_done
		;;
	true)
		error "$2"
		;;
	*)
		error "BUG: test tristate is '$1' (real error: $2)"
	esac
}
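
# A sketch of how test_tristate and test_skip_or_die work together
# (start_httpd here stands in for whatever optional setup might fail):
#
#	test_tristate GIT_TEST_HTTPD
#	if ! start_httpd
#	then
#		test_skip_or_die $GIT_TEST_HTTPD "web server setup failed"
#	fi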

# The following mingw_* functions obey POSIX shell syntax, but are actually
# bash scripts, and are meant to be used only with bash on Windows.

# A test_cmp function that treats LF and CRLF as equal and avoids forking
# diff when possible.
mingw_test_cmp () {
	# Read text into shell variables and compare them. If the results
	# are different, use regular diff to report the difference.
	local test_cmp_a= test_cmp_b=

	# When text came from stdin (one argument is '-') we must feed it
	# to diff.
	local stdin_for_diff=

	# Since it is difficult to detect the difference between an
	# empty input file and a failure to read the files, we go straight
	# to diff if one of the inputs is empty.
	if test -s "$1" && test -s "$2"
	then
		# regular case: both files non-empty
		mingw_read_file_strip_cr_ test_cmp_a <"$1"
		mingw_read_file_strip_cr_ test_cmp_b <"$2"
	elif test -s "$1" && test "$2" = -
	then
		# read 2nd file from stdin
		mingw_read_file_strip_cr_ test_cmp_a <"$1"
		mingw_read_file_strip_cr_ test_cmp_b
		stdin_for_diff='<<<"$test_cmp_b"'
	elif test "$1" = - && test -s "$2"
	then
		# read 1st file from stdin
		mingw_read_file_strip_cr_ test_cmp_a
		mingw_read_file_strip_cr_ test_cmp_b <"$2"
		stdin_for_diff='<<<"$test_cmp_a"'
	fi
	test -n "$test_cmp_a" &&
	test -n "$test_cmp_b" &&
	test "$test_cmp_a" = "$test_cmp_b" ||
	eval "diff -u \"\$@\" $stdin_for_diff"
}

# $1 is the name of the shell variable to fill in
mingw_read_file_strip_cr_ () {
	# Read line-wise using LF as the line separator
	# and use IFS to strip CR.
	local line
	while :
	do
		if IFS=$'\r' read -r -d $'\n' line
		then
			# good
			line=$line$'\n'
		else
			# we get here at EOF, but also if the last line
			# was not terminated by LF; in the latter case,
			# some text was read
			if test -z "$line"
			then
				# EOF, really
				break
			fi
		fi
		eval "$1=\$$1\$line"
	done
}

# Like "env FOO=BAR some-program", but run inside a subshell, which means
# it also works for shell functions (though those functions cannot impact
# the environment outside of the test_env invocation).
test_env () {
	(
		while test $# -gt 0
		do
			case "$1" in
			*=*)
				eval "${1%%=*}=\${1#*=}"
				eval "export ${1%%=*}"
				shift
				;;
			*)
				"$@"
				exit
				;;
			esac
		done
	)
}
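
# A sketch of typical usage (the variable and commit name are illustrative);
# note that the command can be a shell function such as test_commit:
#
#	test_env GIT_AUTHOR_NAME="A U Thor" test_commit with-custom-author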

# Returns true if the numeric exit code in "$2" represents the expected signal
# in "$1". Signals should be given numerically.
test_match_signal () {
	if test "$2" = "$((128 + $1))"
	then
		# POSIX
		return 0
	elif test "$2" = "$((256 + $1))"
	then
		# ksh
		return 0
	fi
	return 1
}
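
# A sketch of typical usage (the command name is a placeholder for whatever
# is expected to die of SIGPIPE, i.e. signal 13):
#
#	run_command_that_may_be_killed
#	test_match_signal 13 $?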

# Read up to "$1" bytes (or to EOF) from stdin and write them to stdout.
# A perl sysread() loop is used here because it copes with partial reads
# and is more portable than "head -c" or "dd" for this purpose.
test_copy_bytes () {
	perl -e '
		my $len = $ARGV[1];
		while ($len > 0) {
			my $s;
			my $nread = sysread(STDIN, $s, $len);
			die "cannot read: $!" unless defined($nread);
			last unless $nread;
			print $s;
			$len -= $nread;
		}
	' - "$1"
}
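
# A sketch of typical usage (file names are illustrative): read at most the
# first 10 bytes of the input.
#
#	test_copy_bytes 10 <some-file >first-ten-bytes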

# run "$@" inside a non-git directory
nongit () {
	test -d non-repo ||
	mkdir non-repo ||
	return 1

	(
		GIT_CEILING_DIRECTORIES=$(pwd) &&
		export GIT_CEILING_DIRECTORIES &&
		cd non-repo &&
		"$@"
	)
}
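
# A sketch of typical usage: make sure a command behaves sensibly when run
# outside of any repository.
#
#	nongit test_must_fail git log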