# Library of functions shared by all test scripts, included by
# test-lib.sh.
#
# Copyright (c) 2005 Junio C Hamano
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/ .

# The semantics of the editor variables are that of invoking
# sh -c "$EDITOR \"$@\"" files ...
#
# If our trash directory contains shell metacharacters, they will be
# interpreted if we just set $EDITOR directly, so do a little dance with
# environment variables to work around this.
#
# In particular, quoting isn't enough, as the path may contain the same quote
# that we're using.
test_set_editor () {
	FAKE_EDITOR="$1"
	export FAKE_EDITOR
	EDITOR='"$FAKE_EDITOR"'
	export EDITOR
}
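
# Illustrative use (the fake-editor script name here is made up): a test
# can point git at a helper script in the trash directory like
#
#	write_script fake-editor.sh <<-\EOF &&
#	echo edited >"$1"
#	EOF
#	test_set_editor "$(pwd)/fake-editor.sh"
#
# after which commands such as "git commit" run that script as the
# editor, even if the trash path contains quotes or other metacharacters.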

test_set_index_version () {
	GIT_INDEX_VERSION="$1"
	export GIT_INDEX_VERSION
}

test_decode_color () {
	awk '
		function name(n) {
			if (n == 0) return "RESET";
			if (n == 1) return "BOLD";
			if (n == 2) return "FAINT";
			if (n == 3) return "ITALIC";
			if (n == 7) return "REVERSE";
			if (n == 30) return "BLACK";
			if (n == 31) return "RED";
			if (n == 32) return "GREEN";
			if (n == 33) return "YELLOW";
			if (n == 34) return "BLUE";
			if (n == 35) return "MAGENTA";
			if (n == 36) return "CYAN";
			if (n == 37) return "WHITE";
			if (n == 40) return "BLACK";
			if (n == 41) return "BRED";
			if (n == 42) return "BGREEN";
			if (n == 43) return "BYELLOW";
			if (n == 44) return "BBLUE";
			if (n == 45) return "BMAGENTA";
			if (n == 46) return "BCYAN";
			if (n == 47) return "BWHITE";
		}
		{
			while (match($0, /\033\[[0-9;]*m/) != 0) {
				printf "%s<", substr($0, 1, RSTART-1);
				codes = substr($0, RSTART+2, RLENGTH-3);
				if (length(codes) == 0)
					printf "%s", name(0)
				else {
					n = split(codes, ary, ";");
					sep = "";
					for (i = 1; i <= n; i++) {
						printf "%s%s", sep, name(ary[i]);
						sep = ";"
					}
				}
				printf ">";
				$0 = substr($0, RSTART + RLENGTH, length($0) - RSTART - RLENGTH + 1);
			}
			print
		}
	'
}
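
# For instance (a sketch; the escape sequences are written out by hand
# here), piping colored output through the decoder:
#
#	printf "\033[31mred\033[m plain\n" | test_decode_color
#
# prints "<RED>red<RESET> plain", which is easier to compare against an
# expected file than raw escape sequences.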

lf_to_nul () {
	perl -pe 'y/\012/\000/'
}

nul_to_q () {
	perl -pe 'y/\000/Q/'
}

q_to_nul () {
	perl -pe 'y/Q/\000/'
}

q_to_cr () {
	tr Q '\015'
}

q_to_tab () {
	tr Q '\011'
}

qz_to_tab_space () {
	tr QZ '\011\040'
}

append_cr () {
	sed -e 's/$/Q/' | tr Q '\015'
}

remove_cr () {
	tr '\015' Q | sed -e 's/Q$//'
}
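
# These helpers let tests write binary-ish expectations as plain text.
# A minimal sketch (file names are made up): to compare NUL-separated
# output, turn the NULs into the placeholder Q first, e.g.
#
#	git ls-files -z >actual &&
#	nul_to_q <actual >actual.q &&
#	echo "fileQ" >expect.q &&
#	test_cmp expect.q actual.q
#
# and q_to_nul goes the other way when a test needs to feed NULs in.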

# Generate an output of $1 bytes of all zeroes (NULs, not ASCII zeroes).
# If $1 is 'infinity', output forever or until the receiving pipe stops reading,
# whichever comes first.
generate_zero_bytes () {
	test-tool genzeros "$@"
}
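
# For example (illustrative only), "generate_zero_bytes 10 >ten-nuls"
# writes ten NUL bytes, while "generate_zero_bytes infinity | head -c 10"
# stops as soon as the reader closes the pipe.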

# In some Bourne shell implementations, the "unset" builtin returns
# nonzero status when a variable to be unset was not set in the first
# place.
#
# Use sane_unset when that should not be considered an error.

sane_unset () {
	unset "$@"
	return 0
}

test_tick () {
	if test -z "${test_tick+set}"
	then
		test_tick=1112911993
	else
		test_tick=$(($test_tick + 60))
	fi
	GIT_COMMITTER_DATE="$test_tick -0700"
	GIT_AUTHOR_DATE="$test_tick -0700"
	export GIT_COMMITTER_DATE GIT_AUTHOR_DATE
}
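
# test_tick makes commit timestamps deterministic: the first call in a
# script sets both dates to the fixed epoch value above, and every later
# call advances them by one minute. A typical (illustrative) pattern is
#
#	test_tick &&
#	git commit -m "first" &&
#	test_tick &&
#	git commit -m "second"
#
# which gives the two commits distinct, reproducible author and
# committer dates.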

# Stop execution and start a shell. This is useful for debugging tests.
#
# Be sure to remove all invocations of this command before submitting.

test_pause () {
	"$SHELL_PATH" <&6 >&5 2>&7
}

# Wrap git with a debugger. Adding this to a command can make it easier
# to understand what is going on in a failing test.
#
# Examples:
#     debug git checkout master
#     debug --debugger=nemiver git $ARGS
#     debug -d "valgrind --tool=memcheck --track-origins=yes" git $ARGS
debug () {
	case "$1" in
	-d)
		GIT_DEBUGGER="$2" &&
		shift 2
		;;
	--debugger=*)
		GIT_DEBUGGER="${1#*=}" &&
		shift 1
		;;
	*)
		GIT_DEBUGGER=1
		;;
	esac &&
	GIT_DEBUGGER="${GIT_DEBUGGER}" "$@" <&6 >&5 2>&7
}

# Call test_commit with the arguments
# [-C <directory>] <message> [<file> [<contents> [<tag>]]]
#
# This will commit a file with the given contents and the given commit
# message, and tag the resulting commit with the given tag name.
#
# <file>, <contents>, and <tag> all default to <message>.
#
# If the first argument is "-C", the second argument is used as a path for
# the git invocations.

test_commit () {
	notick= &&
	signoff= &&
	indir= &&
	while test $# != 0
	do
		case "$1" in
		--notick)
			notick=yes
			;;
		--signoff)
			signoff="$1"
			;;
		-C)
			indir="$2"
			shift
			;;
		*)
			break
			;;
		esac
		shift
	done &&
	indir=${indir:+"$indir"/} &&
	file=${2:-"$1.t"} &&
	echo "${3-$1}" > "$indir$file" &&
	git ${indir:+ -C "$indir"} add "$file" &&
	if test -z "$notick"
	then
		test_tick
	fi &&
	git ${indir:+ -C "$indir"} commit $signoff -m "$1" &&
	git ${indir:+ -C "$indir"} tag "${4:-$1}"
}
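
# A couple of illustrative calls (the names are made up):
#
#	test_commit first
#
# writes "first" to first.t, commits it with message "first" and tags it
# "first", while
#
#	test_commit -C sub second second.t contents v2
#
# does the same inside the "sub" repository with an explicit file name,
# contents, and tag.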

# Call test_merge with the arguments "<message> <commit>", where <commit>
# can be a tag pointing to the commit-to-merge.

test_merge () {
	test_tick &&
	git merge -m "$1" "$2" &&
	git tag "$1"
}

# This function helps systems where core.filemode=false is set.
# Use it instead of plain 'chmod +x' to set or unset the executable bit
# of a file in the working directory and add it to the index.

test_chmod () {
	chmod "$@" &&
	git update-index --add "--chmod=$@"
}
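
# For example, "test_chmod +x script.sh" behaves like
# "chmod +x script.sh && git update-index --add --chmod=+x script.sh",
# so the executable bit is recorded in the index even where the
# filesystem cannot represent it (script.sh is just an example name).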

# Get the modebits from a file.
test_modebits () {
	ls -l "$1" | sed -e 's|^\(..........\).*|\1|'
}

# Unset a configuration variable, but don't fail if it doesn't exist.
test_unconfig () {
	config_dir=
	if test "$1" = -C
	then
		shift
		config_dir=$1
		shift
	fi
	git ${config_dir:+-C "$config_dir"} config --unset-all "$@"
	config_status=$?
	case "$config_status" in
	5) # ok, nothing to unset
		config_status=0
		;;
	esac
	return $config_status
}

# Set git config, automatically unsetting it after the test is over.
test_config () {
	config_dir=
	if test "$1" = -C
	then
		shift
		config_dir=$1
		shift
	fi
	test_when_finished "test_unconfig ${config_dir:+-C '$config_dir'} '$1'" &&
	git ${config_dir:+-C "$config_dir"} config "$@"
}
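
# For example, inside a test body
#
#	test_config core.autocrlf true &&
#	test_config -C sub core.abbrev 16 &&
#	...
#
# sets the variables for the current (or the "sub") repository and
# registers test_unconfig calls via test_when_finished, so later tests
# start from a clean configuration ("sub" is an illustrative path).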

test_config_global () {
	test_when_finished "test_unconfig --global '$1'" &&
	git config --global "$@"
}

write_script () {
	{
		echo "#!${2-"$SHELL_PATH"}" &&
		cat
	} >"$1" &&
	chmod +x "$1"
}
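
# write_script reads the body of the script from its standard input and
# prepends a shebang, so a test typically writes (illustrative name):
#
#	write_script hook.sh <<-\EOF &&
#	echo "hook ran" >&2
#	EOF
#
# which creates an executable hook.sh that runs under $SHELL_PATH unless
# a different interpreter is given as the second argument.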

# Use test_set_prereq to tell that a particular prerequisite is available.
# The prerequisite can later be checked for in two ways:
#
# - Explicitly using test_have_prereq.
#
# - Implicitly by specifying the prerequisite tag in the calls to
#   test_expect_{success,failure,code}.
#
# The single parameter is the prerequisite tag (a simple word, in all
# capital letters by convention).
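#
# For example (SPECIAL_TOOL is an invented tag), a setup step may run
#
#	test_set_prereq SPECIAL_TOOL
#
# and later tests can either guard themselves with
#
#	test_expect_success SPECIAL_TOOL 'uses the tool' '...'
#
# or branch on "if test_have_prereq SPECIAL_TOOL".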

test_unset_prereq () {
	! test_have_prereq "$1" ||
	satisfied_prereq="${satisfied_prereq% $1 *} ${satisfied_prereq#* $1 }"
}

test_set_prereq () {
	case "$1" in
	!*)
		test_unset_prereq "${1#!}"
		;;
	*)
		satisfied_prereq="$satisfied_prereq$1 "
		;;
	esac
}
satisfied_prereq=" "
lazily_testable_prereq= lazily_tested_prereq=

# Usage: test_lazy_prereq PREREQ 'script'
test_lazy_prereq () {
	lazily_testable_prereq="$lazily_testable_prereq$1 "
	eval test_prereq_lazily_$1=\$2
}
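
# The prerequisite script runs lazily, in a throwaway directory, the
# first time the tag is checked; the tag is set only if the script
# succeeds. For instance, a filesystem capability can be probed along
# these lines (a sketch, not the exact upstream definition):
#
#	test_lazy_prereq SYMLINKS '
#		# does the filesystem support symbolic links?
#		ln -s x y && test -h y
#	'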

test_run_lazy_prereq_ () {
	script='
mkdir -p "$TRASH_DIRECTORY/prereq-test-dir" &&
(
	cd "$TRASH_DIRECTORY/prereq-test-dir" &&'"$2"'
)'
	say >&3 "checking prerequisite: $1"
	say >&3 "$script"
	test_eval_ "$script"
	eval_ret=$?
	rm -rf "$TRASH_DIRECTORY/prereq-test-dir"
	if test "$eval_ret" = 0; then
		say >&3 "prerequisite $1 ok"
	else
		say >&3 "prerequisite $1 not satisfied"
	fi
	return $eval_ret
}

test_have_prereq () {
	# prerequisites can be concatenated with ','
	save_IFS=$IFS
	IFS=,
	set -- $*
	IFS=$save_IFS

	total_prereq=0
	ok_prereq=0
	missing_prereq=

	for prerequisite
	do
		case "$prerequisite" in
		!*)
			negative_prereq=t
			prerequisite=${prerequisite#!}
			;;
		*)
			negative_prereq=
		esac

		case " $lazily_tested_prereq " in
		*" $prerequisite "*)
			;;
		*)
			case " $lazily_testable_prereq " in
			*" $prerequisite "*)
				eval "script=\$test_prereq_lazily_$prerequisite" &&
				if test_run_lazy_prereq_ "$prerequisite" "$script"
				then
					test_set_prereq $prerequisite
				fi
				lazily_tested_prereq="$lazily_tested_prereq$prerequisite "
			esac
			;;
		esac

		total_prereq=$(($total_prereq + 1))
		case "$satisfied_prereq" in
		*" $prerequisite "*)
			satisfied_this_prereq=t
			;;
		*)
			satisfied_this_prereq=
		esac

		case "$satisfied_this_prereq,$negative_prereq" in
		t,|,t)
			ok_prereq=$(($ok_prereq + 1))
			;;
		*)
			# Keep a list of missing prerequisites; restore
			# the negative marker if necessary.
			prerequisite=${negative_prereq:+!}$prerequisite
			if test -z "$missing_prereq"
			then
				missing_prereq=$prerequisite
			else
				missing_prereq="$prerequisite,$missing_prereq"
			fi
		esac
	done

	test $total_prereq = $ok_prereq
}

test_declared_prereq () {
	case ",$test_prereq," in
	*,$1,*)
		return 0
		;;
	esac
	return 1
}

test_verify_prereq () {
	test -z "$test_prereq" ||
	expr >/dev/null "$test_prereq" : '[A-Z0-9_,!]*$' ||
	BUG "'$test_prereq' does not look like a prereq"
}

test_expect_failure () {
	test_start_
	test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
	test "$#" = 2 ||
	BUG "not 2 or 3 parameters to test-expect-failure"
	test_verify_prereq
	export test_prereq
	if ! test_skip "$@"
	then
		say >&3 "checking known breakage: $2"
		if test_run_ "$2" expecting_failure
		then
			test_known_broken_ok_ "$1"
		else
			test_known_broken_failure_ "$1"
		fi
	fi
	test_finish_
}

test_expect_success () {
	test_start_
	test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
	test "$#" = 2 ||
	BUG "not 2 or 3 parameters to test-expect-success"
	test_verify_prereq
	export test_prereq
	if ! test_skip "$@"
	then
		say >&3 "expecting success: $2"
		if test_run_ "$2"
		then
			test_ok_ "$1"
		else
			test_failure_ "$@"
		fi
	fi
	test_finish_
}
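
# Both helpers take an optional prerequisite tag before the description,
# e.g. (SYMLINKS being one of the usual prerequisite tags):
#
#	test_expect_success SYMLINKS 'symlink is checked out' '
#		ln -s target link &&
#		git add link &&
#		test -h link
#	'
#
# The test body is skipped, not failed, when the prerequisite is missing.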

# test_external runs external test scripts that provide continuous
# test output about their progress, and succeeds/fails on
# zero/non-zero exit code. It outputs the test output on stdout even
# in non-verbose mode, and announces the external script with "# run
# <n>: ..." before running it. When providing relative paths, keep in
# mind that all scripts run in "trash directory".
# Usage: test_external description command arguments...
# Example: test_external 'Perl API' perl ../path/to/test.pl
test_external () {
	test "$#" = 4 && { test_prereq=$1; shift; } || test_prereq=
	test "$#" = 3 ||
	BUG "not 3 or 4 parameters to test_external"
	descr="$1"
	shift
	test_verify_prereq
	export test_prereq
	if ! test_skip "$descr" "$@"
	then
		# Announce the script to reduce confusion about the
		# test output that follows.
		say_color "" "# run $test_count: $descr ($*)"
		# Export TEST_DIRECTORY, TRASH_DIRECTORY and GIT_TEST_LONG
		# to be able to use them in script
		export TEST_DIRECTORY TRASH_DIRECTORY GIT_TEST_LONG
		# Run command; redirect its stderr to &4 as in
		# test_run_, but keep its stdout on our stdout even in
		# non-verbose mode.
		"$@" 2>&4
		if test "$?" = 0
		then
			if test $test_external_has_tap -eq 0; then
				test_ok_ "$descr"
			else
				say_color "" "# test_external test $descr was ok"
				test_success=$(($test_success + 1))
			fi
		else
			if test $test_external_has_tap -eq 0; then
				test_failure_ "$descr" "$@"
			else
				say_color error "# test_external test $descr failed: $@"
				test_failure=$(($test_failure + 1))
			fi
		fi
	fi
}

# Like test_external, but in addition tests that the command generated
# no output on stderr.
test_external_without_stderr () {
	# The temporary file has no (and must have no) security
	# implications.
	tmp=${TMPDIR:-/tmp}
	stderr="$tmp/git-external-stderr.$$.tmp"
	test_external "$@" 4> "$stderr"
	test -f "$stderr" || error "Internal error: $stderr disappeared."
	descr="no stderr: $1"
	shift
	say >&3 "# expecting no stderr from previous command"
	if test ! -s "$stderr"
	then
		rm "$stderr"

		if test $test_external_has_tap -eq 0; then
			test_ok_ "$descr"
		else
			say_color "" "# test_external_without_stderr test $descr was ok"
			test_success=$(($test_success + 1))
		fi
	else
		if test "$verbose" = t
		then
			output=$(echo; echo "# Stderr is:"; cat "$stderr")
		else
			output=
		fi
		# rm first in case test_failure exits.
		rm "$stderr"
		if test $test_external_has_tap -eq 0; then
			test_failure_ "$descr" "$@" "$output"
		else
			say_color error "# test_external_without_stderr test $descr failed: $@: $output"
			test_failure=$(($test_failure + 1))
		fi
	fi
}

# debugging-friendly alternatives to "test [-f|-d|-e]"
# The commands test the existence or non-existence of $1. $2 can be
# given to provide a more precise diagnosis.
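#
# For example, "test_path_is_file out.log 'log should have been written'"
# prints "File out.log doesn't exist." along with that message and fails,
# instead of a silent bare "test -f" failure (out.log is an illustrative
# name).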
test_path_is_file () {
	if ! test -f "$1"
	then
		echo "File $1 doesn't exist. $2"
		false
	fi
}

test_path_is_dir () {
	if ! test -d "$1"
	then
		echo "Directory $1 doesn't exist. $2"
		false
	fi
}

test_path_exists () {
	if ! test -e "$1"
	then
		echo "Path $1 doesn't exist. $2"
		false
	fi
}

# Check if the directory exists and is empty as expected, barf otherwise.
test_dir_is_empty () {
	test_path_is_dir "$1" &&
	if test -n "$(ls -a1 "$1" | egrep -v '^\.\.?$')"
	then
		echo "Directory '$1' is not empty, it contains:"
		ls -la "$1"
		return 1
	fi
}

# Check if the file exists and has a size greater than zero
test_file_not_empty () {
	if ! test -s "$1"
	then
		echo "'$1' is not a non-empty file."
		false
	fi
}

test_path_is_missing () {
	if test -e "$1"
	then
		echo "Path exists:"
		ls -ld "$1"
		if test $# -ge 1
		then
			echo "$*"
		fi
		false
	fi
}

# test_line_count checks that a file has the number of lines it
# ought to. For example:
#
#	test_expect_success 'produce exactly one line of output' '
#		do something >output &&
#		test_line_count = 1 output
#	'
#
# is like "test $(wc -l <output) = 1" except that it passes the
# output through when the number of lines is wrong.

test_line_count () {
	if test $# != 3
	then
		BUG "not 3 parameters to test_line_count"
	elif ! test $(wc -l <"$3") "$1" "$2"
	then
		echo "test_line_count: line count for $3 !$1 $2"
		cat "$3"
		return 1
	fi
}

# Returns success if a comma separated string of keywords ($1) contains a
# given keyword ($2).
# Examples:
# `list_contains "foo,bar" bar` returns 0
# `list_contains "foo" bar` returns 1

list_contains () {
	case ",$1," in
	*,$2,*)
		return 0
		;;
	esac
	return 1
}

# This is not among top-level (test_expect_success | test_expect_failure)
# but is a prefix that can be used in the test script, like:
#
#	test_expect_success 'complain and die' '
#		do something &&
#		do something else &&
#		test_must_fail git checkout ../outerspace
#	'
#
# Writing this as "! git checkout ../outerspace" is wrong, because
# the failure could be due to a segv. We want a controlled failure.
#
# Accepts the following options:
#
#   ok=<signal-name>[,<...>]:
#     Don't treat an exit caused by the given signal as error.
#     Multiple signals can be specified as a comma separated list.
#     Currently recognized signal names are: sigpipe, success.
#     (Don't use 'success', use 'test_might_fail' instead.)
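#
# For instance, when the tested command is expected to die of SIGPIPE
# because its reader goes away, something like
#
#	test_must_fail ok=sigpipe git <subcommand> | head -1
#
# (a sketch, not a real test) accepts either an orderly failure or a
# SIGPIPE death, but still flags a segfault or a clean exit as an error.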

test_must_fail () {
	case "$1" in
	ok=*)
		_test_ok=${1#ok=}
		shift
		;;
	*)
		_test_ok=
		;;
	esac
	"$@" 2>&7
	exit_code=$?
	if test $exit_code -eq 0 && ! list_contains "$_test_ok" success
	then
		echo >&4 "test_must_fail: command succeeded: $*"
		return 1
	elif test_match_signal 13 $exit_code && list_contains "$_test_ok" sigpipe
	then
		return 0
	elif test $exit_code -gt 129 && test $exit_code -le 192
	then
		echo >&4 "test_must_fail: died by signal $(($exit_code - 128)): $*"
		return 1
	elif test $exit_code -eq 127
	then
		echo >&4 "test_must_fail: command not found: $*"
		return 1
	elif test $exit_code -eq 126
	then
		echo >&4 "test_must_fail: valgrind error: $*"
		return 1
	fi
	return 0
} 7>&2 2>&4

# Similar to test_must_fail, but tolerates success, too. This is
# meant to be used in contexts like:
#
#	test_expect_success 'some command works without configuration' '
#		test_might_fail git config --unset all.configuration &&
#		do something
#	'
#
# Writing "git config --unset all.configuration || :" would be wrong,
# because we want to notice if it fails due to segv.
#
# Accepts the same options as test_must_fail.

test_might_fail () {
	test_must_fail ok=success "$@" 2>&7
} 7>&2 2>&4

# Similar to test_must_fail and test_might_fail, but check that a
# given command exited with a given exit code. Meant to be used as:
#
#	test_expect_success 'Merge with d/f conflicts' '
#		test_expect_code 1 git merge "merge msg" B master
#	'

test_expect_code () {
	want_code=$1
	shift
	"$@" 2>&7
	exit_code=$?
	if test $exit_code = $want_code
	then
		return 0
	fi

	echo >&4 "test_expect_code: command exited with $exit_code, we wanted $want_code $*"
	return 1
} 7>&2 2>&4
|
2012-02-17 11:25:08 +01:00
|
|
|
|
|
|
|
# test_cmp is a helper function to compare actual and expected output.
|
|
|
|
# You can use it like:
|
|
|
|
#
|
|
|
|
# test_expect_success 'foo works' '
|
|
|
|
# echo expected >expected &&
|
|
|
|
# foo >actual &&
|
|
|
|
# test_cmp expected actual
|
|
|
|
# '
|
|
|
|
#
|
|
|
|
# This could be written as either "cmp" or "diff -u", but:
|
|
|
|
# - cmp's output is not nearly as easy to read as diff -u
|
|
|
|
# - not all diff versions understand "-u"
|
|
|
|
|
|
|
|
test_cmp() {
|
|
|
|
$GIT_TEST_CMP "$@"
|
|
|
|
}
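GIT_TEST_CMP itself is set up in test-lib.sh; the sketch below only
illustrates the idea and assumes the common default of a plain "diff -u":

	: "${GIT_TEST_CMP:=diff -u}"	# assumed default; the real setup is in test-lib.sh
	echo expected >expected &&
	echo expected >actual &&
	test_cmp expected actual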
|
|
|
|
|
2018-10-21 16:02:27 +02:00
|
|
|
# Check that the given config key has the expected value.
|
|
|
|
#
|
|
|
|
# test_cmp_config [-C <dir>] <expected-value>
|
|
|
|
# [<git-config-options>...] <config-key>
|
|
|
|
#
|
|
|
|
# For example, to check that the value of core.bar is foo:
|
|
|
|
#
|
|
|
|
# test_cmp_config foo core.bar
|
|
|
|
#
|
|
|
|
test_cmp_config() {
|
|
|
|
local GD &&
|
|
|
|
if test "$1" = "-C"
|
|
|
|
then
|
|
|
|
shift &&
|
|
|
|
GD="-C $1" &&
|
|
|
|
shift
|
|
|
|
fi &&
|
|
|
|
printf "%s\n" "$1" >expect.config &&
|
|
|
|
shift &&
|
|
|
|
git $GD config "$@" >actual.config &&
|
|
|
|
test_cmp expect.config actual.config
|
|
|
|
}
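A few usage sketches (the 'sub' repository and the config values are
made up):

	test_cmp_config foo core.bar			# core.bar in the current repo is "foo"
	test_cmp_config -C sub foo core.bar		# same check inside the 'sub' repository
	test_cmp_config false --bool core.ignorecase	# extra git-config options are passed through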
|
|
|
|
|
2014-06-04 17:57:52 +02:00
|
|
|
# test_cmp_bin - helper to compare binary files
|
|
|
|
|
|
|
|
test_cmp_bin() {
|
|
|
|
cmp "$@"
|
|
|
|
}
|
|
|
|
|
2018-02-08 16:56:54 +01:00
|
|
|
# Use this instead of test_cmp to compare files that contain expected and
|
|
|
|
# actual output from git commands that can be translated. When running
|
i18n: make GETTEXT_POISON a runtime option
Change the GETTEXT_POISON compile-time + runtime GIT_GETTEXT_POISON
test parameter to only be a GIT_TEST_GETTEXT_POISON=<non-empty?>
runtime parameter, to be consistent with other parameters documented
in "Running tests with special setups" in t/README.
When I added GETTEXT_POISON in bb946bba76 ("i18n: add GETTEXT_POISON
to simulate unfriendly translator", 2011-02-22) I was concerned with
ensuring that the _() function would get constant folded if NO_GETTEXT
was defined, and likewise that GETTEXT_POISON would be compiled out
unless it was defined.
But as the benchmark in my [1] shows, doing a one-off runtime
getenv("GIT_TEST_[...]") is trivial, and since GETTEXT_POISON was
originally added the GIT_TEST_* env variables have become the common
idiom for turning on special test setups.
So change GETTEXT_POISON to work the same way. Now the
GETTEXT_POISON=YesPlease compile-time option is gone, and running the
tests with GIT_TEST_GETTEXT_POISON=[YesPlease|] can be toggled on/off
without recompiling.
This allows for conditionally amending tests to test with/without
poison, similar to what 859fdc0c3c ("commit-graph: define
GIT_TEST_COMMIT_GRAPH", 2018-08-29) did for GIT_TEST_COMMIT_GRAPH. Do
some of that, now we e.g. always run the t0205-gettext-poison.sh test.
I did enough there to remove the GETTEXT_POISON prerequisite, but its
inverse C_LOCALE_OUTPUT is still around, and surely some tests using
it can be converted to e.g. always set GIT_TEST_GETTEXT_POISON=.
Notes on the implementation:
* We still compile a dedicated GETTEXT_POISON build in Travis
CI. Perhaps this should be revisited and integrated into the
"linux-gcc" build, see ae59a4e44f ("travis: run tests with
GIT_TEST_SPLIT_INDEX", 2018-01-07) for prior art in that area. Then
again maybe not, see [2].
* We now skip a test in t0000-basic.sh under
GIT_TEST_GETTEXT_POISON=YesPlease that wasn't skipped before. This
test relies on C locale output, but due to an edge case in how the
previous implementation of GETTEXT_POISON worked (reading it from
GIT-BUILD-OPTIONS) wasn't enabling poison correctly. Now it does,
and needs to be skipped.
* The getenv() function is not reentrant, so out of paranoia about
code of the form:
printf(_("%s"), getenv("some-env"));
call use_gettext_poison() in our early setup in git_setup_gettext()
so we populate the "poison_requested" variable in a codepath that
won't suffer from that race condition.
* We error out in the Makefile if you're still saying
GETTEXT_POISON=YesPlease to prompt users to change their
invocation.
* We should not print out poisoned messages during the test
initialization itself to keep it more readable, so the test library
hides the variable if set in $GIT_TEST_GETTEXT_POISON_ORIG during
setup. See [3].
See also [4] for more on the motivation behind this patch, and the
history of the GETTEXT_POISON facility.
1. https://public-inbox.org/git/871s8gd32p.fsf@evledraar.gmail.com/
2. https://public-inbox.org/git/20181102163725.GY30222@szeder.dev/
3. https://public-inbox.org/git/20181022202241.18629-2-szeder.dev@gmail.com/
4. https://public-inbox.org/git/878t2pd6yu.fsf@evledraar.gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
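Illustratively, toggling poison is now purely a runtime decision
(assuming an already built git and running from the t/ directory):

	GIT_TEST_GETTEXT_POISON=YesPlease ./t0205-gettext-poison.sh -v
	GIT_TEST_GETTEXT_POISON= ./t0205-gettext-poison.sh -v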
2018-11-08 22:15:29 +01:00
|
|
|
# under GIT_TEST_GETTEXT_POISON this pretends that the command produced expected
|
2018-02-08 16:56:54 +01:00
|
|
|
# results.
|
|
|
|
test_i18ncmp () {
|
2018-11-08 22:15:29 +01:00
|
|
|
! test_have_prereq C_LOCALE_OUTPUT || test_cmp "$@"
|
2018-02-08 16:56:54 +01:00
|
|
|
}
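Usage mirrors test_cmp; under GIT_TEST_GETTEXT_POISON the comparison is
skipped, so the test still passes (filenames are made up):

	git status >actual &&
	test_i18ncmp expect actual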
|
|
|
|
|
|
|
|
# Use this instead of "grep expected-string actual" to see if the
|
|
|
|
# output from a git command that can be translated either contains an
|
|
|
|
# expected string, or does not contain an unwanted one. When running
|
2018-11-08 22:15:29 +01:00
|
|
|
# under GIT_TEST_GETTEXT_POISON this pretends that the command produced expected
|
2018-02-08 16:56:54 +01:00
|
|
|
# results.
|
|
|
|
test_i18ngrep () {
|
t: validate 'test_i18ngrep's parameters
Some of the previous patches in this series fixed bogus
'test_i18ngrep' invocations:
- Two invocations where the tested git command's standard output is
directly piped into 'test_i18ngrep'. While convenient, this is an
antipattern, because the pipe hides the git command's exit code,
and the test could continue even if the command exited with error.
- Two invocations that had neither a filename parameter nor anything
piped into their standard input, yet both managed to remain
unnoticed for years. A third similarly bogus invocation is
currently lurking in 'pu' for a couple of weeks now.
Prevent similar mistakes in the future by validating 'test_i18ngrep's
parameters requiring that
- The last parameter names an existing file to be read, effectively
forbidding piping into 'test_i18ngrep'.
Note that this change will also forbid cases where 'test_i18ngrep'
would legitimately read its standard input, e.g. when its standard
input is redirected from a file, or when a git command's standard
output is first written to an intermediate file, which is then
preprocessed by a non-git command before the results are piped
into 'test_i18ngrep'. See two of the previous patches for the
only such cases we had in our test suite. However, reliably
preventing the piping antipattern is arguably more important than
supporting these cases, which can be easily worked around by
opening the file directly or using an intermediate file anyway.
- There are at least two parameters, not including the optional '!'
to negate the pattern. This ought to catch corner cases when
'test_i18ngrep' looks for the name of an existing file on its
standard input; the above check would miss this case because the
filename as pattern would be the last parameter.
Note that this is not quite perfect, as it doesn't account for any
'grep --options' given as parameters. However, doing so would be
far too complicated, considering that patterns can start with
dashes as well, and in the majority of the cases we don't use any
such options anyway.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Reviewed-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
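For example, this check rejects the piping antipattern and steers tests
toward the file-reading form (the command and pattern are made up):

	# now flagged as a bug: no file parameter, and the pipe hides git's exit code
	git status | test_i18ngrep "nothing to commit"

	# accepted: git's exit code is checked, then the saved output is grepped
	git status >output &&
	test_i18ngrep "nothing to commit" output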
2018-02-08 16:56:55 +01:00
|
|
|
eval "last_arg=\${$#}"
|
|
|
|
|
|
|
|
test -f "$last_arg" ||
|
tests: send "bug in the test script" errors to the script's stderr
Some of the functions in our test library check that they were invoked
properly with conditions like this:
test "$#" = 2 ||
error "bug in the test script: not 2 parameters to test-expect-success"
If this particular condition is triggered, then 'error' will abort the
whole test script with a bold red error message [1] right away.
However, under certain circumstances the test script will be aborted
completely silently, namely if:
- a similar condition in a test helper function like
'test_line_count' is triggered,
- which is invoked from the test script's "main" shell [2],
- and the test script is run manually (i.e. './t1234-foo.sh' as
opposed to 'make t1234-foo.sh' or 'make test') [3]
- and without the '--verbose' option,
because the error message is printed from within 'test_eval_', where
standard output is redirected either to /dev/null or to a log file.
The only indications that something is wrong are that not all tests in
the script are executed and that at the end of the test script's output
there is no "# passed all N tests" message; both signs are subtle and
can easily go unnoticed, as I had to experience myself.
Send these "bug in the test script" error messages directly to the
test script's standard error and thus to the terminal, so those bugs
will be much harder to overlook. Instead of updating all ~20 such
'error' calls with a redirection, let's add a BUG() function to
'test-lib.sh', wrapping an 'error' call with the proper redirection
and also including the common prefix of those error messages, and
convert all those call sites [4] to use this new BUG() function
instead.
[1] That particular error message from 'test_expect_success' is
printed in color only when running with or without '--verbose';
with '--tee' or '--verbose-log' the error is printed without
color, but it is printed to the terminal nonetheless.
[2] If such a condition is triggered in a subshell of a test, then
'error' won't be able to abort the whole test script, but only the
subshell, which in turn causes the test to fail in the usual way,
indicating loudly and clearly that something is wrong.
[3] Well, 'error' aborts the test script the same way when run
manually or by 'make' or 'prove', but both 'make' and 'prove' pay
attention to the test script's exit status, and even a silently
aborted test script would then trigger those tools' usual
noticeable error messages.
[4] Strictly speaking, not all those 'error' calls need that
redirection to send their output to the terminal, see e.g.
'test_expect_success' in the opening example, but I think it's
better to be consistent.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
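A sketch of the BUG() helper this message describes; the exact file
descriptor used by the real definition in test-lib.sh may differ, fd 4
is used here because it is the test script's original stderr as
described above:

	BUG () {
		# send the message past any redirections in the test itself
		error >&4 "bug in the test script: $*"
	}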
2018-11-19 14:13:26 +01:00
|
|
|
BUG "test_i18ngrep requires a file to read as the last parameter"
|
2018-02-08 16:56:55 +01:00
|
|
|
|
|
|
|
if test $# -lt 2 ||
|
|
|
|
{ test "x!" = "x$1" && test $# -lt 3 ; }
|
|
|
|
then
|
tests: send "bug in the test script" errors to the script's stderr
Some of the functions in our test library check that they were invoked
properly with conditions like this:
test "$#" = 2 ||
error "bug in the test script: not 2 parameters to test-expect-success"
If this particular condition is triggered, then 'error' will abort the
whole test script with a bold red error message [1] right away.
However, under certain circumstances the test script will be aborted
completely silently, namely if:
- a similar condition in a test helper function like
'test_line_count' is triggered,
- which is invoked from the test script's "main" shell [2],
- and the test script is run manually (i.e. './t1234-foo.sh' as
opposed to 'make t1234-foo.sh' or 'make test') [3]
- and without the '--verbose' option,
because the error message is printed from within 'test_eval_', where
standard output is redirected either to /dev/null or to a log file.
The only indication that something is wrong is that not all tests in
the script are executed and at the end of the test script's output
there is no "# passed all N tests" message, which are subtle and can
easily go unnoticed, as I had to experience myself.
Send these "bug in the test script" error messages directly to the
test scripts standard error and thus to the terminal, so those bugs
will be much harder to overlook. Instead of updating all ~20 such
'error' calls with a redirection, let's add a BUG() function to
'test-lib.sh', wrapping an 'error' call with the proper redirection
and also including the common prefix of those error messages, and
convert all those call sites [4] to use this new BUG() function
instead.
[1] That particular error message from 'test_expect_success' is
printed in color only when running with or without '--verbose';
with '--tee' or '--verbose-log' the error is printed without
color, but it is printed to the terminal nonetheless.
[2] If such a condition is triggered in a subshell of a test, then
'error' won't be able to abort the whole test script, but only the
subshell, which in turn causes the test to fail in the usual way,
indicating loudly and clearly that something is wrong.
[3] Well, 'error' aborts the test script the same way when run
manually or by 'make' or 'prove', but both 'make' and 'prove' pay
attention to the test script's exit status, and even a silently
aborted test script would then trigger those tools' usual
noticable error messages.
[4] Strictly speaking, not all those 'error' calls need that
redirection to send their output to the terminal, see e.g.
'test_expect_success' in the opening example, but I think it's
better to be consistent.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-11-19 14:13:26 +01:00
|
|
|
BUG "too few parameters to test_i18ngrep"
|
2018-02-08 16:56:55 +01:00
|
|
|
fi
|
|
|
|
|
2018-11-08 22:15:29 +01:00
|
|
|
if test_have_prereq !C_LOCALE_OUTPUT
|
2018-02-08 16:56:54 +01:00
|
|
|
then
|
t: make 'test_i18ngrep' more informative on failure
When 'test_i18ngrep' can't find the expected pattern, it exits
completely silently; when its negated form does find the pattern that
shouldn't be there, it prints the matching line(s) but otherwise exits
without any error message. This leaves the developer puzzled about
what could have gone wrong.
Make 'test_i18ngrep' more informative on failure by printing an error
message including the invoked 'grep' command and the contents of the
file it had to scan through.
Note that this "dump the scanned file" part is not quite perfect, as
it dumps only the file specified as the function's last positional
parameter, thus assuming that there is only a single file parameter.
I think that's a reasonable assumption to make, one that holds true in
the current code base. And even if someone were to scan multiple
files at once in the future, the worst thing that could happen is that
the verbose error message won't include the contents of all those
files, only the last one. Alas, we can't really do any better than
this, because checking whether the other positional parameters match a
filename can result in false positives: 't3400-rebase.sh' and
't3404-rebase-interactive.sh' contain one test each where the
'test_i18ngrep' pattern verbatim matches the name of a file in the
trash directory.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Reviewed-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-02-08 16:56:56 +01:00
|
|
|
# pretend success
|
|
|
|
return 0
|
|
|
|
fi
|
|
|
|
|
|
|
|
if test "x!" = "x$1"
|
2018-02-08 16:56:54 +01:00
|
|
|
then
|
|
|
|
shift
|
2018-02-08 16:56:56 +01:00
|
|
|
! grep "$@" && return 0
|
|
|
|
|
2018-02-22 07:48:37 +01:00
|
|
|
echo >&4 "error: '! grep $@' did find a match in:"
|
2018-02-08 16:56:54 +01:00
|
|
|
else
|
2018-02-08 16:56:56 +01:00
|
|
|
grep "$@" && return 0
|
|
|
|
|
2018-02-22 07:48:37 +01:00
|
|
|
echo >&4 "error: 'grep $@' didn't find a match in:"
|
2018-02-08 16:56:54 +01:00
|
|
|
fi
|
2018-02-08 16:56:56 +01:00
|
|
|
|
|
|
|
if test -s "$last_arg"
|
|
|
|
then
|
2018-02-22 07:48:37 +01:00
|
|
|
cat >&4 "$last_arg"
|
2018-02-08 16:56:56 +01:00
|
|
|
else
|
2018-02-22 07:48:37 +01:00
|
|
|
echo >&4 "<File '$last_arg' is empty>"
|
2018-02-08 16:56:56 +01:00
|
|
|
fi
|
|
|
|
|
|
|
|
return 1
|
2018-02-08 16:56:54 +01:00
|
|
|
}
|
|
|
|
|
2014-10-10 08:11:14 +02:00
|
|
|
# Call any command "$@" but be more verbose about its
|
|
|
|
# failure. This is handy for commands like "test" which do
|
|
|
|
# not output anything when they fail.
|
|
|
|
verbose () {
|
|
|
|
"$@" && return 0
|
2018-02-22 07:48:37 +01:00
|
|
|
echo >&4 "command failed: $(git rev-parse --sq-quote "$@")"
|
2014-10-10 08:11:14 +02:00
|
|
|
return 1
|
|
|
|
}
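For example, instead of a silent 'test' the failed command is echoed to
fd 4 (the variables here are made up):

	verbose test "$actual" = "$expect"
	# on failure prints: command failed: test '...' = '...'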
|
|
|
|
|
2013-06-09 20:29:20 +02:00
|
|
|
# Check if the file expected to be empty is indeed empty, and barfs
|
|
|
|
# otherwise.
|
|
|
|
|
|
|
|
test_must_be_empty () {
|
2018-03-26 15:11:24 +02:00
|
|
|
test_path_is_file "$1" &&
|
|
|
|
if test -s "$1"
|
2013-06-09 20:29:20 +02:00
|
|
|
then
|
|
|
|
echo "'$1' is not empty, it contains:"
|
|
|
|
cat "$1"
|
|
|
|
return 1
|
|
|
|
fi
|
|
|
|
}
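Typical usage (sketch):

	git diff --stat >out &&
	test_must_be_empty out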
|
|
|
|
|
2012-12-21 20:10:10 +01:00
|
|
|
# Tests that its two parameters refer to the same revision
|
|
|
|
test_cmp_rev () {
|
2018-11-19 14:28:18 +01:00
|
|
|
if test $# != 2
|
|
|
|
then
|
|
|
|
error "bug in the test script: test_cmp_rev requires two revisions, but got $#"
|
|
|
|
else
|
|
|
|
local r1 r2
|
|
|
|
r1=$(git rev-parse --verify "$1") &&
|
|
|
|
r2=$(git rev-parse --verify "$2") &&
|
|
|
|
if test "$r1" != "$r2"
|
|
|
|
then
|
|
|
|
cat >&4 <<-EOF
|
|
|
|
error: two revisions point to different objects:
|
|
|
|
'$1': $r1
|
|
|
|
'$2': $r2
|
|
|
|
EOF
|
|
|
|
return 1
|
|
|
|
fi
|
|
|
|
fi
|
2012-12-21 20:10:10 +01:00
|
|
|
}
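Typical usage (the branch name is made up):

	test_cmp_rev HEAD topic		# fails, with details on fd 4, if they differ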
|
|
|
|
|
2016-05-09 20:36:09 +02:00
|
|
|
# Print a sequence of integers in increasing order, either with
|
|
|
|
# two arguments (start and end):
|
2012-08-04 00:21:04 +02:00
|
|
|
#
|
2016-05-09 20:36:09 +02:00
|
|
|
# test_seq 1 5 -- outputs 1 2 3 4 5 one line at a time
|
|
|
|
#
|
|
|
|
# or with one argument (end), in which case it starts counting
|
|
|
|
# from 1.
|
2012-08-04 00:21:04 +02:00
|
|
|
|
|
|
|
test_seq () {
|
|
|
|
case $# in
|
|
|
|
1) set 1 "$@" ;;
|
|
|
|
2) ;;
|
tests: send "bug in the test script" errors to the script's stderr
Some of the functions in our test library check that they were invoked
properly with conditions like this:
test "$#" = 2 ||
error "bug in the test script: not 2 parameters to test-expect-success"
If this particular condition is triggered, then 'error' will abort the
whole test script with a bold red error message [1] right away.
However, under certain circumstances the test script will be aborted
completely silently, namely if:
- a similar condition in a test helper function like
'test_line_count' is triggered,
- which is invoked from the test script's "main" shell [2],
- and the test script is run manually (i.e. './t1234-foo.sh' as
opposed to 'make t1234-foo.sh' or 'make test') [3]
- and without the '--verbose' option,
because the error message is printed from within 'test_eval_', where
standard output is redirected either to /dev/null or to a log file.
The only indication that something is wrong is that not all tests in
the script are executed and at the end of the test script's output
there is no "# passed all N tests" message, which are subtle and can
easily go unnoticed, as I had to experience myself.
Send these "bug in the test script" error messages directly to the
test scripts standard error and thus to the terminal, so those bugs
will be much harder to overlook. Instead of updating all ~20 such
'error' calls with a redirection, let's add a BUG() function to
'test-lib.sh', wrapping an 'error' call with the proper redirection
and also including the common prefix of those error messages, and
convert all those call sites [4] to use this new BUG() function
instead.
[1] That particular error message from 'test_expect_success' is
printed in color only when running with or without '--verbose';
with '--tee' or '--verbose-log' the error is printed without
color, but it is printed to the terminal nonetheless.
[2] If such a condition is triggered in a subshell of a test, then
'error' won't be able to abort the whole test script, but only the
subshell, which in turn causes the test to fail in the usual way,
indicating loudly and clearly that something is wrong.
[3] Well, 'error' aborts the test script the same way when run
manually or by 'make' or 'prove', but both 'make' and 'prove' pay
attention to the test script's exit status, and even a silently
aborted test script would then trigger those tools' usual
noticable error messages.
[4] Strictly speaking, not all those 'error' calls need that
redirection to send their output to the terminal, see e.g.
'test_expect_success' in the opening example, but I think it's
better to be consistent.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-11-19 14:13:26 +01:00
|
|
|
*) BUG "not 1 or 2 parameters to test_seq" ;;
|
2012-08-04 00:21:04 +02:00
|
|
|
esac
|
2016-05-09 21:37:01 +02:00
|
|
|
test_seq_counter__=$1
|
|
|
|
while test "$test_seq_counter__" -le "$2"
|
|
|
|
do
|
|
|
|
echo "$test_seq_counter__"
|
|
|
|
test_seq_counter__=$(( $test_seq_counter__ + 1 ))
|
|
|
|
done
|
2012-08-04 00:21:04 +02:00
|
|
|
}
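For example:

	test_seq 3		# prints 1 2 3, one per line
	test_seq 2 4		# prints 2 3 4
	test_seq 10 >expect	# handy for generating expected output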
|
|
|
|
|
2012-02-17 11:25:08 +01:00
|
|
|
# This function can be used to schedule some commands to be run
|
|
|
|
# unconditionally at the end of the test to restore sanity:
|
|
|
|
#
|
|
|
|
# test_expect_success 'test core.capslock' '
|
|
|
|
# git config core.capslock true &&
|
|
|
|
# test_when_finished "git config --unset core.capslock" &&
|
|
|
|
# hello world
|
|
|
|
# '
|
|
|
|
#
|
|
|
|
# That would be roughly equivalent to
|
|
|
|
#
|
|
|
|
# test_expect_success 'test core.capslock' '
|
|
|
|
# git config core.capslock true &&
|
|
|
|
# hello world
|
|
|
|
# git config --unset core.capslock
|
|
|
|
# '
|
|
|
|
#
|
|
|
|
# except that the greeting and config --unset must both succeed for
|
|
|
|
# the test to pass.
|
|
|
|
#
|
|
|
|
# Note that under --immediate mode, no clean-up is done to help diagnose
|
|
|
|
# what went wrong.
|
|
|
|
|
|
|
|
test_when_finished () {
|
2015-09-05 15:12:49 +02:00
|
|
|
# We cannot detect when we are in a subshell in general, but by
|
|
|
|
# doing so on Bash is better than nothing (the test will
|
|
|
|
# silently pass on other shells).
|
|
|
|
test "${BASH_SUBSHELL-0}" = 0 ||
|
tests: send "bug in the test script" errors to the script's stderr
Some of the functions in our test library check that they were invoked
properly with conditions like this:
test "$#" = 2 ||
error "bug in the test script: not 2 parameters to test-expect-success"
If this particular condition is triggered, then 'error' will abort the
whole test script with a bold red error message [1] right away.
However, under certain circumstances the test script will be aborted
completely silently, namely if:
- a similar condition in a test helper function like
'test_line_count' is triggered,
- which is invoked from the test script's "main" shell [2],
- and the test script is run manually (i.e. './t1234-foo.sh' as
opposed to 'make t1234-foo.sh' or 'make test') [3]
- and without the '--verbose' option,
because the error message is printed from within 'test_eval_', where
standard output is redirected either to /dev/null or to a log file.
The only indication that something is wrong is that not all tests in
the script are executed and at the end of the test script's output
there is no "# passed all N tests" message, which are subtle and can
easily go unnoticed, as I had to experience myself.
Send these "bug in the test script" error messages directly to the
test scripts standard error and thus to the terminal, so those bugs
will be much harder to overlook. Instead of updating all ~20 such
'error' calls with a redirection, let's add a BUG() function to
'test-lib.sh', wrapping an 'error' call with the proper redirection
and also including the common prefix of those error messages, and
convert all those call sites [4] to use this new BUG() function
instead.
[1] That particular error message from 'test_expect_success' is
printed in color only when running with or without '--verbose';
with '--tee' or '--verbose-log' the error is printed without
color, but it is printed to the terminal nonetheless.
[2] If such a condition is triggered in a subshell of a test, then
'error' won't be able to abort the whole test script, but only the
subshell, which in turn causes the test to fail in the usual way,
indicating loudly and clearly that something is wrong.
[3] Well, 'error' aborts the test script the same way when run
manually or by 'make' or 'prove', but both 'make' and 'prove' pay
attention to the test script's exit status, and even a silently
aborted test script would then trigger those tools' usual
noticable error messages.
[4] Strictly speaking, not all those 'error' calls need that
redirection to send their output to the terminal, see e.g.
'test_expect_success' in the opening example, but I think it's
better to be consistent.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-11-19 14:13:26 +01:00
|
|
|
BUG "test_when_finished does nothing in a subshell"
|
2012-02-17 11:25:08 +01:00
|
|
|
test_cleanup="{ $*
|
|
|
|
} && (exit \"\$eval_ret\"); eval_ret=\$?; $test_cleanup"
|
|
|
|
}
|
|
|
|
|
test-lib: introduce 'test_atexit'
When running Apache, 'git daemon', or p4d, we want to kill them at the
end of the test script, otherwise a leftover daemon process will keep
its port open indefinitely, and thus will interfere with subsequent
executions of the same test script.
So far, we stop these daemon processes "manually", i.e.:
- by registering functions or commands in the trap on EXIT to stop
the daemon while preserving the last seen exit code before the
trap (to deal with a failure when run with '--immediate' or with
interrupts by ctrl-C),
- and by invoking these functions/commands last thing before
'test_done' (and sometimes restoring the test framework's default
trap on EXIT, to prevent the daemons from being killed twice).
On one hand, we do this inconsistently, e.g. 'git p4' tests invoke
different functions in the trap on EXIT and in the last test before
'test_done', and they neither restore the test framework's default trap
on EXIT nor preserve the last seen exit code. On the other hand, this
is error prone, because, as shown in a previous patch in this series,
any output from the cleanup commands in the trap on EXIT can prevent a
proper cleanup when a test script run with '--verbose-log' and certain
shells, notably 'dash', is interrupted.
Let's introduce 'test_atexit', which is loosely modeled after
'test_when_finished', but has a broader scope: rather than running the
commands after the current test case, run them when the test script
finishes, and also run them when the test is interrupted, or exits
early in case of a failure while the '--immediate' option is in
effect.
When running the cleanup commands at the end of a successful test,
then they will be run in 'test_done' before it removes the trash
directory, i.e. the cleanup commands will still be able to access any
pidfiles or socket files in there. When running the cleanup commands
after an interrupt or failure with '--immediate', then they will be
run in the trap on EXIT. In both cases they will be run in
'test_eval_', i.e. both standard error and output of all cleanup
commands will go where they should according to the '-v' or
'--verbose-log' options, and thus won't cause any troubles when
interrupting a test script run with '--verbose-log'.
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-03-13 13:24:11 +01:00
|
|
|
# This function can be used to schedule some commands to be run
|
|
|
|
# unconditionally at the end of the test script, e.g. to stop a daemon:
|
|
|
|
#
|
|
|
|
# test_expect_success 'test git daemon' '
|
|
|
|
# git daemon &
|
|
|
|
# daemon_pid=$! &&
|
|
|
|
# test_atexit 'kill $daemon_pid' &&
|
|
|
|
# hello world
|
|
|
|
# '
|
|
|
|
#
|
|
|
|
# The commands will be executed before the trash directory is removed,
|
|
|
|
# i.e. the atexit commands will still be able to access any pidfiles or
|
|
|
|
# socket files.
|
|
|
|
#
|
|
|
|
# Note that these commands will be run even when a test script run
|
|
|
|
# with '--immediate' fails. Be careful with your atexit commands to
|
|
|
|
# minimize any changes to the failed state.
|
|
|
|
|
|
|
|
test_atexit () {
|
|
|
|
# We cannot detect when we are in a subshell in general, but by
|
|
|
|
# doing so on Bash is better than nothing (the test will
|
|
|
|
# silently pass on other shells).
|
|
|
|
test "${BASH_SUBSHELL-0}" = 0 ||
|
|
|
|
error "bug in test script: test_atexit does nothing in a subshell"
|
|
|
|
test_atexit_cleanup="{ $*
|
|
|
|
} && (exit \"\$eval_ret\"); eval_ret=\$?; $test_atexit_cleanup"
|
|
|
|
}
|
|
|
|
|
2012-02-17 11:25:08 +01:00
|
|
|
# Most tests can use the created repository, but some may need to create more.
|
|
|
|
# Usage: test_create_repo <directory>
|
|
|
|
test_create_repo () {
|
|
|
|
test "$#" = 1 ||
|
tests: send "bug in the test script" errors to the script's stderr
Some of the functions in our test library check that they were invoked
properly with conditions like this:
test "$#" = 2 ||
error "bug in the test script: not 2 parameters to test-expect-success"
If this particular condition is triggered, then 'error' will abort the
whole test script with a bold red error message [1] right away.
However, under certain circumstances the test script will be aborted
completely silently, namely if:
- a similar condition in a test helper function like
'test_line_count' is triggered,
- which is invoked from the test script's "main" shell [2],
- and the test script is run manually (i.e. './t1234-foo.sh' as
opposed to 'make t1234-foo.sh' or 'make test') [3]
- and without the '--verbose' option,
because the error message is printed from within 'test_eval_', where
standard output is redirected either to /dev/null or to a log file.
The only indication that something is wrong is that not all tests in
the script are executed and at the end of the test script's output
there is no "# passed all N tests" message, which are subtle and can
easily go unnoticed, as I had to experience myself.
Send these "bug in the test script" error messages directly to the
test scripts standard error and thus to the terminal, so those bugs
will be much harder to overlook. Instead of updating all ~20 such
'error' calls with a redirection, let's add a BUG() function to
'test-lib.sh', wrapping an 'error' call with the proper redirection
and also including the common prefix of those error messages, and
convert all those call sites [4] to use this new BUG() function
instead.
[1] That particular error message from 'test_expect_success' is
printed in color only when running with or without '--verbose';
with '--tee' or '--verbose-log' the error is printed without
color, but it is printed to the terminal nonetheless.
[2] If such a condition is triggered in a subshell of a test, then
'error' won't be able to abort the whole test script, but only the
subshell, which in turn causes the test to fail in the usual way,
indicating loudly and clearly that something is wrong.
[3] Well, 'error' aborts the test script the same way when run
manually or by 'make' or 'prove', but both 'make' and 'prove' pay
attention to the test script's exit status, and even a silently
aborted test script would then trigger those tools' usual
noticable error messages.
[4] Strictly speaking, not all those 'error' calls need that
redirection to send their output to the terminal, see e.g.
'test_expect_success' in the opening example, but I think it's
better to be consistent.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-11-19 14:13:26 +01:00
|
|
|
BUG "not 1 parameter to test-create-repo"
|
2012-02-17 11:25:08 +01:00
|
|
|
repo="$1"
|
|
|
|
mkdir -p "$repo"
|
|
|
|
(
|
|
|
|
cd "$repo" || error "Cannot setup test environment"
|
tests: explicitly use `git.exe` on Windows
On Windows, when we refer to `/an/absolute/path/to/git`, it magically
resolves `git.exe` at that location. Except if something of the name
`git` exists next to that `git.exe`. So if we call `$BUILD_DIR/git`, it
will find `$BUILD_DIR/git.exe` *only* if there is not, say, a directory
called `$BUILD_DIR/git`.
Such a directory, however, exists in Git for Windows when building with
Visual Studio (our Visual Studio project generator defaults to putting
the build files into a directory whose name is the base name of the
corresponding `.exe`).
In the bin-wrappers/* scripts, we already take pains to use `git.exe`
rather than `git`, as this could pick up the wrong thing on Windows
(i.e. if there exists a `git` file or directory in the build directory).
Now we do the same in the tests' start-up code.
This also helps when testing an installed Git, as there might be even
more likely some stray file or directory in the way.
Note: the only way we can record whether to use the `.exe` suffix is by writing
it to the `GIT-BUILD-OPTIONS` file and sourcing it at the beginning of
`t/test-lib.sh`. This is not a requirement introduced by this patch, but
we move the call to be able to use the `$X` variable that holds the file
extension, if any.
Note also: the many, many calls to `git this` and `git that` are
unaffected, as the regular PATH search will find the `.exe` files on
Windows (and not be confused by a directory of the name `git` that is
in one of the directories listed in the `PATH` variable), while
`/path/to/git` would not, per se, know that it is looking for an
executable and happily prefer such a directory.
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-11-14 17:32:11 +01:00
|
|
|
"${GIT_TEST_INSTALLED:-$GIT_EXEC_PATH}/git$X" init \
|
2018-11-12 14:48:34 +01:00
|
|
|
"--template=$GIT_BUILD_DIR/templates/blt/" >&3 2>&4 ||
|
2012-02-17 11:25:08 +01:00
|
|
|
error "cannot run git init -- have you built things yet?"
|
|
|
|
mv .git/hooks .git/hooks-disabled
|
|
|
|
) || exit
|
|
|
|
}
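# Illustrative example (the directory name "satellite" is made up):
#
#   test_create_repo satellite &&
#   git -C satellite status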
|
2013-06-07 22:53:27 +02:00
|
|
|
|
|
|
|
# This function helps on symlink-challenged file systems when it is not
|
|
|
|
# important that the file system entry is a symbolic link.
|
|
|
|
# Use test_ln_s_add instead of "ln -s x y && git add y" to add a
|
|
|
|
# symbolic link entry y to the index.
|
|
|
|
|
|
|
|
test_ln_s_add () {
|
|
|
|
if test_have_prereq SYMLINKS
|
|
|
|
then
|
|
|
|
ln -s "$1" "$2" &&
|
|
|
|
git update-index --add "$2"
|
|
|
|
else
|
|
|
|
printf '%s' "$1" >"$2" &&
|
|
|
|
ln_s_obj=$(git hash-object -w "$2") &&
|
2015-02-23 19:14:47 +01:00
|
|
|
git update-index --add --cacheinfo 120000 $ln_s_obj "$2" &&
|
|
|
|
# pick up stat info from the file
|
|
|
|
git update-index "$2"
|
2013-06-07 22:53:27 +02:00
|
|
|
fi
|
|
|
|
}
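# Illustrative example (hypothetical names): instead of
#
#   ln -s target link && git add link
#
# a test can write
#
#   test_ln_s_add target link
#
# and still get a 120000 (symlink) index entry on file systems without symlinks.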
|
2013-10-26 21:17:15 +02:00
|
|
|
|
2014-04-27 20:15:47 +02:00
|
|
|
# This function writes out its parameters, one per line
|
|
|
|
test_write_lines () {
|
|
|
|
printf "%s\n" "$@"
|
|
|
|
}
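# Illustrative example ("expect" is a hypothetical file name); both commands
# write the same three-line file:
#
#   printf '%s\n' a b c >expect
#   test_write_lines a b c >expect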
|
|
|
|
|
t: provide a perl() function which uses $PERL_PATH
Once upon a time, we assumed that calling a bare "perl" in
the test scripts was OK, because we would find the perl from
the user's PATH, and we were only asking that perl to do
basic operations that work even on old versions of perl.
Later, we found that some systems really prefer to use
$PERL_PATH even for these basic cases, because the system
perl misbehaves in some way (e.g., by handling line endings
differently). We then switched "perl" invocations to
"$PERL_PATH" to respect the user's choice.
Having to use "$PERL_PATH" is ugly and cumbersome, though.
Instead, let's provide a perl() shell function that tests
can use, which will transparently do the right thing.
Unfortunately, test writers still have to use $PERL_PATH in
certain situations, so we still need to keep the advice in
the README.
Note that this may fix test failures in t5004, t5503, t6002,
t6003, t6300, t8001, and t8002, depending on your system's
perl setup. All of these can be detected by running:
ln -s /bin/false bin-wrappers/perl
make test
which fails before this patch, and passes after.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-10-29 02:22:07 +01:00
|
|
|
perl () {
|
t: prevent '-x' tracing from interfering with test helpers' stderr
Running a test script with '-x' turns on 'set -x' tracing, the output
of which is normally sent to stderr. This causes a lot of
test failures, because many tests redirect and verify the stderr
of shell functions, most frequently that of 'test_must_fail'.
These issues were worked around somewhat in d88785e424 (test-lib: set
BASH_XTRACEFD automatically, 2016-05-11), so at least we could
reliably run tests with '-x' tracing under a Bash version supporting
BASH_XTRACEFD, i.e. v4.1 and later.
Furthermore, redirecting the stderr of test helper functions like
'test_must_fail' or 'test_expect_code' is the cause of a different
issue as well. If these functions detect something unexpected, they
will write their error messages, intended for the user, to their stderr.
However, if their stderr is redirected in order to save and verify the
stderr of the tested git command invoked in the function, then the
function's error messages will be redirected as well. Consequently,
those messages won't reach the user, making the test's verbose output
less useful.
This patch makes it safe to redirect and verify the stderr of those
test helper functions which are meant to run the tested command given
as argument, even when running tests with '-x' and /bin/sh. This is
achieved through a couple of file descriptor redirections:
- Duplicate stderr of the tested command executed in the test helper
function from the function's fd 7 (see next point), to ensure that
the tested command's error messages go to a different fd than the
'-x' trace of the commands executed in the function or the
function's error messages.
- Duplicate the test helper function's fd 7 from the function's
original stderr, meaning that, after taking a detour through fd 7,
the error messages of the tested command do end up on the
function's original stderr.
- Duplicate stderr of the test helper function from fd 4, i.e. the
fd connected to the test script's original stderr and the fd used
for BASH_XTRACEFD. This ensures that the '-x' trace of the
commands executed in the function
- doesn't go to the function's original stderr, so it won't mess
with callers who want to save and verify the tested command's
stderr.
- does go to the same fd independently from the shell running
the test script, be it /bin/sh, an older Bash without
BASH_XTRACEFD, or a more recent Bash already supporting
BASH_XTRACEFD.
Furthermore, this also makes sure that the function's error
messages go to this fd 4, meaning that the user will be able to
see them even if the function's stderr is redirected in the test.
- Specify the latter two redirections above in the test helper
function's definition, so they are performed every time the
function is invoked, without the need to modify the callsites of
the function.
Perform these redirections in those test helper functions which can be
expected to have their stderr redirected, i.e. in the functions
'test_must_fail', 'test_might_fail', 'test_expect_code', 'test_env',
'nongit', 'test_terminal' and 'perl'. Note that 'test_might_fail',
'test_env', and 'nongit' are not involved in any test failures when
running tests with '-x' and /bin/sh.
The other test helper functions are left unchanged, because they
either don't run commands specified as their arguments, or redirecting
their stderr wouldn't make sense, or both.
With this change the number of failures when running the test suite
with '-x' tracing and /bin/sh goes down from 340 failed tests in 43
test scripts to 22 failed tests in 6 scripts (or 23 in 7, if the
system (OSX) uses an older Bash version without BASH_XTRACEFD to run
't9903-bash-prompt.sh').
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-02-25 14:40:15 +01:00
|
|
|
command "$PERL_PATH" "$@" 2>&7
|
|
|
|
} 7>&2 2>&4
|
2013-11-04 23:58:01 +01:00
|
|
|
|
tests: turn on network daemon tests by default
We do not run the httpd nor git-daemon tests by default, as
they are rather heavyweight and require network access
(albeit over localhost). However, it would be nice if more
people ran them, for two reasons:
1. We would get more test coverage on more systems.
2. The point of the test suite is to find regressions. It
is very easy to change some of the underlying code and
break the httpd code without realizing you are even
affecting it. Running the httpd tests helps find these
problems sooner (ideally before the patches even hit
the list).
We still want to leave an "out", though, for people who really do
not want to run them. For that reason, the GIT_TEST_HTTPD and
GIT_TEST_GIT_DAEMON variables are now tri-state booleans
(true/false/auto), so you can say GIT_TEST_HTTPD=false to turn the
tests back off. To support those who want a stable single way to
disable these tests across versions of Git before and after this
change, an empty string explicitly set to these variables is also
taken as "false", so the behaviour changes only for those who:
a. did not express any preference by leaving these variables
unset. They did not test these features before, but now they
do; or
b. did express that they want to test these features by setting
GIT_TEST_FEATURE=false (or any equivalent other ways to tell
"false" to Git, e.g. "0"), which has been a valid but funny way
to say that they do want to test the feature only because we
used to interpret any non-empty string to mean "yes please
test". They no longer test that feature.
In addition, we are forgiving of common setup failures (e.g., you do
not have apache installed, or have an old version) when the
tri-state is "auto" (or unset), but report an error when it is
"true". This makes "auto" a sane default, as we should not cause
failures on setups where the tests cannot run. But it allows people
who use "true" to catch regressions in their system (e.g., they
uninstalled apache, but were expecting their automated test runs to
test git-httpd, and would want to be notified).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-02-10 22:29:37 +01:00
|
|
|
# Is the value one of the various ways to spell a boolean true/false?
|
|
|
|
test_normalize_bool () {
|
|
|
|
git -c magic.variable="$1" config --bool magic.variable 2>/dev/null
|
|
|
|
}
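# Illustrative examples, relying on "git config --bool" semantics:
#
#   test_normalize_bool yes    # prints "true"
#   test_normalize_bool 0      # prints "false"
#   test_normalize_bool bogus  # prints nothing and returns non-zero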
|
|
|
|
|
|
|
|
# Given a variable $1, normalize the value of it to one of "true",
|
|
|
|
# "false", or "auto" and store the result to it.
|
|
|
|
#
|
|
|
|
# test_tristate GIT_TEST_HTTPD
|
|
|
|
#
|
|
|
|
# A variable set to an empty string is set to 'false'.
|
|
|
|
# A variable set to 'false' or 'auto' keeps its value.
|
|
|
|
# Anything else is set to 'true'.
|
|
|
|
# An unset variable defaults to 'auto'.
|
|
|
|
#
|
|
|
|
# The last rule is to allow people to set the variable to an empty
|
|
|
|
# string and export it to decline testing the particular feature
|
|
|
|
# for versions both before and after this change. We used to treat
|
|
|
|
# both unset and empty variable as a signal for "do not test" and
|
|
|
|
# took any non-empty string as "please test".
|
|
|
|
|
|
|
|
test_tristate () {
|
|
|
|
if eval "test x\"\${$1+isset}\" = xisset"
|
|
|
|
then
|
|
|
|
# explicitly set
|
|
|
|
eval "
|
|
|
|
case \"\$$1\" in
|
|
|
|
'') $1=false ;;
|
|
|
|
auto) ;;
|
|
|
|
*) $1=\$(test_normalize_bool \$$1 || echo true) ;;
|
|
|
|
esac
|
|
|
|
"
|
|
|
|
else
|
|
|
|
eval "$1=auto"
|
|
|
|
fi
|
|
|
|
}
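# Illustrative example, using GIT_TEST_GIT_DAEMON from the commit message above:
#
#   GIT_TEST_GIT_DAEMON=YesPlease
#   test_tristate GIT_TEST_GIT_DAEMON   # now "true"
#   GIT_TEST_GIT_DAEMON=
#   test_tristate GIT_TEST_GIT_DAEMON   # now "false"
#   unset GIT_TEST_GIT_DAEMON
#   test_tristate GIT_TEST_GIT_DAEMON   # now "auto"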
|
|
|
|
|
|
|
|
# Exit the test suite, either by skipping all remaining tests or by
|
|
|
|
# exiting with an error. If "$1" is "auto", then we assume we were
|
|
|
|
# opportunistically trying to set up some tests and we skip. If it is
|
|
|
|
# "true", then we report a failure.
|
|
|
|
#
|
|
|
|
# The error/skip message should be given by $2.
|
|
|
|
#
|
|
|
|
test_skip_or_die () {
|
|
|
|
case "$1" in
|
|
|
|
auto)
|
|
|
|
skip_all=$2
|
|
|
|
test_done
|
|
|
|
;;
|
|
|
|
true)
|
|
|
|
error "$2"
|
|
|
|
;;
|
|
|
|
*)
|
|
|
|
error "BUG: test tristate is '$1' (real error: $2)"
|
|
|
|
esac
|
|
|
|
}
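# Illustrative sketch ("start_httpd" stands in for whatever setup step failed):
#
#   test_tristate GIT_TEST_HTTPD
#   if ! start_httpd
#   then
#       test_skip_or_die $GIT_TEST_HTTPD "web server setup failed"
#   fi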
|
|
|
|
|
2013-10-26 21:17:15 +02:00
|
|
|
# The following mingw_* functions obey POSIX shell syntax, but are actually
|
|
|
|
# bash scripts, and are meant to be used only with bash on Windows.
|
|
|
|
|
|
|
|
# A test_cmp function that treats LF and CRLF as equal and avoids forking
|
|
|
|
# diff when possible.
|
|
|
|
mingw_test_cmp () {
|
|
|
|
# Read text into shell variables and compare them. If the results
|
|
|
|
# are different, use regular diff to report the difference.
|
|
|
|
local test_cmp_a= test_cmp_b=
|
|
|
|
|
|
|
|
# When text came from stdin (one argument is '-') we must feed it
|
|
|
|
# to diff.
|
|
|
|
local stdin_for_diff=
|
|
|
|
|
|
|
|
# Since it is difficult to detect the difference between an
|
|
|
|
# empty input file and a failure to read the files, we go straight
|
|
|
|
# to diff if one of the inputs is empty.
|
|
|
|
if test -s "$1" && test -s "$2"
|
|
|
|
then
|
|
|
|
# regular case: both files non-empty
|
|
|
|
mingw_read_file_strip_cr_ test_cmp_a <"$1"
|
|
|
|
mingw_read_file_strip_cr_ test_cmp_b <"$2"
|
|
|
|
elif test -s "$1" && test "$2" = -
|
|
|
|
then
|
|
|
|
# read 2nd file from stdin
|
|
|
|
mingw_read_file_strip_cr_ test_cmp_a <"$1"
|
|
|
|
mingw_read_file_strip_cr_ test_cmp_b
|
|
|
|
stdin_for_diff='<<<"$test_cmp_b"'
|
|
|
|
elif test "$1" = - && test -s "$2"
|
|
|
|
then
|
|
|
|
# read 1st file from stdin
|
|
|
|
mingw_read_file_strip_cr_ test_cmp_a
|
|
|
|
mingw_read_file_strip_cr_ test_cmp_b <"$2"
|
|
|
|
stdin_for_diff='<<<"$test_cmp_a"'
|
|
|
|
fi
|
|
|
|
test -n "$test_cmp_a" &&
|
|
|
|
test -n "$test_cmp_b" &&
|
|
|
|
test "$test_cmp_a" = "$test_cmp_b" ||
|
|
|
|
eval "diff -u \"\$@\" $stdin_for_diff"
|
|
|
|
}
|
|
|
|
|
|
|
|
# $1 is the name of the shell variable to fill in
|
|
|
|
mingw_read_file_strip_cr_ () {
|
|
|
|
# Read line-wise using LF as the line separator
|
|
|
|
# and use IFS to strip CR.
|
|
|
|
local line
|
|
|
|
while :
|
|
|
|
do
|
|
|
|
if IFS=$'\r' read -r -d $'\n' line
|
|
|
|
then
|
|
|
|
# good
|
|
|
|
line=$line$'\n'
|
|
|
|
else
|
|
|
|
# we get here at EOF, but also if the last line
|
|
|
|
# was not terminated by LF; in the latter case,
|
|
|
|
# some text was read
|
|
|
|
if test -z "$line"
|
|
|
|
then
|
|
|
|
# EOF, really
|
|
|
|
break
|
|
|
|
fi
|
|
|
|
fi
|
|
|
|
eval "$1=\$$1\$line"
|
|
|
|
done
|
|
|
|
}
|
2016-06-01 09:04:26 +02:00
|
|
|
|
|
|
|
# Like "env FOO=BAR some-program", but run inside a subshell, which means
|
|
|
|
# it also works for shell functions (though those functions cannot impact
|
|
|
|
# the environment outside of the test_env invocation).
|
|
|
|
test_env () {
|
|
|
|
(
|
|
|
|
while test $# -gt 0
|
|
|
|
do
|
|
|
|
case "$1" in
|
|
|
|
*=*)
|
|
|
|
eval "${1%%=*}=\${1#*=}"
|
|
|
|
eval "export ${1%%=*}"
|
|
|
|
shift
|
|
|
|
;;
|
|
|
|
*)
|
2018-02-25 14:40:15 +01:00
|
|
|
"$@" 2>&7
|
2016-06-01 09:04:26 +02:00
|
|
|
exit
|
|
|
|
;;
|
|
|
|
esac
|
|
|
|
done
|
|
|
|
)
|
2018-02-25 14:40:15 +01:00
|
|
|
} 7>&2 2>&4
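# Illustrative example (the traced variable is arbitrary): run one command with
# a modified environment without leaking it into later commands:
#
#   test_env GIT_TRACE=1 git status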
|
t9300: factor out portable "head -c" replacement
It is sometimes useful to be able to read exactly N bytes from a
pipe. Doing this portably turns out to be surprisingly difficult
in shell scripts.
We want a solution that:
- is portable
- never reads more than N bytes due to buffering (which
would mean those bytes are not available to the next
program to read from the same pipe)
- handles partial reads by looping until N bytes are read
(or we see EOF)
- is resilient to stray signals giving us EINTR while
trying to read (even though we don't send them, things
like SIGWINCH could cause apparently-random failures)
Some possible solutions are:
- "head -c" is not portable, and implementations may
buffer (though GNU head does not)
- "read -N" is a bash-ism, and thus not portable
- "dd bs=$n count=1" does not handle partial reads. GNU dd
has iflags=fullblock, but that is not portable
- "dd bs=1 count=$n" fixes the partial read problem (all
reads are 1-byte, so there can be no partial response).
It does make a lot of write() calls, but for our tests
that's unlikely to matter. It's fairly portable. We
already use it in our tests, and it's unlikely that
implementations would screw up any of our criteria. The
most unknown one would be signal handling.
- perl can do a sysread() loop pretty easily. On my Linux
system, at least, it seems to restart the read() call
automatically. If that turns out not to be portable,
though, it would be easy for us to handle it.
That makes the perl solution the least bad (because we
conveniently omitted "length of code" as a criterion).
It's also what t9300 is currently using, so we can just pull
the implementation from there.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-06-30 11:07:54 +02:00
|
|
|
|
2016-06-30 10:16:18 +02:00
|
|
|
# Returns true if the numeric exit code in "$2" represents the expected signal
|
|
|
|
# in "$1". Signals should be given numerically.
|
|
|
|
test_match_signal () {
|
|
|
|
if test "$2" = "$((128 + $1))"
|
|
|
|
then
|
|
|
|
# POSIX
|
|
|
|
return 0
|
|
|
|
elif test "$2" = "$((256 + $1))"
|
|
|
|
then
|
|
|
|
# ksh
|
|
|
|
return 0
|
|
|
|
fi
|
|
|
|
return 1
|
|
|
|
}
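# Illustrative sketch (the command is hypothetical): accept both the POSIX
# (128+n) and ksh (256+n) conventions when checking for death by SIGPIPE:
#
#   run_command_that_may_die_of_sigpipe
#   test_match_signal 13 $?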
|
2016-07-19 22:22:20 +02:00
|
|
|
|
2016-06-30 11:07:54 +02:00
|
|
|
# Read up to "$1" bytes (or to EOF) from stdin and write them to stdout.
|
|
|
|
test_copy_bytes () {
|
|
|
|
perl -e '
|
|
|
|
my $len = $ARGV[1];
|
|
|
|
while ($len > 0) {
|
|
|
|
my $s;
|
|
|
|
my $nread = sysread(STDIN, $s, $len);
|
|
|
|
die "cannot read: $!" unless defined($nread);
|
2017-07-16 12:45:32 +02:00
|
|
|
last unless $nread;
|
2016-06-30 11:07:54 +02:00
|
|
|
print $s;
|
|
|
|
$len -= $nread;
|
|
|
|
}
|
|
|
|
' - "$1"
|
|
|
|
}
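# Illustrative example ("generate" is a hypothetical command): read exactly
# 10 bytes from the pipe, leaving the rest for a later reader:
#
#   generate | test_copy_bytes 10 >first-ten-bytes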
|
2016-12-16 03:30:12 +01:00
|
|
|
|
|
|
|
# run "$@" inside a non-git directory
|
|
|
|
nongit () {
|
|
|
|
test -d non-repo ||
|
|
|
|
mkdir non-repo ||
|
|
|
|
return 1
|
|
|
|
|
|
|
|
(
|
|
|
|
GIT_CEILING_DIRECTORIES=$(pwd) &&
|
|
|
|
export GIT_CEILING_DIRECTORIES &&
|
|
|
|
cd non-repo &&
|
2018-02-25 14:40:15 +01:00
|
|
|
"$@" 2>&7
|
2016-12-16 03:30:12 +01:00
|
|
|
)
|
2018-02-25 14:40:15 +01:00
|
|
|
} 7>&2 2>&4
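# Illustrative example: the command runs inside the "non-repo" directory with
# GIT_CEILING_DIRECTORIES set, so it must not discover the test repository:
#
#   nongit test_must_fail git rev-parse --git-dir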
|
t/lib-git-daemon: add network-protocol helpers
All of our git-protocol tests rely on invoking the client
and having it make a request of a server. That gives a nice
real-world test of how the two behave together, but it
doesn't leave any room for testing how a server might react
to _other_ clients.
Let's add a few test helper functions which can be used to
manually conduct a git-protocol conversation with a remote
git-daemon:
1. To connect to a remote git-daemon, we need something
like "netcat". But not everybody will have netcat. And
even if they do, the behavior with respect to
half-duplex shutdowns is not portable (openbsd netcat
has "-N", with others you must rely on "-q 1", which is
racy).
Here we provide a "fake_nc" that is capable of doing
a client-side netcat, with sane half-duplex semantics.
It relies on perl's IO::Socket::INET. That's been in
the base distribution since 5.6.0, so it's probably
available everywhere. But just to be on the safe side,
we'll add a prereq.
2. To help tests speak and read pktline, this patch adds
packetize() and depacketize() functions.
I've put fake_nc() into lib-git-daemon.sh, since that's
really the only server where we'd need to use a network
socket. Whereas the pktline helpers may be of more general
use, so I've added them to test-lib-functions.sh. Programs
like upload-pack speak pktline, but can talk directly over
stdio without a network socket.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-01-25 01:58:19 +01:00
|
|
|
|
|
|
|
# convert stdin to pktline representation; note that empty input becomes an
|
|
|
|
# empty packet, not a flush packet (for that you can just print 0000 yourself).
|
|
|
|
packetize() {
|
|
|
|
cat >packetize.tmp &&
|
|
|
|
len=$(wc -c <packetize.tmp) &&
|
|
|
|
printf '%04x%s' "$(($len + 4))" &&
|
|
|
|
cat packetize.tmp &&
|
|
|
|
rm -f packetize.tmp
|
|
|
|
}
|
|
|
|
|
|
|
|
# Parse the input as a series of pktlines, writing the result to stdout.
|
|
|
|
# Sideband markers are removed automatically, and the output is routed to
|
|
|
|
# stderr if appropriate.
|
|
|
|
#
|
|
|
|
# NUL bytes are converted to "\\0" for ease of parsing with text tools.
|
|
|
|
depacketize () {
|
|
|
|
perl -e '
|
|
|
|
while (read(STDIN, $len, 4) == 4) {
|
|
|
|
if ($len eq "0000") {
|
|
|
|
print "FLUSH\n";
|
|
|
|
} else {
|
|
|
|
read(STDIN, $buf, hex($len) - 4);
|
|
|
|
$buf =~ s/\0/\\0/g;
|
|
|
|
if ($buf =~ s/^[\x2\x3]//) {
|
|
|
|
print STDERR $buf;
|
|
|
|
} else {
|
|
|
|
$buf =~ s/^\x1//;
|
|
|
|
print $buf;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
'
|
|
|
|
}
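# Illustrative examples of the two helpers (byte counts follow the pktline
# rules: a 4-byte hex length that includes itself):
#
#   printf 'hello' | packetize     # writes "0009hello"
#   printf '0000' | depacketize    # writes "FLUSH"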
|
t: add test functions to translate hash-related values
Add several test functions to make working with various hash-related
values easier.
Add test_oid_init, which loads common hash-related constants and
placeholder object IDs from the newly added files in t/oid-info.
Provide values for these constants for both SHA-1 and SHA-256.
Add test_oid_cache, which accepts data on standard input in the form of
hash-specific key-value pairs that can be looked up later, using the
same format as the files in t/oid-info. Document this format in a
t/oid-info/README directory so that it's easier to use in the future.
Add test_oid, which is used to look up a per-hash value
(produced on standard output) based on the key specified as its
argument. Usually the data to be looked up will be a hash-related
constant (such as the size of the hash in binary or hexadecimal), a
well-known or placeholder object ID (such as the all-zeros object ID or
one consisting of "deadbeef" repeated), or something similar. For these
reasons, test_oid will usually be used within a command substitution.
Consequently, redirect the error output to standard error, since
otherwise it will not be displayed.
Add test_detect_hash, which currently only detects SHA-1, and
test_set_hash, which can be used to set a different hash algorithm for
test purposes. In the future, test_detect_hash will learn to actually
detect the hash depending on how the testsuite is to be run.
Use the local keyword within these functions to avoid overwriting other
shell variables. We have had a test balloon in place for a couple of
releases to catch shells that don't have this keyword and have not
received any reports of failure. Note that the varying usages of local
used here are supported by all common open-source shells supporting the
local keyword.
Test these new functions as part of t0000, which also serves to
demonstrate basic usage of them. In addition, add documentation on how
to format the lookup data and how to use the test functions.
Implement two basic lookup charts, one for common invalid or synthesized
object IDs, and one for various facts about the hash function in use.
Provide versions of the data for both SHA-1 and SHA-256.
Since we use shell variables for storage, names used for lookup can
currently consist only of shell identifier characters. If this is a
problem in the future, we can hash the names before use.
Improved-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-09-13 07:17:31 +02:00
|
|
|
|
2019-04-05 05:37:42 +02:00
|
|
|
# Converts base-16 data into base-8. The output is given as a sequence of
|
|
|
|
# escaped octals, suitable for consumption by 'printf'.
|
|
|
|
hex2oct () {
|
|
|
|
perl -ne 'printf "\\%03o", hex for /../g'
|
|
|
|
}
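# Illustrative example: emit raw bytes from a hex string, e.g. to splice binary
# data into generated test input:
#
#   printf "$(printf '30313233' | hex2oct)"   # prints "0123"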
|
|
|
|
|
2018-09-13 07:17:31 +02:00
|
|
|
# Set the hash algorithm in use to $1. Only useful when testing the testsuite.
|
|
|
|
test_set_hash () {
|
|
|
|
test_hash_algo="$1"
|
|
|
|
}
|
|
|
|
|
|
|
|
# Detect the hash algorithm in use.
|
|
|
|
test_detect_hash () {
|
|
|
|
# Currently we only support SHA-1, but in the future this function will
|
|
|
|
# actually detect the algorithm in use.
|
|
|
|
test_hash_algo='sha1'
|
|
|
|
}
|
|
|
|
|
|
|
|
# Load common hash metadata and common placeholder object IDs for use with
|
|
|
|
# test_oid.
|
|
|
|
test_oid_init () {
|
|
|
|
test -n "$test_hash_algo" || test_detect_hash &&
|
|
|
|
test_oid_cache <"$TEST_DIRECTORY/oid-info/hash-info" &&
|
|
|
|
test_oid_cache <"$TEST_DIRECTORY/oid-info/oid"
|
|
|
|
}
|
|
|
|
|
|
|
|
# Load key-value pairs from stdin suitable for use with test_oid. Blank lines
|
|
|
|
# and lines starting with "#" are ignored. Keys must consist of shell identifier
|
|
|
|
# characters.
|
|
|
|
#
|
|
|
|
# Examples:
|
|
|
|
# rawsz sha1:20
|
|
|
|
# rawsz sha256:32
|
|
|
|
test_oid_cache () {
|
|
|
|
local tag rest k v &&
|
|
|
|
|
|
|
|
{ test -n "$test_hash_algo" || test_detect_hash; } &&
|
|
|
|
while read tag rest
|
|
|
|
do
|
|
|
|
case $tag in
|
|
|
|
\#*)
|
|
|
|
continue;;
|
|
|
|
?*)
|
|
|
|
# non-empty
|
|
|
|
;;
|
|
|
|
*)
|
|
|
|
# blank line
|
|
|
|
continue;;
|
|
|
|
esac &&
|
|
|
|
|
|
|
|
k="${rest%:*}" &&
|
|
|
|
v="${rest#*:}" &&
|
|
|
|
|
|
|
|
if ! expr "$k" : '[a-z0-9][a-z0-9]*$' >/dev/null
|
|
|
|
then
|
tests: send "bug in the test script" errors to the script's stderr
Some of the functions in our test library check that they were invoked
properly with conditions like this:
test "$#" = 2 ||
error "bug in the test script: not 2 parameters to test-expect-success"
If this particular condition is triggered, then 'error' will abort the
whole test script with a bold red error message [1] right away.
However, under certain circumstances the test script will be aborted
completely silently, namely if:
- a similar condition in a test helper function like
'test_line_count' is triggered,
- which is invoked from the test script's "main" shell [2],
- and the test script is run manually (i.e. './t1234-foo.sh' as
opposed to 'make t1234-foo.sh' or 'make test') [3]
- and without the '--verbose' option,
because the error message is printed from within 'test_eval_', where
standard output is redirected either to /dev/null or to a log file.
The only indication that something is wrong is that not all tests in
the script are executed and at the end of the test script's output
there is no "# passed all N tests" message, which are subtle and can
easily go unnoticed, as I had to experience myself.
Send these "bug in the test script" error messages directly to the
test scripts standard error and thus to the terminal, so those bugs
will be much harder to overlook. Instead of updating all ~20 such
'error' calls with a redirection, let's add a BUG() function to
'test-lib.sh', wrapping an 'error' call with the proper redirection
and also including the common prefix of those error messages, and
convert all those call sites [4] to use this new BUG() function
instead.
[1] That particular error message from 'test_expect_success' is
printed in color only when running with or without '--verbose';
with '--tee' or '--verbose-log' the error is printed without
color, but it is printed to the terminal nonetheless.
[2] If such a condition is triggered in a subshell of a test, then
'error' won't be able to abort the whole test script, but only the
subshell, which in turn causes the test to fail in the usual way,
indicating loudly and clearly that something is wrong.
[3] Well, 'error' aborts the test script the same way when run
manually or by 'make' or 'prove', but both 'make' and 'prove' pay
attention to the test script's exit status, and even a silently
aborted test script would then trigger those tools' usual
noticable error messages.
[4] Strictly speaking, not all those 'error' calls need that
redirection to send their output to the terminal, see e.g.
'test_expect_success' in the opening example, but I think it's
better to be consistent.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-11-19 14:13:26 +01:00
|
|
|
BUG 'bad hash algorithm'
|
2018-09-13 07:17:31 +02:00
|
|
|
fi &&
|
|
|
|
eval "test_oid_${k}_$tag=\"\$v\""
|
|
|
|
done
|
|
|
|
}
|
|
|
|
|
|
|
|
# Look up a per-hash value based on a key ($1). The value must have been loaded
|
|
|
|
# by test_oid_init or test_oid_cache.
|
|
|
|
test_oid () {
|
|
|
|
local var="test_oid_${test_hash_algo}_$1" &&
|
|
|
|
|
|
|
|
# If the variable is unset, we must be missing an entry for this
|
|
|
|
# key-hash pair, so exit with an error.
|
|
|
|
if eval "test -z \"\${$var+set}\""
|
|
|
|
then
|
tests: send "bug in the test script" errors to the script's stderr
Some of the functions in our test library check that they were invoked
properly with conditions like this:
test "$#" = 2 ||
error "bug in the test script: not 2 parameters to test-expect-success"
If this particular condition is triggered, then 'error' will abort the
whole test script with a bold red error message [1] right away.
However, under certain circumstances the test script will be aborted
completely silently, namely if:
- a similar condition in a test helper function like
'test_line_count' is triggered,
- which is invoked from the test script's "main" shell [2],
- and the test script is run manually (i.e. './t1234-foo.sh' as
opposed to 'make t1234-foo.sh' or 'make test') [3]
- and without the '--verbose' option,
because the error message is printed from within 'test_eval_', where
standard output is redirected either to /dev/null or to a log file.
The only indication that something is wrong is that not all tests in
the script are executed and at the end of the test script's output
there is no "# passed all N tests" message, which are subtle and can
easily go unnoticed, as I had to experience myself.
Send these "bug in the test script" error messages directly to the
test scripts standard error and thus to the terminal, so those bugs
will be much harder to overlook. Instead of updating all ~20 such
'error' calls with a redirection, let's add a BUG() function to
'test-lib.sh', wrapping an 'error' call with the proper redirection
and also including the common prefix of those error messages, and
convert all those call sites [4] to use this new BUG() function
instead.
[1] That particular error message from 'test_expect_success' is
printed in color only when running with or without '--verbose';
with '--tee' or '--verbose-log' the error is printed without
color, but it is printed to the terminal nonetheless.
[2] If such a condition is triggered in a subshell of a test, then
'error' won't be able to abort the whole test script, but only the
subshell, which in turn causes the test to fail in the usual way,
indicating loudly and clearly that something is wrong.
[3] Well, 'error' aborts the test script the same way when run
manually or by 'make' or 'prove', but both 'make' and 'prove' pay
attention to the test script's exit status, and even a silently
aborted test script would then trigger those tools' usual
noticable error messages.
[4] Strictly speaking, not all those 'error' calls need that
redirection to send their output to the terminal, see e.g.
'test_expect_success' in the opening example, but I think it's
better to be consistent.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-11-19 14:13:26 +01:00
|
|
|
BUG "undefined key '$1'"
|
t: add test functions to translate hash-related values
Add several test functions to make working with various hash-related
values easier.
Add test_oid_init, which loads common hash-related constants and
placeholder object IDs from the newly added files in t/oid-info.
Provide values for these constants for both SHA-1 and SHA-256.
Add test_oid_cache, which accepts data on standard input in the form of
hash-specific key-value pairs that can be looked up later, using the
same format as the files in t/oid-info. Document this format in a
t/oid-info/README directory so that it's easier to use in the future.
Add test_oid, which is used to specify look up a per-hash value
(produced on standard output) based on the key specified as its
argument. Usually the data to be looked up will be a hash-related
constant (such as the size of the hash in binary or hexadecimal), a
well-known or placeholder object ID (such as the all-zeros object ID or
one consisting of "deadbeef" repeated), or something similar. For these
reasons, test_oid will usually be used within a command substitution.
Consequently, redirect the error output to standard error, since
otherwise it will not be displayed.
Add test_detect_hash, which currently only detects SHA-1, and
test_set_hash, which can be used to set a different hash algorithm for
test purposes. In the future, test_detect_hash will learn to actually
detect the hash depending on how the testsuite is to be run.
Use the local keyword within these functions to avoid overwriting other
shell variables. We have had a test balloon in place for a couple of
releases to catch shells that don't have this keyword and have not
received any reports of failure. Note that the varying usages of local
used here are supported by all common open-source shells supporting the
local keyword.
Test these new functions as part of t0000, which also serves to
demonstrate basic usage of them. In addition, add documentation on how
to format the lookup data and how to use the test functions.
Implement two basic lookup charts, one for common invalid or synthesized
object IDs, and one for various facts about the hash function in use.
Provide versions of the data for both SHA-1 and SHA-256.
Since we use shell variables for storage, names used for lookup can
currently consist only of shell identifier characters. If this is a
problem in the future, we can hash the names before use.
Improved-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
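
A rough sketch of how these pieces fit together (the key 'mykey' and
the repeated-"deadbeef" object IDs are made up for the illustration,
and 'hexsz' is assumed to be one of the hash-fact constants; the
authoritative format description lives in t/oid-info/README):

	test_oid_init

	# cache an illustrative per-hash key-value pair, one value
	# per algorithm, in the 'key algo:value' format
	test_oid_cache <<-EOF
	mykey sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef
	mykey sha256:deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef
	EOF

	# look values up, typically inside a command substitution
	oid=$(test_oid mykey)
	test "$(test_oid hexsz)" -eq ${#oid}   # hexsz: hex length of the hash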
2018-09-13 07:17:31 +02:00
	fi &&
	eval "printf '%s' \"\${$var}\""
}
test-lib-functions: introduce the 'test_set_port' helper function
Several test scripts run daemons like 'git-daemon' or Apache, and
communicate with them through TCP sockets. To have unique ports where
these daemons are accessible, the ports are usually the number of the
corresponding test scripts, unless the user overrides them via
environment variables, and thus all those tests and test libs contain
more or less the same bit of one-liner boilerplate code to find out
the port. The last patch in this series will make this a bit more
complicated.
Factor out finding the port for a daemon into the common helper
function 'test_set_port' to avoid repeating ourselves.
Take special care of test scripts with "low" numbers:
- Test numbers below 1024 would result in a port that's only usable
as root, so set their port to '10000 + test-nr' to make sure it
doesn't interfere with other tests in the test suite. This makes
the hardcoded port number in 't0410-partial-clone.sh' unnecessary;
remove it.
- The shell's arithmetic evaluation interprets numbers with leading
zeros as octal values, which means that a test number below 1000
containing the digit 8 or 9 would trigger an error. Remove all
leading zeros from the test numbers to prevent this (see the short
example after this list).
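
For example, in a shell's arithmetic expansion a leading zero
switches the number's base to octal:

	echo $((123))    # prints 123
	echo $((0123))   # prints 83: the leading zero makes this octal 123
	echo $((0803))   # arithmetic error: 8 is not a valid octal digit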
Note that the 'git p4' tests are unlike the other tests involving
daemons in that:
- 'lib-git-p4.sh' doesn't use the test's number for unique port as
is, but does a bit of additional arithmetic on top [1].
- The port is not overridable via an environment variable.
With this patch even 'git p4' tests will use the test's number as
default port, and it will be overridable via the P4DPORT environment
variable.
[1] Commit fc00233071 (git-p4 tests: refactor and cleanup, 2011-08-22)
introduced that "unusual" unique port computation without
explaining why it was necessary (as opposed to simply using the
test number as is). It seems to be just unnecessary complication,
and in any case that commit came way before the "test nr as unique
port" got "standardized" for other daemons in commits c44132fcf3
(tests: auto-set git-daemon port, 2014-02-10), 3bb486e439 (tests:
auto-set LIB_HTTPD_PORT from test name, 2014-02-10), and
bf9d7df950 (t/lib-git-svn.sh: improve svnserve tests with parallel
make test, 2017-12-01).
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
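
For illustration, after this change a test library or script only
needs something like the following, with whichever port variable
name the daemon setup code honours (LIB_HTTPD_PORT is used here only
as an example taken from the commits cited above, and the script
numbers are examples too):

	test_set_port LIB_HTTPD_PORT
	# t5561-*.sh -> port 5561 (unless LIB_HTTPD_PORT was already set)
	# t0410-*.sh -> port 410 + 10000 = 10410 (low numbers are bumped)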
2019-01-05 02:08:58 +01:00
# Choose a port number based on the test script's number and store it in
# the given variable name, unless that variable already contains a number.
test_set_port () {
	local var=$1 port

	if test $# -ne 1 || test -z "$var"
	then
		BUG "test_set_port requires a variable name"
	fi

	eval port=\$$var
	case "$port" in
	"")
		# No port is set in the given env var, use the test
		# number as port number instead.
		# Remove not only the leading 't', but all leading zeros
		# as well, so the arithmetic below won't (mis)interpret
		# a test number like '0123' as an octal value.
		port=${this_test#${this_test%%[1-9]*}}
		if test "${port:-0}" -lt 1024
		then
			# root-only port, use a larger one instead.
			port=$(($port + 10000))
		fi
		;;
2019-02-11 20:58:03 +01:00
	*[!0-9]*|0*)
test-lib-functions: introduce the 'test_set_port' helper function
2019-01-05 02:08:58 +01:00
		error >&7 "invalid port number: $port"
		;;
	*)
		# The user has specified the port.
		;;
	esac
test-lib: add the '--stress' option to run a test repeatedly under load
Unfortunately, we have a few flaky tests, whose failures tend to be
hard to reproduce. We've found that the best we can do to reproduce
such a failure is to run the test script repeatedly while the machine
is under load, and wait in the hope that the load creates enough
variance in the timing of the test's commands that a failure is
eventually triggered. I have a command to do that, and I noticed that
two other contributors have rolled their own scripts to do the same,
all choosing slightly different approaches.
To help reproduce failures in flaky tests, introduce the '--stress'
option to run a test script repeatedly in multiple parallel jobs until
one of them fails, thereby using the test script itself to increase
the load on the machine.
The number of parallel jobs is determined by, in order of precedence:
the number specified as '--stress=<N>', or the value of the
GIT_TEST_STRESS_LOAD environment variable, or twice the number of
available processors (as reported by the 'getconf' utility), or 8.
Make '--stress' imply '--verbose -x --immediate' to get the most
information about rare failures; there is really no point in going to
all the extra effort of reproducing such a failure only to end up not
knowing which command failed and why.
To prevent the several parallel invocations of the same test from
interfering with each other:
- Include the parallel job's number in the name of the trash
directory and the various output files under 't/test-results/' as
a '.stress-<Nr>' suffix.
- Add the parallel job's number to the port number specified by the
user or to the test number, so even tests involving daemons
listening on a TCP socket can be stressed.
- Redirect each parallel test run's verbose output to
't/test-results/$TEST_NAME.stress-<nr>.out', because dumping the
output of several parallel running tests to the terminal would
create a big ugly mess.
For convenience, print the output of the failed test job at the end,
and rename its trash directory to end with the '.stress-failed'
suffix, so it's easy to find in a predictable path (OTOH, all absolute
paths recorded in the trash directory become invalid; we'll see
whether this causes any issues in practice). If, in an unlikely case,
more than one job were to fail nearly at the same time, then print
the output of all failed jobs, and rename the trash directory of only
the last one (i.e. the one with the highest job number), as it is
the trash directory of the test whose output will be at the bottom
of the user's
terminal.
Based on Jeff King's 'stress' script.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
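
The job-count precedence described above boils down to something
like this sketch (the 'stress' variable holding the '--stress=<N>'
argument is a made-up name; the actual code in test-lib.sh may be
structured differently):

	if test -n "$stress"
	then
		job_count=$stress                  # --stress=<N>
	elif test -n "$GIT_TEST_STRESS_LOAD"
	then
		job_count=$GIT_TEST_STRESS_LOAD    # environment override
	else
		# twice the processor count reported by getconf,
		# falling back to 2 * 4 = 8 when that fails
		job_count=$((2 * $(getconf _NPROCESSORS_ONLN 2>/dev/null || echo 4)))
	fi

Typical invocations are then './t1234-foo.sh --stress' or
'./t1234-foo.sh --stress=12'.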
2019-01-05 02:08:59 +01:00
	# Make sure that parallel '--stress' test jobs get different
	# ports.
	port=$(($port + ${GIT_TEST_STRESS_JOB_NR:-0}))
	eval $var=$port
test-lib-functions: introduce the 'test_set_port' helper function
2019-01-05 02:08:58 +01:00
}
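
Putting the port-related pieces above together, the resulting port
numbers work out as in this illustration (script names are only
examples):

	# ./t5570-git-daemon.sh                   -> port 5570
	# ./t0410-partial-clone.sh                -> port 410 + 10000 = 10410
	# ./t5570-git-daemon.sh --stress, job 3   -> port 5570 + 3 = 5573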