# Performance testing framework. Each perf script starts much like
# a normal test script, except it sources this library instead of
# test-lib.sh. See t/perf/README for documentation.
#
# Copyright (c) 2011 Thomas Rast
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/ .

# do the --tee work early; it otherwise confuses our careful
# GIT_BUILD_DIR mangling
case "$GIT_TEST_TEE_STARTED, $* " in
done,*)
	# do not redirect again
	;;
*' --tee '*|*' --va'*)
	mkdir -p test-results
	BASE=test-results/$(basename "$0" .sh)
	(GIT_TEST_TEE_STARTED=done ${SHELL-sh} "$0" "$@" 2>&1;
	 echo $? > $BASE.exit) | tee $BASE.out
	test "$(cat $BASE.exit)" = 0
	exit
	;;
esac

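# Illustrative use of the option handled above (invocation is an
# example, not part of this library): running
#
#	./p0000-perf-lib-sanity.sh --tee
#
# shows the output on the terminal and also saves it, together with
# the exit status, under test-results/ as set up by the case arm above.
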
TEST_DIRECTORY=$(pwd)/..
TEST_OUTPUT_DIRECTORY=$(pwd)
if test -z "$GIT_TEST_INSTALLED"; then
	perf_results_prefix=
else
	perf_results_prefix=$(printf "%s" "${GIT_TEST_INSTALLED%/bin-wrappers}" | tr -c "[a-zA-Z0-9]" "[_*]")"."
	# make the tested dir absolute
	GIT_TEST_INSTALLED=$(cd "$GIT_TEST_INSTALLED" && pwd)
fi

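# Illustrative example (path is hypothetical): to time a separately
# built or installed git instead of the one in this build tree, point
# GIT_TEST_INSTALLED at its bin-wrappers (or bin) directory, e.g.
#
#	GIT_TEST_INSTALLED=/path/to/other/git/bin-wrappers ./p0000-perf-lib-sanity.sh
#
# The prefix computed above keeps results from different installations
# apart in the results directory.
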
TEST_NO_CREATE_REPO=t
TEST_NO_MALLOC_CHECK=t

. ../test-lib.sh

# Variables from test-lib that are normally internal to the tests; we
# need to export them for test_perf subshells
export TEST_DIRECTORY TRASH_DIRECTORY GIT_BUILD_DIR GIT_TEST_CMP

MODERN_GIT=$GIT_BUILD_DIR/bin-wrappers/git
export MODERN_GIT

perf_results_dir=$TEST_OUTPUT_DIRECTORY/test-results
test -n "$GIT_PERF_SUBSECTION" && perf_results_dir="$perf_results_dir/$GIT_PERF_SUBSECTION"
mkdir -p "$perf_results_dir"
rm -f "$perf_results_dir"/$(basename "$0" .sh).subtests

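# For orientation only: with the helpers below, a script named
# pXXXX-foo.sh leaves its results in $perf_results_dir roughly as
#
#	pXXXX-foo.subtests	numbers of the subtests that ran
#	pXXXX-foo.N.descr	description of subtest N
#	pXXXX-foo.N.times	best timing of subtest N (test_perf)
#	pXXXX-foo.N.size	measured size of subtest N (test_size)
#
# aggregate.perl reads these files to produce the final report.
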
die_if_build_dir_not_repo () {
	if ! ( cd "$TEST_DIRECTORY/.." &&
		git rev-parse --build-dir >/dev/null 2>&1 ); then
		error "No $1 defined, and your build directory is not a repo"
	fi
}

if test -z "$GIT_PERF_REPO"; then
|
|
|
|
die_if_build_dir_not_repo '$GIT_PERF_REPO'
|
|
|
|
GIT_PERF_REPO=$TEST_DIRECTORY/..
|
|
|
|
fi
|
|
|
|
if test -z "$GIT_PERF_LARGE_REPO"; then
|
|
|
|
die_if_build_dir_not_repo '$GIT_PERF_LARGE_REPO'
|
|
|
|
GIT_PERF_LARGE_REPO=$TEST_DIRECTORY/..
|
|
|
|
fi
|
|
|
|
|
2017-05-11 11:41:07 +02:00
|
|
|
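# Illustrative example (paths and script name are hypothetical): both
# variables can be pointed at existing clones before running a script,
# e.g.
#
#	GIT_PERF_REPO=/path/to/git/clone \
#	GIT_PERF_LARGE_REPO=/path/to/linux/clone \
#	./p1234-some-test.sh
#
# otherwise the build directory's own repository is used for both.
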
test_perf_do_repo_symlink_config_ () {
	test_have_prereq SYMLINKS || git config core.symlinks false
}

test_perf_create_repo_from () {
	test "$#" = 2 ||
	BUG "not 2 parameters to test-create-repo"
	repo="$1"
	source="$2"
	source_git="$("$MODERN_GIT" -C "$source" rev-parse --git-dir)"
	objects_dir="$("$MODERN_GIT" -C "$source" rev-parse --git-path objects)"
	mkdir -p "$repo/.git"
	(
		cd "$source" &&
		{ cp -Rl "$objects_dir" "$repo/.git/" 2>/dev/null ||
			cp -R "$objects_dir" "$repo/.git/"; } &&
		for stuff in "$source_git"/*; do
			case "$stuff" in
			*/objects|*/hooks|*/config|*/commondir)
				;;
			*)
				cp -R "$stuff" "$repo/.git/" || exit 1
				;;
			esac
		done
	) &&
	(
		cd "$repo" &&
		"$MODERN_GIT" init -q &&
		test_perf_do_repo_symlink_config_ &&
		mv .git/hooks .git/hooks-disabled 2>/dev/null &&
		if test -f .git/index.lock
		then
			# We may be copying a repo that can't run "git
			# status" due to a locked index. Since we have
			# a copy it's fine to remove the lock.
			rm .git/index.lock
		fi
	) || error "failed to copy repository '$source' to '$repo'"
}

# call at least one of these to establish an appropriately-sized repository
test_perf_fresh_repo () {
	repo="${1:-$TRASH_DIRECTORY}"
	"$MODERN_GIT" init -q "$repo" &&
	(
		cd "$repo" &&
		test_perf_do_repo_symlink_config_
	)
}

test_perf_default_repo () {
	test_perf_create_repo_from "${1:-$TRASH_DIRECTORY}" "$GIT_PERF_REPO"
}

test_perf_large_repo () {
	if test "$GIT_PERF_LARGE_REPO" = "$GIT_BUILD_DIR"; then
		echo "warning: \$GIT_PERF_LARGE_REPO is \$GIT_BUILD_DIR." >&2
		echo "warning: This will work, but may not be a sufficiently large repo" >&2
		echo "warning: for representative measurements." >&2
	fi
	test_perf_create_repo_from "${1:-$TRASH_DIRECTORY}" "$GIT_PERF_LARGE_REPO"
}

test_checkout_worktree () {
	git checkout-index -u -a ||
	error "git checkout-index failed"
}

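# Illustrative sketch (the script body is an example, not part of this
# library; see t/perf/README): a perf script's setup typically looks like
#
#	. ./perf-lib.sh
#	test_perf_default_repo	# or test_perf_large_repo / test_perf_fresh_repo
#	test_checkout_worktree	# only if the timed commands need a worktree
#
# before the actual test_perf/test_size blocks follow.
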
# Performance tests should never fail. If they do, stop immediately
immediate=t

# Perf tests require GNU time
case "$(uname -s)" in Darwin) GTIME="${GTIME:-gtime}";; esac
GTIME="${GTIME:-/usr/bin/time}"

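# Each timing run below is executed under $GTIME with the format
# "%E %U %S", i.e. one line per run such as (illustrative output)
#
#	0:01.23 1.10 0.12
#
# giving elapsed, user and system time. The per-run results land in
# test_time.$i and are reduced to the best run by min_time.perl.
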
test_run_perf_ () {
	test_cleanup=:
	test_export_="test_cleanup"
	export test_cleanup test_export_
	"$GTIME" -f "%E %U %S" -o test_time.$i "$SHELL" -c '
. '"$TEST_DIRECTORY"/test-lib-functions.sh'
test_export () {
	[ $# != 0 ] || return 0
	test_export_="$test_export_\\|$1"
	shift
	test_export "$@"
}
'"$1"'
ret=$?
set | sed -n "s'"/'/'\\\\''/g"';s/^\\($test_export_\\)/export '"'&'"'/p" >test_vars
exit $ret' >&3 2>&4
	eval_ret=$?

	if test $eval_ret = 0 || test -n "$expecting_failure"
	then
		test_eval_ "$test_cleanup"
		. ./test_vars || error "failed to load updated environment"
	fi
	if test "$verbose" = "t" && test -n "$HARNESS_ACTIVE"; then
		echo ""
	fi
	return "$eval_ret"
}

test_wrapper_ () {
	test_wrapper_func_=$1; shift
	test_start_
	test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
	test "$#" = 2 ||
	BUG "not 2 or 3 parameters to test-expect-success"
	export test_prereq
	if ! test_skip "$@"
	then
		base=$(basename "$0" .sh)
		echo "$test_count" >>"$perf_results_dir"/$base.subtests
		echo "$1" >"$perf_results_dir"/$base.$test_count.descr
		base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count"
		"$test_wrapper_func_" "$@"
	fi

	test_finish_
}

test_perf_ () {
	if test -z "$verbose"; then
		printf "%s" "perf $test_count - $1:"
	else
		echo "perf $test_count - $1:"
	fi
	for i in $(test_seq 1 $GIT_PERF_REPEAT_COUNT); do
		say >&3 "running: $2"
		if test_run_perf_ "$2"
		then
			if test -z "$verbose"; then
				printf " %s" "$i"
			else
				echo "* timing run $i/$GIT_PERF_REPEAT_COUNT:"
			fi
		else
			test -z "$verbose" && echo
			test_failure_ "$@"
			break
		fi
	done
	if test -z "$verbose"; then
		echo " ok"
	else
		test_ok_ "$1"
	fi
	"$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".times
}

test_perf () {
	test_wrapper_ test_perf_ "$@"
}

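# Illustrative example (the body shown is an example, not part of this
# library; see the existing p*.sh scripts and t/perf/README):
#
#	test_perf 'rev-list --all' '
#		git rev-list --all >/dev/null
#	'
#
# The body is run $GIT_PERF_REPEAT_COUNT times and the best timing is
# recorded.
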
test_size_ () {
	say >&3 "running: $2"
	if test_eval_ "$2" 3>"$base".size; then
		test_ok_ "$1"
	else
		test_failure_ "$@"
	fi
}

test_size () {
	test_wrapper_ test_size_ "$@"
}

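# Illustrative example (hypothetical body): whatever a test_size body
# prints to standard output is captured into the .size result file via
# the descriptor-3 redirection above, so the body should emit a single
# number, e.g.
#
#	test_size 'pack directory size' '
#		du -sk .git/objects/pack | cut -f1
#	'
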
# We extend test_done to print timings at the end (./run disables this
# and does it after running everything)
test_at_end_hook_ () {
	if test -z "$GIT_PERF_AGGREGATING_LATER"; then
		( cd "$TEST_DIRECTORY"/perf && ./aggregate.perl $(basename "$0") )
	fi
}

test_export () {
	export "$@"
}
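
# Illustrative example (hypothetical test bodies): test_export marks
# variables so they survive into later tests, including the subshells
# that test_perf bodies run in, e.g.
#
#	test_expect_success 'setup' '
#		commit=$(git rev-parse HEAD) &&
#		test_export commit
#	'
#	test_perf 'log one commit' '
#		git log -1 $commit >/dev/null
#	'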