#!/bin/sh
#
# Copyright (c) 2009 Ilari Liusvaara
#

test_description='Test run command'
. ./test-lib.sh
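# hello-script prints its own source when run, so the tests below can
# check both that a command was executed and what it wrote to stdout.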
cat >hello-script <<-EOF
	#!$SHELL_PATH
	cat hello-script
	EOF
test_expect_success 'start_command reports ENOENT (slash)' '
	test-tool run-command start-command-ENOENT ./does-not-exist 2>err &&
	test_i18ngrep "\./does-not-exist" err
'
test_expect_success 'start_command reports ENOENT (no slash)' '
	test-tool run-command start-command-ENOENT does-not-exist 2>err &&
	test_i18ngrep "does-not-exist" err
'
test_expect_success 'run_command can run a command' '
	cat hello-script >hello.sh &&
	chmod +x hello.sh &&
	test-tool run-command run-command ./hello.sh >actual 2>err &&

	test_cmp hello-script actual &&
	test_must_be_empty err
'
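# Some environments (e.g. Windows) look up commands in the current
# directory even when it is not in $PATH; detect that here so the
# PATH-restriction test below can be skipped where it cannot pass.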
test_lazy_prereq RUNS_COMMANDS_FROM_PWD '
	write_script runs-commands-from-pwd <<-\EOF &&
	true
	EOF
	runs-commands-from-pwd >/dev/null 2>&1
'
test_expect_success !RUNS_COMMANDS_FROM_PWD 'run_command is restricted to PATH' '
	write_script should-not-run <<-\EOF &&
	echo yikes
	EOF
	test_must_fail test-tool run-command run-command should-not-run 2>err &&
	test_i18ngrep "should-not-run" err
'
test_expect_success !MINGW 'run_command can run a script without a #! line' '
	cat >hello <<-\EOF &&
	cat hello-script
	EOF
	chmod +x hello &&
	test-tool run-command run-command ./hello >actual 2>err &&

	test_cmp hello-script actual &&
	test_must_be_empty err
'
test_expect_success 'run_command does not try to execute a directory' '
	test_when_finished "rm -rf bin1 bin2" &&
	mkdir -p bin1/greet bin2 &&
	write_script bin2/greet <<-\EOF &&
	cat bin2/greet
	EOF

	PATH=$PWD/bin1:$PWD/bin2:$PATH \
		test-tool run-command run-command greet >actual 2>err &&
	test_cmp bin2/greet actual &&
	test_must_be_empty err
'
test_expect_success POSIXPERM 'run_command passes over non-executable file' '
	test_when_finished "rm -rf bin1 bin2" &&
	mkdir -p bin1 bin2 &&
	write_script bin1/greet <<-\EOF &&
	cat bin1/greet
	EOF
	chmod -x bin1/greet &&
	write_script bin2/greet <<-\EOF &&
	cat bin2/greet
	EOF

	PATH=$PWD/bin1:$PWD/bin2:$PATH \
		test-tool run-command run-command greet >actual 2>err &&
	test_cmp bin2/greet actual &&
	test_must_be_empty err
'
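# POSIXPERM suffices here (no SANITY needed): even a privileged user
# cannot exec a file whose executable bits are all unset.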
test_expect_success POSIXPERM 'run_command reports EACCES' '
	cat hello-script >hello.sh &&
	chmod -x hello.sh &&
	test_must_fail test-tool run-command run-command ./hello.sh 2>err &&

	grep "fatal: cannot exec.*hello.sh" err
'
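# Unlike the EACCES test above, this one also needs SANITY: running as
# root would bypass the permission bits, leaving the supposedly
# unreadable directory on PATH readable after all.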
test_expect_success POSIXPERM,SANITY 'unreadable directory in PATH' '
	mkdir local-command &&
	test_when_finished "chmod u+rwx local-command && rm -fr local-command" &&
	git config alias.nitfol "!echo frotz" &&
	chmod a-rx local-command &&
	(
		PATH=./local-command:$PATH &&
		git nitfol >actual
	) &&
	echo frotz >expect &&
	test_cmp expect actual
'
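# The parallel tests below all run four tasks, each printing "Hello"
# and "World"; the helper preloads one line of output per child and
# replays each child's stderr in order, so the expected transcript is
# identical whether 3, 4, or 5 jobs run at once.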
cat >expect <<-EOF
	preloaded output of a child
	Hello
	World
	preloaded output of a child
	Hello
	World
	preloaded output of a child
	Hello
	World
	preloaded output of a child
	Hello
	World
	EOF
test_expect_success 'run_command runs in parallel with more jobs available than tasks' '
	test-tool run-command run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
	test_cmp expect actual
'
test_expect_success 'run_command runs in parallel with as many jobs as tasks' '
	test-tool run-command run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
	test_cmp expect actual
'
test_expect_success 'run_command runs in parallel with more tasks than jobs available' '
	test-tool run-command run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
	test_cmp expect actual
'
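# A failing task ("false") makes the parallel machinery ask for a
# quick stop; only the three children that were already started get to
# report back, as the expected transcript shows.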
cat >expect <<-EOF
	preloaded output of a child
	asking for a quick stop
	preloaded output of a child
	asking for a quick stop
	preloaded output of a child
	asking for a quick stop
	EOF
test_expect_success 'run_command is asked to abort gracefully' '
	test-tool run-command run-command-abort 3 false 2>actual &&
	test_cmp expect actual
'
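# With no tasks to hand out, the only expected output is the note that
# no further jobs are available.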
cat >expect <<-EOF
	no further jobs available
	EOF
test_expect_success 'run_command outputs no further jobs message' '
	test-tool run-command run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
	test_cmp expect actual
'
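# test_trace <expected> <args>...: run "test-tool run-command" under
# GIT_TRACE and check that the traced run_command invocation matches
# <expected> followed by "true". Unrelated trace lines are filtered
# out, as is the RUNTIME_PREFIX warning emitted when the test helper
# is started directly (e.g. with --with-dashes) instead of through its
# bin-wrapper.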
test_trace () {
	expect="$1"
	shift
	GIT_TRACE=1 test-tool run-command "$@" run-command true 2>&1 >/dev/null | \
		sed -e 's/.* run_command: //' -e '/trace: .*/d' \
			-e '/RUNTIME_PREFIX requested/d' >actual &&
	echo "$expect true" >expect &&
	test_cmp expect actual
}
test_expect_success 'GIT_TRACE with environment variables' '
	test_trace "abc=1 def=2" env abc=1 env def=2 &&
	test_trace "abc=2" env abc env abc=1 env abc=2 &&
	test_trace "abc=2" env abc env abc=2 &&
	(
		abc=1 && export abc &&
		test_trace "def=1" env abc=1 env def=1
	) &&
	(
		abc=1 && export abc &&
		test_trace "def=1" env abc env abc=1 env def=1
	) &&
	test_trace "def=1" env non-exist env def=1 &&
	test_trace "abc=2" env abc=1 env abc env abc=2 &&
	(
		abc=1 def=2 && export abc def &&
		test_trace "unset abc def;" env abc env def
	) &&
	(
		abc=1 def=2 && export abc def &&
		test_trace "unset def; abc=3" env abc env def env abc=3
	) &&
	(
		abc=1 && export abc &&
		test_trace "unset abc;" env abc=2 env abc
	)
'
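# MSYS2 programs such as sh re-interpret their command line, so git
# quotes arguments specially when spawning them; curly brackets (think
# HEAD@{yesterday} handed to an alias) must survive that quoting.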
test_expect_success MINGW 'verify curlies are quoted properly' '
	: force the rev-parse through the MSYS2 Bash &&
	git -c alias.r="!git rev-parse" r -- a{b}c >actual &&
	cat >expect <<-\EOF &&
	--
	a{b}c
	EOF
	test_cmp expect actual
'
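# Copying the helper into the trash directory ("trash directory.*")
# puts a space into argv[0]; the helper is expected to fail, but its
# error output mentioning TRASH_DIRECTORY proves that the spawn
# itself succeeded.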
test_expect_success MINGW 'can spawn with argv[0] containing spaces' '
	cp "$GIT_BUILD_DIR/t/helper/test-fake-ssh$X" ./ &&
	test_must_fail "$PWD/test-fake-ssh$X" 2>err &&
	grep TRASH_DIRECTORY err
'
test_done