/* git-commit-vandalism/run-command.h */

#ifndef RUN_COMMAND_H
#define RUN_COMMAND_H
#ifndef NO_PTHREADS
#include <pthread.h>
#endif
#include "argv-array.h"
struct child_process {
	const char **argv;
	struct argv_array args;
	struct argv_array env_array;
	pid_t pid;
	/*
	 * Using .in, .out, .err:
	 * - Specify 0 for no redirections (child inherits stdin, stdout,
	 *   stderr from parent).
	 * - Specify -1 to have a pipe allocated as follows:
	 *     .in: returns the writable pipe end; parent writes to it,
	 *          the readable pipe end becomes child's stdin
	 *     .out, .err: returns the readable pipe end; parent reads from
	 *          it, the writable pipe end becomes child's stdout/stderr
	 *   The caller of start_command() must close the returned FDs
	 *   after it has completed reading from/writing to them!
	 * - Specify > 0 to set a channel to a particular FD as follows:
	 *     .in: a readable FD, becomes child's stdin
	 *     .out: a writable FD, becomes child's stdout
	 *     .err: a writable FD, becomes child's stderr
	 *   The specified FD is closed by start_command(), even in case
	 *   of errors!
	 */
	int in;
	int out;
	int err;
	const char *dir;
	const char *const *env;
	unsigned no_stdin:1;
	unsigned no_stdout:1;
	unsigned no_stderr:1;
	unsigned git_cmd:1; /* if this is to be a git sub-command */
	unsigned silent_exec_failure:1;
	unsigned stdout_to_stderr:1;
	unsigned use_shell:1;
	unsigned clean_on_exit:1;
};
#define CHILD_PROCESS_INIT { NULL, ARGV_ARRAY_INIT, ARGV_ARRAY_INIT }
void child_process_init(struct child_process *);
void child_process_clear(struct child_process *);
int start_command(struct child_process *);
int finish_command(struct child_process *);
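/*
 * An illustrative sketch (not part of this API; error handling kept
 * minimal, and the usual git-compat-util.h environment with die(),
 * read() and close() is assumed): run "git rev-parse HEAD" and read
 * its output through a pipe.
 *
 *	struct child_process child = CHILD_PROCESS_INIT;
 *	char buf[64];
 *
 *	argv_array_pushl(&child.args, "rev-parse", "HEAD", NULL);
 *	child.git_cmd = 1;
 *	child.out = -1;	// ask start_command() to allocate a pipe
 *	if (start_command(&child))
 *		die("unable to start rev-parse");
 *	if (read(child.out, buf, sizeof(buf) - 1) < 0)
 *		die_errno("read from rev-parse failed");
 *	close(child.out);	// the caller must close the pipe end
 *	if (finish_command(&child))
 *		die("rev-parse reported failure");
 */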
int finish_command_in_signal(struct child_process *);
int run_command(struct child_process *);
/*
 * Returns the path to the hook file, or NULL if the hook is missing
 * or disabled. Note that this points to static storage that will be
 * overwritten by further calls to find_hook and run_hook_*.
 */
extern const char *find_hook(const char *name);
LAST_ARG_MUST_BE_NULL
extern int run_hook_le(const char *const *env, const char *name, ...);
extern int run_hook_ve(const char *const *env, const char *name, va_list args);
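/*
 * An illustrative sketch: run the "pre-commit" hook with no extra
 * environment. run_hook_le() returns 0 when the hook is missing or
 * disabled, so the call is safe even if no hook is installed.
 *
 *	if (run_hook_le(NULL, "pre-commit", NULL))
 *		die("pre-commit hook refused the operation");
 */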
#define RUN_COMMAND_NO_STDIN 1
#define RUN_GIT_CMD 2 /* if this is to be a git sub-command */
#define RUN_COMMAND_STDOUT_TO_STDERR 4
#define RUN_SILENT_EXEC_FAILURE 8
#define RUN_USING_SHELL 16
#define RUN_CLEAN_ON_EXIT 32
int run_command_v_opt(const char **argv, int opt);
/*
 * env (the environment) is to be formatted like environ: "VAR=VALUE".
 * To unset an environment variable use just "VAR".
 */
int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env);
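/*
 * Illustrative sketches for the two wrappers above ("sub/dir" and the
 * environment entries are made-up values): run "git gc --auto", then
 * run "make" in another directory with CFLAGS set and GIT_DIR unset.
 *
 *	const char *gc_argv[] = { "gc", "--auto", NULL };
 *	const char *make_argv[] = { "make", NULL };
 *	const char *env[] = { "CFLAGS=-O2", "GIT_DIR", NULL };
 *
 *	run_command_v_opt(gc_argv, RUN_GIT_CMD);
 *	run_command_v_opt_cd_env(make_argv, 0, "sub/dir", env);
 */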
/**
 * Execute the given command, capturing its stdout in the given strbuf.
 * Returns -1 if starting the command fails or reading fails, and otherwise
 * returns the exit code of the command. The output collected in the
 * buffer is kept even if the command returns a non-zero exit. The hint field
 * gives a starting size for the strbuf allocation.
 *
 * The fields of "cmd" should be set up as they would for a normal run_command
 * invocation. But note that there is no need to set cmd->out; the function
 * sets it up for the caller.
 */
int capture_command(struct child_process *cmd, struct strbuf *buf, size_t hint);
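/*
 * An illustrative sketch: capture the output of "git symbolic-ref HEAD"
 * into a strbuf (assumes strbuf.h has been included by the caller).
 *
 *	struct child_process cmd = CHILD_PROCESS_INIT;
 *	struct strbuf buf = STRBUF_INIT;
 *
 *	argv_array_pushl(&cmd.args, "symbolic-ref", "HEAD", NULL);
 *	cmd.git_cmd = 1;
 *	if (capture_command(&cmd, &buf, 64))
 *		die("symbolic-ref failed");
 *	strbuf_trim(&buf);	// drop the trailing newline
 */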
/*
 * The purpose of the following functions is to feed a pipe by running
 * a function asynchronously and providing output that the caller reads.
 *
 * It is expected that no synchronization or mutual exclusion between
 * the caller and the feed function is necessary, so that the function
 * can run in a thread without interfering with the caller.
 */
struct async {
	/*
	 * proc reads from in; closes it before return
	 * proc writes to out; closes it before return
	 * returns 0 on success, non-zero on failure
	 */
	int (*proc)(int in, int out, void *data);
	void *data;
	int in;		/* caller writes here and closes it */
	int out;	/* caller reads from here and closes it */
#ifdef NO_PTHREADS
	pid_t pid;
#else
	pthread_t tid;
	int proc_in;
	int proc_out;
#endif
	int isolate_sigpipe;
};
int start_async(struct async *async);
int finish_async(struct async *async);
int in_async(void);
void NORETURN async_exit(int code);
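/*
 * An illustrative sketch (feed() is a made-up example routine): produce
 * data on a pipe from an async thread/process and read it in the caller.
 *
 *	static int feed(int in, int out, void *data)
 *	{
 *		const char *msg = data;
 *		write_in_full(out, msg, strlen(msg));
 *		close(out);	// proc must close out before returning
 *		return 0;	// non-zero would report failure
 *	}
 *
 *	struct async as;
 *	memset(&as, 0, sizeof(as));
 *	as.proc = feed;
 *	as.data = "hello\n";
 *	as.out = -1;	// ask start_async() to allocate a pipe
 *	if (start_async(&as))
 *		die("unable to start async");
 *	// ... read from as.out, then close it ...
 *	close(as.out);
 *	if (finish_async(&as))
 *		die("async routine reported failure");
 */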
/**
 * This callback should initialize the child process and preload the
 * error channel if desired. Preloading the error channel is useful if
 * you want a message printed directly before the output of the child
 * process. pp_cb is the callback cookie as passed to
 * run_processes_parallel. You can store a child-process-specific
 * callback cookie in pp_task_cb.
 *
 * Even after returning 0 to indicate that there are no more processes,
 * this function will be called again until there are no more running
 * child processes.
 *
 * Return 1 if the next child is ready to run.
 * Return 0 if there are currently no more tasks to be processed.
 * To abort and send a signal to the other child processes,
 * return the negative signal number.
 */
typedef int (*get_next_task_fn)(struct child_process *cp,
				struct strbuf *err,
				void *pp_cb,
				void **pp_task_cb);
/**
 * This callback is called whenever there are problems starting
 * a new process.
 *
 * You must not write to stdout or stderr in this function. Add your
 * message to the strbuf err instead, which will be printed without
 * messing up the output of the other parallel processes.
 *
 * pp_cb is the callback cookie as passed into run_processes_parallel,
 * pp_task_cb is the callback cookie as passed into get_next_task_fn.
 *
 * Return 0 to continue the parallel processing. To abort, return a
 * non-zero value; to also send a signal to the other child processes,
 * return the negative signal number.
 */
typedef int (*start_failure_fn)(struct strbuf *err,
				void *pp_cb,
				void *pp_task_cb);
/**
 * This callback is called on every child process that finished processing.
 *
 * You must not write to stdout or stderr in this function. Add your
 * message to the strbuf err instead, which will be printed without
 * messing up the output of the other parallel processes.
 *
 * pp_cb is the callback cookie as passed into run_processes_parallel,
 * pp_task_cb is the callback cookie as passed into get_next_task_fn.
 *
 * Return 0 to continue the parallel processing. To abort, return a
 * non-zero value; to also send a signal to the other child processes,
 * return the negative signal number.
 */
typedef int (*task_finished_fn)(int result,
struct strbuf *err,
void *pp_cb,
void *pp_task_cb);
/**
 * Runs up to n processes at the same time. Whenever a process can be
 * started, the callback get_next_task_fn is called to obtain the data
 * required to start another child process.
 *
 * The children started via this function run in parallel. Their output
 * (both stdout and stderr) is routed to stderr such that output from
 * different tasks does not interleave.
 *
 * start_failure_fn and task_finished_fn can be NULL to omit any
 * special handling.
 */
int run_processes_parallel(int n,
			   get_next_task_fn,
			   start_failure_fn,
			   task_finished_fn,
			   void *pp_cb);
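/*
 * An illustrative sketch (next_task() is a made-up callback): run up to
 * four shell commands at a time, ten in total; the cookie counts how
 * many tasks are still to be handed out.
 *
 *	static int next_task(struct child_process *cp, struct strbuf *err,
 *			     void *pp_cb, void **pp_task_cb)
 *	{
 *		int *remaining = pp_cb;
 *
 *		if (!*remaining)
 *			return 0;	// no more tasks to hand out
 *		(*remaining)--;
 *		cp->use_shell = 1;
 *		argv_array_push(&cp->args, "echo hello");
 *		return 1;	// the next child is ready to run
 *	}
 *
 *	int remaining = 10;
 *	run_processes_parallel(4, next_task, NULL, NULL, &remaining);
 */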
#endif