Merge branch 'sb/submodule-parallel-update'
A major part of "git submodule update" has been ported to C to take
advantage of the recently added framework to run download tasks in
parallel.

* sb/submodule-parallel-update:
  clone: allow an explicit argument for parallel submodule clones
  submodule update: expose parallelism to the user
  submodule helper: remove double 'fatal: ' prefix
  git submodule update: have a dedicated helper for cloning
  run_processes_parallel: rename parameters for the callbacks
  run_processes_parallel: treat output of children as byte array
  submodule update: direct error message to stderr
  fetching submodules: respect `submodule.fetchJobs` config option
  submodule-config: drop check against NULL
  submodule-config: keep update strategy around

This commit is contained in: bdebbeb334
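Taken together, the series exposes the parallelism in three places: `git clone --jobs`, `git submodule update --jobs`, and the `submodule.fetchJobs` configuration. A rough usage sketch (the URL and job counts below are illustrative, not part of this commit):

	# clone and fetch/check out submodules with up to 4 parallel jobs
	git clone --recurse-submodules --jobs 4 https://example.com/repo.git

	# make 4 the default for later submodule fetches/clones in this repository
	git config submodule.fetchJobs 4

	# later updates pick up the config, or override it per invocation
	git submodule update --init --recursive
	git submodule update --jobs 8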
@@ -2738,6 +2738,12 @@ submodule.<name>.ignore::
	"--ignore-submodules" option. The 'git submodule' commands are not
	affected by this setting.

submodule.fetchJobs::
	Specifies how many submodules are fetched/cloned at the same time.
	A positive integer allows up to that number of submodules fetched
	in parallel. A value of 0 will give some reasonable default.
	If unset, it defaults to 1.

tag.sort::
	This variable controls the sort ordering of tags when displayed by
	linkgit:git-tag[1]. Without the "--sort=<value>" option provided, the

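For example, the new knob could be set like this (the value 4 is illustrative):

	# allow up to 4 submodules to be fetched/cloned at once
	git config submodule.fetchJobs 4

	# 0 picks a reasonable default; leaving it unset behaves like 1
	git config submodule.fetchJobs 0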
@@ -14,7 +14,7 @@ SYNOPSIS
	  [-o <name>] [-b <name>] [-u <upload-pack>] [--reference <repository>]
	  [--dissociate] [--separate-git-dir <git dir>]
	  [--depth <depth>] [--[no-]single-branch]
	  [--recursive | --recurse-submodules] [--] <repository>
	  [--recursive | --recurse-submodules] [--jobs <n>] [--] <repository>
	  [<directory>]

DESCRIPTION
@@ -219,6 +219,10 @@ objects from the source repository into a pack in the cloned repository.
	The result is Git repository can be separated from working
	tree.

-j <n>::
--jobs <n>::
	The number of submodules fetched at the same time.
	Defaults to the `submodule.fetchJobs` option.

<repository>::
	The (possibly remote) repository to clone from. See the

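For example (URL and target directory are placeholders):

	# clone, initializing submodules with up to 8 parallel clone jobs
	git clone --recurse-submodules --jobs 8 https://example.com/super.git super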
@@ -16,7 +16,7 @@ SYNOPSIS
'git submodule' [--quiet] deinit [-f|--force] [--] <path>...
'git submodule' [--quiet] update [--init] [--remote] [-N|--no-fetch]
	      [-f|--force] [--rebase|--merge] [--reference <repository>]
	      [--depth <depth>] [--recursive] [--] [<path>...]
	      [--depth <depth>] [--recursive] [--jobs <n>] [--] [<path>...]
'git submodule' [--quiet] summary [--cached|--files] [(-n|--summary-limit) <n>]
	      [commit] [--] [<path>...]
'git submodule' [--quiet] foreach [--recursive] <command>
@@ -377,6 +377,11 @@ for linkgit:git-clone[1]'s `--reference` and `--shared` options carefully.
	clone with a history truncated to the specified number of revisions.
	See linkgit:git-clone[1]

-j <n>::
--jobs <n>::
	This option is only valid for the update command.
	Clone new submodules in parallel with as many jobs.
	Defaults to the `submodule.fetchJobs` option.

<path>...::
	Paths to submodule(s). When specified this will restrict the command

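For example (the job count is illustrative):

	# clone any missing submodules with up to 8 parallel jobs;
	# without --jobs the submodule.fetchJobs setting is used
	git submodule update --init --jobs 8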
@@ -51,6 +51,7 @@ static enum transport_family family;
static struct string_list option_config;
static struct string_list option_reference;
static int option_dissociate;
static int max_jobs = -1;

static struct option builtin_clone_options[] = {
	OPT__VERBOSITY(&option_verbosity),
@@ -73,6 +74,8 @@ static struct option builtin_clone_options[] = {
		    N_("initialize submodules in the clone")),
	OPT_BOOL(0, "recurse-submodules", &option_recursive,
		    N_("initialize submodules in the clone")),
	OPT_INTEGER('j', "jobs", &max_jobs,
		    N_("number of submodules cloned in parallel")),
	OPT_STRING(0, "template", &option_template, N_("template-directory"),
		   N_("directory from which templates will be used")),
	OPT_STRING_LIST(0, "reference", &option_reference, N_("repo"),
@@ -100,10 +103,6 @@ static struct option builtin_clone_options[] = {
	OPT_END()
};

static const char *argv_submodule[] = {
	"submodule", "update", "--init", "--recursive", NULL
};

static const char *get_repo_path_1(struct strbuf *path, int *is_bundle)
{
	static char *suffix[] = { "/.git", "", ".git/.git", ".git" };
@@ -732,8 +731,16 @@ static int checkout(void)
	err |= run_hook_le(NULL, "post-checkout", sha1_to_hex(null_sha1),
			   sha1_to_hex(sha1), "1", NULL);

	if (!err && option_recursive)
		err = run_command_v_opt(argv_submodule, RUN_GIT_CMD);
	if (!err && option_recursive) {
		struct argv_array args = ARGV_ARRAY_INIT;
		argv_array_pushl(&args, "submodule", "update", "--init", "--recursive", NULL);

		if (max_jobs != -1)
			argv_array_pushf(&args, "--jobs=%d", max_jobs);

		err = run_command_v_opt(args.argv, RUN_GIT_CMD);
		argv_array_clear(&args);
	}

	return err;
}

@@ -37,7 +37,7 @@ static int prune = -1; /* unspecified */
static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity;
static int progress = -1, recurse_submodules = RECURSE_SUBMODULES_DEFAULT;
static int tags = TAGS_DEFAULT, unshallow, update_shallow;
static int max_children = 1;
static int max_children = -1;
static enum transport_family family;
static const char *depth;
static const char *upload_pack;

@@ -249,6 +249,257 @@ static int module_clone(int argc, const char **argv, const char *prefix)
	return 0;
}

struct submodule_update_clone {
	/* index into 'list', the list of submodules to look into for cloning */
	int current;
	struct module_list list;
	unsigned warn_if_uninitialized : 1;

	/* update parameter passed via commandline */
	struct submodule_update_strategy update;

	/* configuration parameters which are passed on to the children */
	int quiet;
	const char *reference;
	const char *depth;
	const char *recursive_prefix;
	const char *prefix;

	/* Machine-readable status lines to be consumed by git-submodule.sh */
	struct string_list projectlines;

	/* If we want to stop as fast as possible and return an error */
	unsigned quickstop : 1;
};
#define SUBMODULE_UPDATE_CLONE_INIT {0, MODULE_LIST_INIT, 0, \
	SUBMODULE_UPDATE_STRATEGY_INIT, 0, NULL, NULL, NULL, NULL, \
	STRING_LIST_INIT_DUP, 0}

/**
 * Determine whether 'ce' needs to be cloned. If so, prepare the 'child' to
 * run the clone. Returns 1 if 'ce' needs to be cloned, 0 otherwise.
 */
static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
					   struct child_process *child,
					   struct submodule_update_clone *suc,
					   struct strbuf *out)
{
	const struct submodule *sub = NULL;
	struct strbuf displaypath_sb = STRBUF_INIT;
	struct strbuf sb = STRBUF_INIT;
	const char *displaypath = NULL;
	char *url = NULL;
	int needs_cloning = 0;

	if (ce_stage(ce)) {
		if (suc->recursive_prefix)
			strbuf_addf(&sb, "%s/%s", suc->recursive_prefix, ce->name);
		else
			strbuf_addf(&sb, "%s", ce->name);
		strbuf_addf(out, _("Skipping unmerged submodule %s"), sb.buf);
		strbuf_addch(out, '\n');
		goto cleanup;
	}

	sub = submodule_from_path(null_sha1, ce->name);

	if (suc->recursive_prefix)
		displaypath = relative_path(suc->recursive_prefix,
					    ce->name, &displaypath_sb);
	else
		displaypath = ce->name;

	if (suc->update.type == SM_UPDATE_NONE
	    || (suc->update.type == SM_UPDATE_UNSPECIFIED
		&& sub->update_strategy.type == SM_UPDATE_NONE)) {
		strbuf_addf(out, _("Skipping submodule '%s'"), displaypath);
		strbuf_addch(out, '\n');
		goto cleanup;
	}

	/*
	 * Looking up the url in .git/config.
	 * We must not fall back to .gitmodules as we only want
	 * to process configured submodules.
	 */
	strbuf_reset(&sb);
	strbuf_addf(&sb, "submodule.%s.url", sub->name);
	git_config_get_string(sb.buf, &url);
	if (!url) {
		/*
		 * Only mention uninitialized submodules when their
		 * path have been specified
		 */
		if (suc->warn_if_uninitialized) {
			strbuf_addf(out,
				_("Submodule path '%s' not initialized"),
				displaypath);
			strbuf_addch(out, '\n');
			strbuf_addstr(out,
				_("Maybe you want to use 'update --init'?"));
			strbuf_addch(out, '\n');
		}
		goto cleanup;
	}

	strbuf_reset(&sb);
	strbuf_addf(&sb, "%s/.git", ce->name);
	needs_cloning = !file_exists(sb.buf);

	strbuf_reset(&sb);
	strbuf_addf(&sb, "%06o %s %d %d\t%s\n", ce->ce_mode,
			sha1_to_hex(ce->sha1), ce_stage(ce),
			needs_cloning, ce->name);
	string_list_append(&suc->projectlines, sb.buf);

	if (!needs_cloning)
		goto cleanup;

	child->git_cmd = 1;
	child->no_stdin = 1;
	child->stdout_to_stderr = 1;
	child->err = -1;
	argv_array_push(&child->args, "submodule--helper");
	argv_array_push(&child->args, "clone");
	if (suc->quiet)
		argv_array_push(&child->args, "--quiet");
	if (suc->prefix)
		argv_array_pushl(&child->args, "--prefix", suc->prefix, NULL);
	argv_array_pushl(&child->args, "--path", sub->path, NULL);
	argv_array_pushl(&child->args, "--name", sub->name, NULL);
	argv_array_pushl(&child->args, "--url", url, NULL);
	if (suc->reference)
		argv_array_push(&child->args, suc->reference);
	if (suc->depth)
		argv_array_push(&child->args, suc->depth);

cleanup:
	free(url);
	strbuf_reset(&displaypath_sb);
	strbuf_reset(&sb);

	return needs_cloning;
}

static int update_clone_get_next_task(struct child_process *child,
				      struct strbuf *err,
				      void *suc_cb,
				      void **void_task_cb)
{
	struct submodule_update_clone *suc = suc_cb;

	for (; suc->current < suc->list.nr; suc->current++) {
		const struct cache_entry *ce = suc->list.entries[suc->current];
		if (prepare_to_clone_next_submodule(ce, child, suc, err)) {
			suc->current++;
			return 1;
		}
	}
	return 0;
}

static int update_clone_start_failure(struct strbuf *err,
				      void *suc_cb,
				      void *void_task_cb)
{
	struct submodule_update_clone *suc = suc_cb;
	suc->quickstop = 1;
	return 1;
}

static int update_clone_task_finished(int result,
				      struct strbuf *err,
				      void *suc_cb,
				      void *void_task_cb)
{
	struct submodule_update_clone *suc = suc_cb;

	if (!result)
		return 0;

	suc->quickstop = 1;
	return 1;
}

static int update_clone(int argc, const char **argv, const char *prefix)
{
	const char *update = NULL;
	int max_jobs = -1;
	struct string_list_item *item;
	struct pathspec pathspec;
	struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;

	struct option module_update_clone_options[] = {
		OPT_STRING(0, "prefix", &prefix,
			   N_("path"),
			   N_("path into the working tree")),
		OPT_STRING(0, "recursive-prefix", &suc.recursive_prefix,
			   N_("path"),
			   N_("path into the working tree, across nested "
			      "submodule boundaries")),
		OPT_STRING(0, "update", &update,
			   N_("string"),
			   N_("rebase, merge, checkout or none")),
		OPT_STRING(0, "reference", &suc.reference, N_("repo"),
			   N_("reference repository")),
		OPT_STRING(0, "depth", &suc.depth, "<depth>",
			   N_("Create a shallow clone truncated to the "
			      "specified number of revisions")),
		OPT_INTEGER('j', "jobs", &max_jobs,
			    N_("parallel jobs")),
		OPT__QUIET(&suc.quiet, N_("don't print cloning progress")),
		OPT_END()
	};

	const char *const git_submodule_helper_usage[] = {
		N_("git submodule--helper update_clone [--prefix=<path>] [<path>...]"),
		NULL
	};
	suc.prefix = prefix;

	argc = parse_options(argc, argv, prefix, module_update_clone_options,
			     git_submodule_helper_usage, 0);

	if (update)
		if (parse_submodule_update_strategy(update, &suc.update) < 0)
			die(_("bad value for update parameter"));

	if (module_list_compute(argc, argv, prefix, &pathspec, &suc.list) < 0)
		return 1;

	if (pathspec.nr)
		suc.warn_if_uninitialized = 1;

	/* Overlay the parsed .gitmodules file with .git/config */
	gitmodules_config();
	git_config(submodule_config, NULL);

	if (max_jobs < 0)
		max_jobs = parallel_submodules();

	run_processes_parallel(max_jobs,
			       update_clone_get_next_task,
			       update_clone_start_failure,
			       update_clone_task_finished,
			       &suc);

	/*
	 * We saved the output and put it out all at once now.
	 * That means:
	 * - the listener does not have to interleave their (checkout)
	 *   work with our fetching. The writes involved in a
	 *   checkout involve more straightforward sequential I/O.
	 * - the listener can avoid doing any work if fetching failed.
	 */
	if (suc.quickstop)
		return 1;

	for_each_string_list_item(item, &suc.projectlines)
		utf8_fprintf(stdout, "%s", item->string);

	return 0;
}

struct cmd_struct {
	const char *cmd;
	int (*fn)(int, const char **, const char *);
@@ -258,19 +509,20 @@ static struct cmd_struct commands[] = {
	{"list", module_list},
	{"name", module_name},
	{"clone", module_clone},
	{"update-clone", update_clone}
};

int cmd_submodule__helper(int argc, const char **argv, const char *prefix)
{
	int i;
	if (argc < 2)
		die(_("fatal: submodule--helper subcommand must be "
		die(_("submodule--helper subcommand must be "
		      "called with a subcommand"));

	for (i = 0; i < ARRAY_SIZE(commands); i++)
		if (!strcmp(argv[1], commands[i].cmd))
			return commands[i].fn(argc - 1, argv + 1, prefix);

	die(_("fatal: '%s' is not a valid submodule--helper "
	die(_("'%s' is not a valid submodule--helper "
	      "subcommand"), argv[1]);
}

@@ -663,6 +663,14 @@ cmd_update()
		--depth=*)
			depth=$1
			;;
		-j|--jobs)
			case "$2" in '') usage ;; esac
			jobs="--jobs=$2"
			shift
			;;
		--jobs=*)
			jobs=$1
			;;
		--)
			shift
			break
@@ -682,17 +690,21 @@ cmd_update()
		cmd_init "--" "$@" || return
	fi

	cloned_modules=
	git submodule--helper list --prefix "$wt_prefix" "$@" | {
	{
		git submodule--helper update-clone ${GIT_QUIET:+--quiet} \
			${wt_prefix:+--prefix "$wt_prefix"} \
			${prefix:+--recursive-prefix "$prefix"} \
			${update:+--update "$update"} \
			${reference:+--reference "$reference"} \
			${depth:+--depth "$depth"} \
			${jobs:+$jobs} \
			"$@" || echo "#unmatched"
	} | {
		err=
		while read mode sha1 stage sm_path
		while read mode sha1 stage just_cloned sm_path
		do
			die_if_unmatched "$mode"
			if test "$stage" = U
			then
				echo >&2 "Skipping unmerged submodule $prefix$sm_path"
				continue
			fi

			name=$(git submodule--helper name "$sm_path") || exit
			url=$(git config submodule."$name".url)
			branch=$(get_submodule_config "$name" branch master)
@@ -709,27 +721,10 @@ cmd_update()

			displaypath=$(relative_path "$prefix$sm_path")

			if test "$update_module" = "none"
			if test $just_cloned -eq 1
			then
				echo "Skipping submodule '$displaypath'"
				continue
			fi

			if test -z "$url"
			then
				# Only mention uninitialized submodules when its
				# path have been specified
				test "$#" != "0" &&
				say "$(eval_gettext "Submodule path '\$displaypath' not initialized
Maybe you want to use 'update --init'?")"
				continue
			fi

			if ! test -d "$sm_path"/.git && ! test -f "$sm_path"/.git
			then
				git submodule--helper clone ${GIT_QUIET:+--quiet} --prefix "$prefix" --path "$sm_path" --name "$name" --url "$url" "$reference" "$depth" || exit
				cloned_modules="$cloned_modules;$name"
				subsha1=
				update_module=checkout
			else
				subsha1=$(clear_local_git_env; cd "$sm_path" &&
					git rev-parse --verify HEAD) ||
@@ -774,13 +769,6 @@ Maybe you want to use 'update --init'?")"
			die "$(eval_gettext "Fetched in submodule path '\$displaypath', but it did not contain $sha1. Direct fetching of that commit failed.")"
		fi

		# Is this something we just cloned?
		case ";$cloned_modules;" in
		*";$name;"*)
			# then there is no local change to integrate
			update_module=checkout ;;
		esac

		must_die_on_failure=
		case "$update_module" in
		checkout)

@@ -902,7 +902,7 @@ struct parallel_processes {
	struct strbuf buffered_output; /* of finished children */
};

static int default_start_failure(struct strbuf *err,
static int default_start_failure(struct strbuf *out,
				 void *pp_cb,
				 void *pp_task_cb)
{
@@ -910,7 +910,7 @@ static int default_start_failure(struct strbuf *err,
}

static int default_task_finished(int result,
				 struct strbuf *err,
				 struct strbuf *out,
				 void *pp_cb,
				 void *pp_task_cb)
{
@@ -994,7 +994,7 @@ static void pp_cleanup(struct parallel_processes *pp)
	 * When get_next_task added messages to the buffer in its last
	 * iteration, the buffered output is non empty.
	 */
	fputs(pp->buffered_output.buf, stderr);
	strbuf_write(&pp->buffered_output, stderr);
	strbuf_release(&pp->buffered_output);

	sigchain_pop_common();
@@ -1079,7 +1079,7 @@ static void pp_output(struct parallel_processes *pp)
	int i = pp->output_owner;
	if (pp->children[i].state == GIT_CP_WORKING &&
	    pp->children[i].err.len) {
		fputs(pp->children[i].err.buf, stderr);
		strbuf_write(&pp->children[i].err, stderr);
		strbuf_reset(&pp->children[i].err);
	}
}
@@ -1117,11 +1117,11 @@ static int pp_collect_finished(struct parallel_processes *pp)
			strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
			strbuf_reset(&pp->children[i].err);
		} else {
			fputs(pp->children[i].err.buf, stderr);
			strbuf_write(&pp->children[i].err, stderr);
			strbuf_reset(&pp->children[i].err);

			/* Output all other finished child processes */
			fputs(pp->buffered_output.buf, stderr);
			strbuf_write(&pp->buffered_output, stderr);
			strbuf_reset(&pp->buffered_output);

			/*

@@ -140,7 +140,7 @@ void NORETURN async_exit(int code);
 * return the negative signal number.
 */
typedef int (*get_next_task_fn)(struct child_process *cp,
				struct strbuf *err,
				struct strbuf *out,
				void *pp_cb,
				void **pp_task_cb);

@@ -149,7 +149,7 @@ typedef int (*get_next_task_fn)(struct child_process *cp,
 * a new process.
 *
 * You must not write to stdout or stderr in this function. Add your
 * message to the strbuf err instead, which will be printed without
 * message to the strbuf out instead, which will be printed without
 * messing up the output of the other parallel processes.
 *
 * pp_cb is the callback cookie as passed into run_processes_parallel,
@@ -159,7 +159,7 @@ typedef int (*get_next_task_fn)(struct child_process *cp,
 * To send a signal to other child processes for abortion, return
 * the negative signal number.
 */
typedef int (*start_failure_fn)(struct strbuf *err,
typedef int (*start_failure_fn)(struct strbuf *out,
				void *pp_cb,
				void *pp_task_cb);

@@ -167,7 +167,7 @@ typedef int (*start_failure_fn)(struct strbuf *err,
 * This callback is called on every child process that finished processing.
 *
 * You must not write to stdout or stderr in this function. Add your
 * message to the strbuf err instead, which will be printed without
 * message to the strbuf out instead, which will be printed without
 * messing up the output of the other parallel processes.
 *
 * pp_cb is the callback cookie as passed into run_processes_parallel,
@@ -178,7 +178,7 @@ typedef int (*start_failure_fn)(struct strbuf *err,
 * the negative signal number.
 */
typedef int (*task_finished_fn)(int result,
				struct strbuf *err,
				struct strbuf *out,
				void *pp_cb,
				void *pp_task_cb);

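These typedefs are the contract a caller of run_processes_parallel() implements. A minimal sketch of such a caller, assuming only the declarations above plus the argv_array/strbuf calls already used in this diff; the my_* names and the fixed counts are made up for illustration:

	struct my_work { int next, total; };

	static int my_get_next_task(struct child_process *cp, struct strbuf *out,
				    void *cb, void **task_cb)
	{
		struct my_work *w = cb;
		if (w->next >= w->total)
			return 0;			/* no more children to start */
		cp->git_cmd = 1;
		argv_array_pushl(&cp->args, "fetch", "--quiet", NULL);
		/* message goes into 'out', never directly to stdout/stderr */
		strbuf_addf(out, "starting job %d\n", w->next++);
		return 1;
	}

	static int my_start_failure(struct strbuf *out, void *cb, void *task_cb)
	{
		strbuf_addstr(out, "failed to start a child\n");
		return 0;				/* keep going */
	}

	static int my_task_finished(int result, struct strbuf *out, void *cb, void *task_cb)
	{
		if (result)
			strbuf_addf(out, "a child exited with %d\n", result);
		return 0;
	}

	static void run_my_work(void)
	{
		struct my_work w = { 0, 16 };
		/* run at most 4 children at any one time */
		run_processes_parallel(4, my_get_next_task, my_start_failure,
				       my_task_finished, &w);
	}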
strbuf.c
@@ -395,6 +395,12 @@ ssize_t strbuf_read_once(struct strbuf *sb, int fd, size_t hint)
	return cnt;
}

ssize_t strbuf_write(struct strbuf *sb, FILE *f)
{
	return sb->len ? fwrite(sb->buf, 1, sb->len, f) : 0;
}

#define STRBUF_MAXLINK (2*PATH_MAX)

int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint)

strbuf.h
@@ -386,6 +386,12 @@ extern ssize_t strbuf_read_file(struct strbuf *sb, const char *path, size_t hint
 */
extern int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint);

/**
 * Write the whole content of the strbuf to the stream not stopping at
 * NUL bytes.
 */
extern ssize_t strbuf_write(struct strbuf *sb, FILE *stream);

/**
 * Read a line from a FILE *, overwriting the existing contents of
 * the strbuf. The strbuf_getline*() family of functions share

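A small hedged illustration of the difference from fputs() (the buffer contents are made up):

	struct strbuf sb = STRBUF_INIT;
	strbuf_add(&sb, "binary\0data\n", 12);	/* embedded NUL byte */
	strbuf_write(&sb, stdout);		/* writes all 12 bytes; fputs(sb.buf, stdout) would stop at the NUL */
	strbuf_release(&sb);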
@@ -59,6 +59,7 @@ static void free_one_config(struct submodule_entry *entry)
{
	free((void *) entry->config->path);
	free((void *) entry->config->name);
	free((void *) entry->config->update_strategy.command);
	free(entry->config);
}

@@ -194,6 +195,8 @@ static struct submodule *lookup_or_create_by_name(struct submodule_cache *cache,

	submodule->path = NULL;
	submodule->url = NULL;
	submodule->update_strategy.type = SM_UPDATE_UNSPECIFIED;
	submodule->update_strategy.command = NULL;
	submodule->fetch_recurse = RECURSE_SUBMODULES_NONE;
	submodule->ignore = NULL;

@@ -293,7 +296,7 @@ static int parse_config(const char *var, const char *value, void *data)
	if (!strcmp(item.buf, "path")) {
		if (!value)
			ret = config_error_nonbool(var);
		else if (!me->overwrite && submodule->path != NULL)
		else if (!me->overwrite && submodule->path)
			warn_multiple_config(me->commit_sha1, submodule->name,
					"path");
		else {
@@ -317,7 +320,7 @@ static int parse_config(const char *var, const char *value, void *data)
	} else if (!strcmp(item.buf, "ignore")) {
		if (!value)
			ret = config_error_nonbool(var);
		else if (!me->overwrite && submodule->ignore != NULL)
		else if (!me->overwrite && submodule->ignore)
			warn_multiple_config(me->commit_sha1, submodule->name,
					"ignore");
		else if (strcmp(value, "untracked") &&
@@ -333,13 +336,23 @@ static int parse_config(const char *var, const char *value, void *data)
	} else if (!strcmp(item.buf, "url")) {
		if (!value) {
			ret = config_error_nonbool(var);
		} else if (!me->overwrite && submodule->url != NULL) {
		} else if (!me->overwrite && submodule->url) {
			warn_multiple_config(me->commit_sha1, submodule->name,
					"url");
		} else {
			free((void *) submodule->url);
			submodule->url = xstrdup(value);
		}
	} else if (!strcmp(item.buf, "update")) {
		if (!value)
			ret = config_error_nonbool(var);
		else if (!me->overwrite &&
			 submodule->update_strategy.type != SM_UPDATE_UNSPECIFIED)
			warn_multiple_config(me->commit_sha1, submodule->name,
					     "update");
		else if (parse_submodule_update_strategy(value,
			 &submodule->update_strategy) < 0)
			die(_("invalid value for %s"), var);
	}

	strbuf_release(&name);

@@ -2,6 +2,7 @@
#define SUBMODULE_CONFIG_CACHE_H

#include "hashmap.h"
#include "submodule.h"
#include "strbuf.h"

/*
@@ -14,6 +15,7 @@ struct submodule {
	const char *url;
	int fetch_recurse;
	const char *ignore;
	struct submodule_update_strategy update_strategy;
	/* the sha1 blob id of the responsible .gitmodules file */
	unsigned char gitmodules_sha1[20];
};

submodule.c
@@ -15,6 +15,7 @@
#include "thread-utils.h"

static int config_fetch_recurse_submodules = RECURSE_SUBMODULES_ON_DEMAND;
static int parallel_jobs = 1;
static struct string_list changed_submodule_paths;
static int initialized_fetch_ref_tips;
static struct sha1_array ref_tips_before_fetch;
@@ -169,7 +170,12 @@ void set_diffopt_flags_from_submodule_config(struct diff_options *diffopt,

int submodule_config(const char *var, const char *value, void *cb)
{
	if (starts_with(var, "submodule."))
	if (!strcmp(var, "submodule.fetchjobs")) {
		parallel_jobs = git_config_int(var, value);
		if (parallel_jobs < 0)
			die(_("negative values not allowed for submodule.fetchJobs"));
		return 0;
	} else if (starts_with(var, "submodule."))
		return parse_submodule_config_option(var, value);
	else if (!strcmp(var, "fetch.recursesubmodules")) {
		config_fetch_recurse_submodules = parse_fetch_recurse_submodules_arg(var, value);
@@ -210,6 +216,27 @@ void gitmodules_config(void)
	}
}

int parse_submodule_update_strategy(const char *value,
		struct submodule_update_strategy *dst)
{
	free((void*)dst->command);
	dst->command = NULL;
	if (!strcmp(value, "none"))
		dst->type = SM_UPDATE_NONE;
	else if (!strcmp(value, "checkout"))
		dst->type = SM_UPDATE_CHECKOUT;
	else if (!strcmp(value, "rebase"))
		dst->type = SM_UPDATE_REBASE;
	else if (!strcmp(value, "merge"))
		dst->type = SM_UPDATE_MERGE;
	else if (skip_prefix(value, "!", &value)) {
		dst->type = SM_UPDATE_COMMAND;
		dst->command = xstrdup(value);
	} else
		return -1;
	return 0;
}

void handle_ignore_submodules_arg(struct diff_options *diffopt,
				  const char *arg)
{
@@ -750,6 +777,9 @@ int fetch_populated_submodules(const struct argv_array *options,
	argv_array_push(&spf.args, "--recurse-submodules-default");
	/* default value, "--submodule-prefix" and its value are added later */

	if (max_parallel_jobs < 0)
		max_parallel_jobs = parallel_jobs;

	calculate_changed_submodule_paths();
	run_processes_parallel(max_parallel_jobs,
			       get_next_submodule,
@@ -1094,3 +1124,8 @@ void connect_work_tree_and_git_dir(const char *work_tree, const char *git_dir)
	strbuf_release(&rel_path);
	free((void *)real_work_tree);
}

int parallel_submodules(void)
{
	return parallel_jobs;
}

submodule.h
@@ -14,6 +14,21 @@ enum {
	RECURSE_SUBMODULES_ON = 2
};

enum submodule_update_type {
	SM_UPDATE_UNSPECIFIED = 0,
	SM_UPDATE_CHECKOUT,
	SM_UPDATE_REBASE,
	SM_UPDATE_MERGE,
	SM_UPDATE_NONE,
	SM_UPDATE_COMMAND
};

struct submodule_update_strategy {
	enum submodule_update_type type;
	const char *command;
};
#define SUBMODULE_UPDATE_STRATEGY_INIT {SM_UPDATE_UNSPECIFIED, NULL}

int is_staging_gitmodules_ok(void);
int update_path_in_gitmodules(const char *oldpath, const char *newpath);
int remove_path_from_gitmodules(const char *path);
@@ -22,6 +37,8 @@ void set_diffopt_flags_from_submodule_config(struct diff_options *diffopt,
		const char *path);
int submodule_config(const char *var, const char *value, void *cb);
void gitmodules_config(void);
int parse_submodule_update_strategy(const char *value,
		struct submodule_update_strategy *dst);
void handle_ignore_submodules_arg(struct diff_options *diffopt, const char *);
void show_submodule_summary(FILE *f, const char *path,
		const char *line_prefix,
@@ -42,5 +59,6 @@ int find_unpushed_submodules(unsigned char new_sha1[20], const char *remotes_nam
		struct string_list *needs_pushing);
int push_unpushed_submodules(unsigned char new_sha1[20], const char *remotes_name);
void connect_work_tree_and_git_dir(const char *work_tree, const char *git_dir);
int parallel_submodules(void);

#endif

@@ -471,4 +471,18 @@ test_expect_success "don't fetch submodule when newly recorded commits are alrea
	test_i18ncmp expect.err actual.err
'

test_expect_success 'fetching submodules respects parallel settings' '
	git config fetch.recurseSubmodules true &&
	(
		cd downstream &&
		GIT_TRACE=$(pwd)/trace.out git fetch --jobs 7 &&
		grep "7 tasks" trace.out &&
		git config submodule.fetchJobs 8 &&
		GIT_TRACE=$(pwd)/trace.out git fetch &&
		grep "8 tasks" trace.out &&
		GIT_TRACE=$(pwd)/trace.out git fetch --jobs 9 &&
		grep "9 tasks" trace.out
	)
'

test_done

@@ -462,7 +462,7 @@ test_expect_success 'update --init' '
	git config --remove-section submodule.example &&
	test_must_fail git config submodule.example.url &&

	git submodule update init > update.out &&
	git submodule update init 2> update.out &&
	cat update.out &&
	test_i18ngrep "not initialized" update.out &&
	test_must_fail git rev-parse --resolve-git-dir init/.git &&
@@ -480,7 +480,7 @@ test_expect_success 'update --init from subdirectory' '
	mkdir -p sub &&
	(
		cd sub &&
		git submodule update ../init >update.out &&
		git submodule update ../init 2>update.out &&
		cat update.out &&
		test_i18ngrep "not initialized" update.out &&
		test_must_fail git rev-parse --resolve-git-dir ../init/.git &&

@@ -774,4 +774,31 @@ test_expect_success 'submodule update --recursive drops module name before recur
		test_i18ngrep "Submodule path .deeper/submodule/subsubmodule.: checked out" actual
	)
'

test_expect_success 'submodule update can be run in parallel' '
	(cd super2 &&
		GIT_TRACE=$(pwd)/trace.out git submodule update --jobs 7 &&
		grep "7 tasks" trace.out &&
		git config submodule.fetchJobs 8 &&
		GIT_TRACE=$(pwd)/trace.out git submodule update &&
		grep "8 tasks" trace.out &&
		GIT_TRACE=$(pwd)/trace.out git submodule update --jobs 9 &&
		grep "9 tasks" trace.out
	)
'

test_expect_success 'git clone passes the parallel jobs config on to submodules' '
	test_when_finished "rm -rf super4" &&
	GIT_TRACE=$(pwd)/trace.out git clone --recurse-submodules --jobs 7 . super4 &&
	grep "7 tasks" trace.out &&
	rm -rf super4 &&
	git config --global submodule.fetchJobs 8 &&
	GIT_TRACE=$(pwd)/trace.out git clone --recurse-submodules . super4 &&
	grep "8 tasks" trace.out &&
	rm -rf super4 &&
	GIT_TRACE=$(pwd)/trace.out git clone --recurse-submodules --jobs 9 . super4 &&
	grep "9 tasks" trace.out &&
	rm -rf super4
'

test_done