#include "cache.h"
#include "config.h"
#include "transport.h"
#include "run-command.h"
#include "pkt-line.h"
#include "fetch-pack.h"
#include "remote.h"
#include "connect.h"
#include "send-pack.h"
#include "walker.h"
#include "bundle.h"
#include "dir.h"
#include "refs.h"
#include "branch.h"
#include "url.h"
#include "submodule.h"
#include "string-list.h"
#include "sha1-array.h"
#include "sigchain.h"
#include "transport-internal.h"
#include "object-store.h"

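/*
 * Record an upstream (tracking) relationship for each successfully
 * pushed branch, or only report what would be configured when
 * "pretend" is set.
 */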
static void set_upstreams(struct transport *transport, struct ref *refs,
	int pretend)
{
	struct ref *ref;
	for (ref = refs; ref; ref = ref->next) {
		const char *localname;
		const char *tmp;
		const char *remotename;
		int flag = 0;
		/*
		 * Check suitability for tracking. Must be successful /
		 * already up-to-date ref create/modify (not delete).
		 */
		if (ref->status != REF_STATUS_OK &&
			ref->status != REF_STATUS_UPTODATE)
			continue;
		if (!ref->peer_ref)
			continue;
		if (is_null_oid(&ref->new_oid))
			continue;

		/* Follow symbolic refs (mainly for HEAD). */
		localname = ref->peer_ref->name;
		remotename = ref->name;
		tmp = resolve_ref_unsafe(localname, RESOLVE_REF_READING,
					 NULL, &flag);
		if (tmp && flag & REF_ISSYMREF &&
			starts_with(tmp, "refs/heads/"))
			localname = tmp;

		/* Both source and destination must be local branches. */
		if (!localname || !starts_with(localname, "refs/heads/"))
			continue;
		if (!remotename || !starts_with(remotename, "refs/heads/"))
			continue;

		if (!pretend)
			install_branch_config(BRANCH_CONFIG_VERBOSE,
				localname + 11, transport->remote->name,
				remotename);
		else
			printf(_("Would set upstream of '%s' to '%s' of '%s'\n"),
				localname + 11, remotename + 11,
				transport->remote->name);
	}
}

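/* State for a transport that reads refs and objects from a bundle file. */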
struct bundle_transport_data {
	int fd;
	struct bundle_header header;
};

static struct ref *get_refs_from_bundle(struct transport *transport, int for_push)
{
	struct bundle_transport_data *data = transport->data;
	struct ref *result = NULL;
	int i;

	if (for_push)
		return NULL;

	if (data->fd > 0)
		close(data->fd);
	data->fd = read_bundle_header(transport->url, &data->header);
	if (data->fd < 0)
		die ("Could not read bundle '%s'.", transport->url);
	for (i = 0; i < data->header.references.nr; i++) {
		struct ref_list_entry *e = data->header.references.list + i;
		struct ref *ref = alloc_ref(e->name);
		oidcpy(&ref->old_oid, &e->oid);
		ref->next = result;
		result = ref;
	}
	return result;
}

static int fetch_refs_from_bundle(struct transport *transport,
			       int nr_heads, struct ref **to_fetch)
{
	struct bundle_transport_data *data = transport->data;
	return unbundle(&data->header, data->fd,
			transport->progress ? BUNDLE_VERBOSE : 0);
}

static int close_bundle(struct transport *transport)
{
	struct bundle_transport_data *data = transport->data;
	if (data->fd > 0)
		close(data->fd);
	free(data);
	return 0;
}

struct git_transport_data {
	struct git_transport_options options;
	struct child_process *conn;
	int fd[2];
	unsigned got_remote_heads : 1;
	struct oid_array extra_have;
	struct oid_array shallow;
};

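/*
 * Apply one transport option to the smart-transport options structure.
 * Returns 0 if the option was recognized, 1 otherwise.
 */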
static int set_git_option(struct git_transport_options *opts,
			  const char *name, const char *value)
{
	if (!strcmp(name, TRANS_OPT_UPLOADPACK)) {
		opts->uploadpack = value;
		return 0;
	} else if (!strcmp(name, TRANS_OPT_RECEIVEPACK)) {
		opts->receivepack = value;
		return 0;
	} else if (!strcmp(name, TRANS_OPT_THIN)) {
		opts->thin = !!value;
		return 0;
	} else if (!strcmp(name, TRANS_OPT_FOLLOWTAGS)) {
		opts->followtags = !!value;
		return 0;
	} else if (!strcmp(name, TRANS_OPT_KEEP)) {
		opts->keep = !!value;
		return 0;
	} else if (!strcmp(name, TRANS_OPT_UPDATE_SHALLOW)) {
		opts->update_shallow = !!value;
		return 0;
	} else if (!strcmp(name, TRANS_OPT_DEPTH)) {
		if (!value)
			opts->depth = 0;
		else {
			char *end;
			opts->depth = strtol(value, &end, 0);
			if (*end)
				die(_("transport: invalid depth option '%s'"), value);
		}
		return 0;
	} else if (!strcmp(name, TRANS_OPT_DEEPEN_SINCE)) {
		opts->deepen_since = value;
		return 0;
	} else if (!strcmp(name, TRANS_OPT_DEEPEN_NOT)) {
		opts->deepen_not = (const struct string_list *)value;
		return 0;
	} else if (!strcmp(name, TRANS_OPT_DEEPEN_RELATIVE)) {
		opts->deepen_relative = !!value;
		return 0;
	} else if (!strcmp(name, TRANS_OPT_FROM_PROMISOR)) {
		opts->from_promisor = !!value;
		return 0;
	} else if (!strcmp(name, TRANS_OPT_NO_DEPENDENTS)) {
		opts->no_dependents = !!value;
		return 0;
	} else if (!strcmp(name, TRANS_OPT_LIST_OBJECTS_FILTER)) {
		parse_list_objects_filter(&opts->filter_options, value);
		return 0;
	}
	return 1;
}

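/* Open a connection to the remote side, unless one is already established. */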
static int connect_setup(struct transport *transport, int for_push)
{
	struct git_transport_data *data = transport->data;
	int flags = transport->verbose > 0 ? CONNECT_VERBOSE : 0;

	if (data->conn)
		return 0;

	switch (transport->family) {
	case TRANSPORT_FAMILY_ALL: break;
	case TRANSPORT_FAMILY_IPV4: flags |= CONNECT_IPV4; break;
	case TRANSPORT_FAMILY_IPV6: flags |= CONNECT_IPV6; break;
	}

	data->conn = git_connect(data->fd, transport->url,
				 for_push ? data->options.receivepack :
				 data->options.uploadpack,
				 flags);

	return 0;
}

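/* List the remote refs by connecting and reading the ref advertisement. */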
static struct ref *get_refs_via_connect(struct transport *transport, int for_push)
{
	struct git_transport_data *data = transport->data;
	struct ref *refs;

	connect_setup(transport, for_push);
	get_remote_heads(data->fd[0], NULL, 0, &refs,
			 for_push ? REF_NORMAL : 0,
			 &data->extra_have,
			 &data->shallow);
	data->got_remote_heads = 1;

	return refs;
}

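/*
 * Fetch the requested refs over the native protocol by handing the
 * negotiated options to fetch_pack(); returns 0 on success, -1 on error.
 */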
static int fetch_refs_via_pack(struct transport *transport,
			       int nr_heads, struct ref **to_fetch)
{
	int ret = 0;
	struct git_transport_data *data = transport->data;
	struct ref *refs;
	char *dest = xstrdup(transport->url);
	struct fetch_pack_args args;
	struct ref *refs_tmp = NULL;

	memset(&args, 0, sizeof(args));
	args.uploadpack = data->options.uploadpack;
	args.keep_pack = data->options.keep;
	args.lock_pack = 1;
	args.use_thin_pack = data->options.thin;
	args.include_tag = data->options.followtags;
	args.verbose = (transport->verbose > 1);
	args.quiet = (transport->verbose < 0);
	args.no_progress = !transport->progress;
	args.depth = data->options.depth;
	args.deepen_since = data->options.deepen_since;
	args.deepen_not = data->options.deepen_not;
	args.deepen_relative = data->options.deepen_relative;
	args.check_self_contained_and_connected =
		data->options.check_self_contained_and_connected;
	args.cloning = transport->cloning;
	args.update_shallow = data->options.update_shallow;
	args.from_promisor = data->options.from_promisor;
	args.no_dependents = data->options.no_dependents;
	args.filter_options = data->options.filter_options;

	if (!data->got_remote_heads) {
		connect_setup(transport, 0);
		get_remote_heads(data->fd[0], NULL, 0, &refs_tmp, 0,
				 NULL, &data->shallow);
		data->got_remote_heads = 1;
	}

	refs = fetch_pack(&args, data->fd, data->conn,
			  refs_tmp ? refs_tmp : transport->remote_refs,
			  dest, to_fetch, nr_heads, &data->shallow,
			  &transport->pack_lockfile);
	close(data->fd[0]);
	close(data->fd[1]);
	if (finish_connect(data->conn))
		ret = -1;
	data->conn = NULL;
	data->got_remote_heads = 0;
	data->options.self_contained_and_connected =
		args.self_contained_and_connected;

	if (refs == NULL)
		ret = -1;
	if (report_unmatched_refs(to_fetch, nr_heads))
		ret = -1;

	free_refs(refs_tmp);
	free_refs(refs);
	free(dest);
	return ret;
}

static int push_had_errors(struct ref *ref)
{
	for (; ref; ref = ref->next) {
		switch (ref->status) {
		case REF_STATUS_NONE:
		case REF_STATUS_UPTODATE:
		case REF_STATUS_OK:
			break;
		default:
			return 1;
		}
	}
	return 0;
}

int transport_refs_pushed(struct ref *ref)
{
	for (; ref; ref = ref->next) {
		switch(ref->status) {
		case REF_STATUS_NONE:
		case REF_STATUS_UPTODATE:
			break;
		default:
			return 1;
		}
	}
	return 0;
}

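/*
 * Update (or delete) the remote-tracking ref corresponding to a ref
 * that has just been pushed successfully.
 */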
void transport_update_tracking_ref(struct remote *remote, struct ref *ref, int verbose)
{
	struct refspec rs;

	if (ref->status != REF_STATUS_OK && ref->status != REF_STATUS_UPTODATE)
		return;

	rs.src = ref->name;
	rs.dst = NULL;

	if (!remote_find_tracking(remote, &rs)) {
		if (verbose)
			fprintf(stderr, "updating local tracking ref '%s'\n", rs.dst);
		if (ref->deletion) {
			delete_ref(NULL, rs.dst, NULL, 0);
		} else
			update_ref("update by push", rs.dst, &ref->new_oid,
				   NULL, 0, 0);
		free(rs.dst);
	}
}

static void print_ref_status(char flag, const char *summary,
			     struct ref *to, struct ref *from, const char *msg,
			     int porcelain, int summary_width)
{
	if (porcelain) {
		if (from)
			fprintf(stdout, "%c\t%s:%s\t", flag, from->name, to->name);
		else
			fprintf(stdout, "%c\t:%s\t", flag, to->name);
		if (msg)
			fprintf(stdout, "%s (%s)\n", summary, msg);
		else
			fprintf(stdout, "%s\n", summary);
	} else {
		fprintf(stderr, " %c %-*s ", flag, summary_width, summary);
		if (from)
			fprintf(stderr, "%s -> %s", prettify_refname(from->name), prettify_refname(to->name));
		else
			fputs(prettify_refname(to->name), stderr);
		if (msg) {
			fputs(" (", stderr);
			fputs(msg, stderr);
			fputc(')', stderr);
		}
		fputc('\n', stderr);
	}
}

static void print_ok_ref_status(struct ref *ref, int porcelain, int summary_width)
{
	if (ref->deletion)
		print_ref_status('-', "[deleted]", ref, NULL, NULL,
				 porcelain, summary_width);
	else if (is_null_oid(&ref->old_oid))
		print_ref_status('*',
				 (starts_with(ref->name, "refs/tags/") ? "[new tag]" :
				 "[new branch]"),
				 ref, ref->peer_ref, NULL, porcelain, summary_width);
	else {
		struct strbuf quickref = STRBUF_INIT;
		char type;
		const char *msg;

		strbuf_add_unique_abbrev(&quickref, &ref->old_oid,
					 DEFAULT_ABBREV);
		if (ref->forced_update) {
			strbuf_addstr(&quickref, "...");
			type = '+';
			msg = "forced update";
		} else {
			strbuf_addstr(&quickref, "..");
			type = ' ';
			msg = NULL;
		}
		strbuf_add_unique_abbrev(&quickref, &ref->new_oid,
					 DEFAULT_ABBREV);

		print_ref_status(type, quickref.buf, ref, ref->peer_ref, msg,
				 porcelain, summary_width);
		strbuf_release(&quickref);
	}
}

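/*
 * Print the status of a single pushed ref; the "To <url>" header is
 * emitted before the first ref (count == 0).  Always returns 1 so that
 * callers can count the refs reported.
 */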
static int print_one_push_status(struct ref *ref, const char *dest, int count,
				 int porcelain, int summary_width)
{
	if (!count) {
		char *url = transport_anonymize_url(dest);
		fprintf(porcelain ? stdout : stderr, "To %s\n", url);
		free(url);
	}

	switch(ref->status) {
	case REF_STATUS_NONE:
		print_ref_status('X', "[no match]", ref, NULL, NULL,
				 porcelain, summary_width);
		break;
	case REF_STATUS_REJECT_NODELETE:
		print_ref_status('!', "[rejected]", ref, NULL,
				 "remote does not support deleting refs",
				 porcelain, summary_width);
		break;
	case REF_STATUS_UPTODATE:
		print_ref_status('=', "[up to date]", ref,
				 ref->peer_ref, NULL, porcelain, summary_width);
		break;
	case REF_STATUS_REJECT_NONFASTFORWARD:
		print_ref_status('!', "[rejected]", ref, ref->peer_ref,
				 "non-fast-forward", porcelain, summary_width);
		break;
	case REF_STATUS_REJECT_ALREADY_EXISTS:
		print_ref_status('!', "[rejected]", ref, ref->peer_ref,
				 "already exists", porcelain, summary_width);
		break;
	case REF_STATUS_REJECT_FETCH_FIRST:
		print_ref_status('!', "[rejected]", ref, ref->peer_ref,
				 "fetch first", porcelain, summary_width);
		break;
	case REF_STATUS_REJECT_NEEDS_FORCE:
		print_ref_status('!', "[rejected]", ref, ref->peer_ref,
				 "needs force", porcelain, summary_width);
		break;
	case REF_STATUS_REJECT_STALE:
		print_ref_status('!', "[rejected]", ref, ref->peer_ref,
				 "stale info", porcelain, summary_width);
		break;
	case REF_STATUS_REJECT_SHALLOW:
		print_ref_status('!', "[rejected]", ref, ref->peer_ref,
				 "new shallow roots not allowed",
				 porcelain, summary_width);
		break;
	case REF_STATUS_REMOTE_REJECT:
		print_ref_status('!', "[remote rejected]", ref,
				 ref->deletion ? NULL : ref->peer_ref,
				 ref->remote_status, porcelain, summary_width);
		break;
	case REF_STATUS_EXPECTING_REPORT:
		print_ref_status('!', "[remote failure]", ref,
				 ref->deletion ? NULL : ref->peer_ref,
				 "remote failed to report status",
				 porcelain, summary_width);
		break;
	case REF_STATUS_ATOMIC_PUSH_FAILED:
		print_ref_status('!', "[rejected]", ref, ref->peer_ref,
				 "atomic push failed", porcelain, summary_width);
		break;
	case REF_STATUS_OK:
		print_ok_ref_status(ref, porcelain, summary_width);
		break;
	}

	return 1;
}

static int measure_abbrev(const struct object_id *oid, int sofar)
{
	char hex[GIT_MAX_HEXSZ + 1];
	int w = find_unique_abbrev_r(hex, oid, DEFAULT_ABBREV);

	return (w < sofar) ? sofar : w;
}

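/*
 * Compute the width of the "old..new" summary column from the
 * abbreviated object names of all refs, falling back to the default
 * abbreviation length when there are no refs.
 */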
int transport_summary_width(const struct ref *refs)
{
	int maxw = -1;

	for (; refs; refs = refs->next) {
		maxw = measure_abbrev(&refs->old_oid, maxw);
		maxw = measure_abbrev(&refs->new_oid, maxw);
	}
	if (maxw < 0)
		maxw = FALLBACK_DEFAULT_ABBREV;
	return (2 * maxw + 3);
}

void transport_print_push_status(const char *dest, struct ref *refs,
				 int verbose, int porcelain, unsigned int *reject_reasons)
{
	struct ref *ref;
	int n = 0;
	char *head;
	int summary_width = transport_summary_width(refs);

	head = resolve_refdup("HEAD", RESOLVE_REF_READING, NULL, NULL);

	if (verbose) {
		for (ref = refs; ref; ref = ref->next)
			if (ref->status == REF_STATUS_UPTODATE)
				n += print_one_push_status(ref, dest, n,
							   porcelain, summary_width);
	}

	for (ref = refs; ref; ref = ref->next)
		if (ref->status == REF_STATUS_OK)
			n += print_one_push_status(ref, dest, n,
						   porcelain, summary_width);

	*reject_reasons = 0;
	for (ref = refs; ref; ref = ref->next) {
		if (ref->status != REF_STATUS_NONE &&
		    ref->status != REF_STATUS_UPTODATE &&
		    ref->status != REF_STATUS_OK)
			n += print_one_push_status(ref, dest, n,
						   porcelain, summary_width);
		if (ref->status == REF_STATUS_REJECT_NONFASTFORWARD) {
			if (head != NULL && !strcmp(head, ref->name))
				*reject_reasons |= REJECT_NON_FF_HEAD;
			else
				*reject_reasons |= REJECT_NON_FF_OTHER;
		} else if (ref->status == REF_STATUS_REJECT_ALREADY_EXISTS) {
			*reject_reasons |= REJECT_ALREADY_EXISTS;
		} else if (ref->status == REF_STATUS_REJECT_FETCH_FIRST) {
			*reject_reasons |= REJECT_FETCH_FIRST;
		} else if (ref->status == REF_STATUS_REJECT_NEEDS_FORCE) {
			*reject_reasons |= REJECT_NEEDS_FORCE;
		}
	}
	free(head);
}

void transport_verify_remote_names(int nr_heads, const char **heads)
{
	int i;

	for (i = 0; i < nr_heads; i++) {
		const char *local = heads[i];
		const char *remote = strrchr(heads[i], ':');

		if (*local == '+')
			local++;

		/* A matching refspec is okay. */
		if (remote == local && remote[1] == '\0')
			continue;

		remote = remote ? (remote + 1) : local;
		if (check_refname_format(remote,
				REFNAME_ALLOW_ONELEVEL|REFNAME_REFSPEC_PATTERN))
			die("remote part of refspec is not a valid name in %s",
				heads[i]);
	}
}

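/* Push refs over the native protocol by handing them to send_pack(). */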
static int git_transport_push(struct transport *transport, struct ref *remote_refs, int flags)
{
	struct git_transport_data *data = transport->data;
	struct send_pack_args args;
	int ret;

	if (!data->got_remote_heads) {
		struct ref *tmp_refs;
		connect_setup(transport, 1);

		get_remote_heads(data->fd[0], NULL, 0, &tmp_refs, REF_NORMAL,
				 NULL, &data->shallow);
		data->got_remote_heads = 1;
	}

	memset(&args, 0, sizeof(args));
	args.send_mirror = !!(flags & TRANSPORT_PUSH_MIRROR);
	args.force_update = !!(flags & TRANSPORT_PUSH_FORCE);
	args.use_thin_pack = data->options.thin;
	args.verbose = (transport->verbose > 0);
	args.quiet = (transport->verbose < 0);
	args.progress = transport->progress;
	args.dry_run = !!(flags & TRANSPORT_PUSH_DRY_RUN);
	args.porcelain = !!(flags & TRANSPORT_PUSH_PORCELAIN);
	args.atomic = !!(flags & TRANSPORT_PUSH_ATOMIC);
	args.push_options = transport->push_options;
	args.url = transport->url;

	if (flags & TRANSPORT_PUSH_CERT_ALWAYS)
		args.push_cert = SEND_PACK_PUSH_CERT_ALWAYS;
	else if (flags & TRANSPORT_PUSH_CERT_IF_ASKED)
		args.push_cert = SEND_PACK_PUSH_CERT_IF_ASKED;
	else
		args.push_cert = SEND_PACK_PUSH_CERT_NEVER;

	ret = send_pack(&args, data->fd, data->conn, remote_refs,
			&data->extra_have);

	close(data->fd[1]);
	close(data->fd[0]);
	ret |= finish_connect(data->conn);
	data->conn = NULL;
	data->got_remote_heads = 0;

	return ret;
}

static int connect_git(struct transport *transport, const char *name,
		       const char *executable, int fd[2])
{
	struct git_transport_data *data = transport->data;
	data->conn = git_connect(data->fd, transport->url,
				 executable, 0);
	fd[0] = data->fd[0];
	fd[1] = data->fd[1];
	return 0;
}

static int disconnect_git(struct transport *transport)
{
	struct git_transport_data *data = transport->data;
	if (data->conn) {
		if (data->got_remote_heads)
			packet_flush(data->fd[1]);
		close(data->fd[0]);
		close(data->fd[1]);
		finish_connect(data->conn);
	}

	free(data);
	return 0;
}

static struct transport_vtable taken_over_vtable = {
	NULL,
	get_refs_via_connect,
	fetch_refs_via_pack,
	git_transport_push,
	NULL,
	disconnect_git
};

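/*
 * Take over a connection established by a remote helper with the
 * "connect" capability, so the native smart-protocol code can run on
 * top of it.
 */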
void transport_take_over(struct transport *transport,
			 struct child_process *child)
{
	struct git_transport_data *data;

	if (!transport->smart_options)
		die("BUG: taking over transport requires non-NULL "
		    "smart_options field.");

	data = xcalloc(1, sizeof(*data));
	data->options = *transport->smart_options;
	data->conn = child;
	data->fd[0] = data->conn->out;
	data->fd[1] = data->conn->in;
	data->got_remote_heads = 0;
	transport->data = data;

	transport->vtable = &taken_over_vtable;
	transport->smart_options = &(data->options);

	transport->cannot_reuse = 1;
}

static int is_file(const char *url)
{
	struct stat buf;
	if (stat(url, &buf))
		return 0;
	return S_ISREG(buf.st_mode);
}

static int external_specification_len(const char *url)
{
	return strchr(url, ':') - url;
}

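/*
 * Parse GIT_ALLOW_PROTOCOL into a sorted list of allowed protocols;
 * returns NULL when the variable is unset, meaning all protocols are
 * allowed.
 */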
static const struct string_list *protocol_whitelist(void)
|
transport: add a protocol-whitelist environment variable
If we are cloning an untrusted remote repository into a
sandbox, we may also want to fetch remote submodules in
order to get the complete view as intended by the other
side. However, that opens us up to attacks where a malicious
user gets us to clone something they would not otherwise
have access to (this is not necessarily a problem by itself,
but we may then act on the cloned contents in a way that
exposes them to the attacker).
Ideally such a setup would sandbox git entirely away from
high-value items, but this is not always practical or easy
to set up (e.g., OS network controls may block multiple
protocols, and we would want to enable some but not others).
We can help this case by providing a way to restrict
particular protocols. We use a whitelist in the environment.
This is more annoying to set up than a blacklist, but
defaults to safety if the set of protocols git supports
grows). If no whitelist is specified, we continue to default
to allowing all protocols (this is an "unsafe" default, but
since the minority of users will want this sandboxing
effect, it is the only sensible one).
A note on the tests: ideally these would all be in a single
test file, but the git-daemon and httpd test infrastructure
is an all-or-nothing proposition rather than a test-by-test
prerequisite. By putting them all together, we would be
unable to test the file-local code on machines without
apache.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-09-16 19:12:52 +02:00
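A hedged usage illustration of the environment whitelist parsed below;
the clone URL is a placeholder.
/*
 * Restrict git to three protocols while cloning with submodules; any
 * other protocol (e.g. an ext:: submodule URL) is then rejected by
 * transport_check_allowed():
 *
 *     GIT_ALLOW_PROTOCOL=http:https:ssh git clone --recurse-submodules <url>
 */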
|
|
|
{
|
2015-09-23 00:03:49 +02:00
|
|
|
static int enabled = -1;
|
|
|
|
static struct string_list allowed = STRING_LIST_INIT_DUP;
|
2015-09-16 19:12:52 +02:00
|
|
|
|
2015-09-23 00:03:49 +02:00
|
|
|
if (enabled < 0) {
|
|
|
|
const char *v = getenv("GIT_ALLOW_PROTOCOL");
|
|
|
|
if (v) {
|
|
|
|
string_list_split(&allowed, v, ':', -1);
|
|
|
|
string_list_sort(&allowed);
|
|
|
|
enabled = 1;
|
|
|
|
} else {
|
|
|
|
enabled = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return enabled ? &allowed : NULL;
|
|
|
|
}
|
2015-09-16 19:12:52 +02:00
|
|
|
|
2016-12-14 23:39:52 +01:00
|
|
|
enum protocol_allow_config {
|
|
|
|
PROTOCOL_ALLOW_NEVER = 0,
|
|
|
|
PROTOCOL_ALLOW_USER_ONLY,
|
|
|
|
PROTOCOL_ALLOW_ALWAYS
|
|
|
|
};
|
|
|
|
|
|
|
|
static enum protocol_allow_config parse_protocol_config(const char *key,
|
|
|
|
const char *value)
|
2015-09-23 00:03:49 +02:00
|
|
|
{
|
2016-12-14 23:39:52 +01:00
|
|
|
if (!strcasecmp(value, "always"))
|
|
|
|
return PROTOCOL_ALLOW_ALWAYS;
|
|
|
|
else if (!strcasecmp(value, "never"))
|
|
|
|
return PROTOCOL_ALLOW_NEVER;
|
|
|
|
else if (!strcasecmp(value, "user"))
|
|
|
|
return PROTOCOL_ALLOW_USER_ONLY;
|
|
|
|
|
|
|
|
die("unknown value for config '%s': %s", key, value);
|
2015-09-23 00:03:49 +02:00
|
|
|
}
|
|
|
|
|
2016-12-14 23:39:52 +01:00
|
|
|
static enum protocol_allow_config get_protocol_config(const char *type)
|
2015-09-23 00:03:49 +02:00
|
|
|
{
|
2016-12-14 23:39:52 +01:00
|
|
|
char *key = xstrfmt("protocol.%s.allow", type);
|
|
|
|
char *value;
|
|
|
|
|
|
|
|
/* first check the per-protocol config */
|
|
|
|
if (!git_config_get_string(key, &value)) {
|
|
|
|
enum protocol_allow_config ret =
|
|
|
|
parse_protocol_config(key, value);
|
|
|
|
free(key);
|
|
|
|
free(value);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
free(key);
|
|
|
|
|
|
|
|
/* if defined, fallback to user-defined default for unknown protocols */
|
|
|
|
if (!git_config_get_string("protocol.allow", &value)) {
|
|
|
|
enum protocol_allow_config ret =
|
|
|
|
parse_protocol_config("protocol.allow", value);
|
|
|
|
free(value);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* fallback to built-in defaults */
|
|
|
|
/* known safe */
|
|
|
|
if (!strcmp(type, "http") ||
|
|
|
|
!strcmp(type, "https") ||
|
|
|
|
!strcmp(type, "git") ||
|
|
|
|
!strcmp(type, "ssh") ||
|
|
|
|
!strcmp(type, "file"))
|
|
|
|
return PROTOCOL_ALLOW_ALWAYS;
|
|
|
|
|
|
|
|
/* known scary; err on the side of caution */
|
|
|
|
if (!strcmp(type, "ext"))
|
|
|
|
return PROTOCOL_ALLOW_NEVER;
|
|
|
|
|
|
|
|
/* unknown; by default let them be used only directly by the user */
|
|
|
|
return PROTOCOL_ALLOW_USER_ONLY;
|
2015-09-23 00:03:49 +02:00
|
|
|
}
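A hedged configuration sketch mirroring the lookup order implemented
above; the chosen values are examples only.
/*
 * The per-protocol setting wins, then the user-defined default, then
 * the built-in defaults:
 *
 *     [protocol "ext"]
 *             allow = user    # checked first, via protocol.ext.allow
 *     [protocol]
 *             allow = never   # fallback for protocols with no entry
 *
 * With no configuration at all, http/https/git/ssh/file behave as
 * "always", ext as "never", and unknown protocols as "user".
 */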
|
|
|
|
|
2016-12-14 23:39:54 +01:00
|
|
|
int is_transport_allowed(const char *type, int from_user)
|
2015-09-23 00:03:49 +02:00
|
|
|
{
|
2016-12-14 23:39:52 +01:00
|
|
|
const struct string_list *whitelist = protocol_whitelist();
|
|
|
|
if (whitelist)
|
|
|
|
return string_list_has_string(whitelist, type);
|
|
|
|
|
|
|
|
switch (get_protocol_config(type)) {
|
|
|
|
case PROTOCOL_ALLOW_ALWAYS:
|
|
|
|
return 1;
|
|
|
|
case PROTOCOL_ALLOW_NEVER:
|
|
|
|
return 0;
|
|
|
|
case PROTOCOL_ALLOW_USER_ONLY:
|
2016-12-14 23:39:54 +01:00
|
|
|
if (from_user < 0)
|
|
|
|
from_user = git_env_bool("GIT_PROTOCOL_FROM_USER", 1);
|
|
|
|
return from_user;
|
2016-12-14 23:39:52 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
die("BUG: invalid protocol_allow_config type");
|
2015-09-23 00:03:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void transport_check_allowed(const char *type)
|
|
|
|
{
|
2016-12-14 23:39:54 +01:00
|
|
|
if (!is_transport_allowed(type, -1))
|
2015-09-16 19:12:52 +02:00
|
|
|
die("transport '%s' not allowed", type);
|
|
|
|
}
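A short illustrative caller, not part of the original file, showing the
two entry points above; the protocol names are examples and warning()
from the usual git headers is assumed to be available.
static void example_policy_checks(void)
{
	/* Die outright if the "ext" protocol is disabled by policy. */
	transport_check_allowed("ext");

	/*
	 * Or make a soft decision; from_user=1 means the URL came directly
	 * from the user rather than from, say, a submodule.
	 */
	if (!is_transport_allowed("http", 1))
		warning("http transport disabled by configuration");
}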
|
|
|
|
|
2017-12-14 22:44:45 +01:00
|
|
|
static struct transport_vtable bundle_vtable = {
|
|
|
|
NULL,
|
|
|
|
get_refs_from_bundle,
|
|
|
|
fetch_refs_from_bundle,
|
|
|
|
NULL,
|
|
|
|
NULL,
|
|
|
|
close_bundle
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct transport_vtable builtin_smart_vtable = {
|
|
|
|
NULL,
|
|
|
|
get_refs_via_connect,
|
|
|
|
fetch_refs_via_pack,
|
|
|
|
git_transport_push,
|
|
|
|
connect_git,
|
|
|
|
disconnect_git
|
|
|
|
};
|
|
|
|
|
2007-09-15 09:23:14 +02:00
|
|
|
struct transport *transport_get(struct remote *remote, const char *url)
|
2007-09-11 05:03:04 +02:00
|
|
|
{
|
2010-01-27 18:53:17 +01:00
|
|
|
const char *helper;
|
2007-09-15 09:23:07 +02:00
|
|
|
struct transport *ret = xcalloc(1, sizeof(*ret));
|
|
|
|
|
2010-02-24 13:50:26 +01:00
|
|
|
ret->progress = isatty(2);
|
|
|
|
|
2009-11-04 03:38:51 +01:00
|
|
|
if (!remote)
|
|
|
|
die("No remote provided to transport_get()");
|
|
|
|
|
2010-02-16 08:18:21 +01:00
|
|
|
ret->got_remote_refs = 0;
|
2007-09-15 09:23:07 +02:00
|
|
|
ret->remote = remote;
|
2010-01-27 18:53:17 +01:00
|
|
|
helper = remote->foreign_vcs;
|
2009-11-18 02:42:22 +01:00
|
|
|
|
2010-01-27 21:22:37 +01:00
|
|
|
if (!url && remote->url)
|
2009-11-18 02:42:22 +01:00
|
|
|
url = remote->url[0];
|
2007-09-15 09:23:07 +02:00
|
|
|
ret->url = url;
|
|
|
|
|
2009-11-18 02:42:26 +01:00
|
|
|
/* maybe it is a foreign URL? */
|
|
|
|
if (url) {
|
|
|
|
const char *p = url;
|
|
|
|
|
2010-05-23 11:17:55 +02:00
|
|
|
while (is_urlschemechar(p == url, *p))
|
2009-11-18 02:42:26 +01:00
|
|
|
p++;
|
2013-11-30 21:55:40 +01:00
|
|
|
if (starts_with(p, "::"))
|
2010-01-27 18:53:17 +01:00
|
|
|
helper = xstrndup(url, p - url);
|
2009-11-18 02:42:26 +01:00
|
|
|
}
|
|
|
|
|
2010-01-27 18:53:17 +01:00
|
|
|
if (helper) {
|
|
|
|
transport_helper_init(ret, helper);
|
2013-11-30 21:55:40 +01:00
|
|
|
} else if (starts_with(url, "rsync:")) {
|
transport: drop support for git-over-rsync
The git-over-rsync protocol is inefficient and broken, and
has been for a long time. It transfers way more objects than
it needs (grabbing all of the remote's "objects/",
regardless of which objects we need). It does its own ad-hoc
parsing of loose and packed refs from the remote, but
doesn't properly override packed refs with loose ones,
leading to garbage results (e.g., expecting the other side
to have an object pointed to by a stale packed-refs entry,
or complaining that the other side has two copies of the
refs[1]).
This latter breakage means that nobody could have
successfully pulled from a moderately active repository
since cd547b4 (fetch/push: readd rsync support, 2007-10-01).
We never made an official deprecation notice in the release
notes for git's rsync protocol, but the tutorial has marked
it as such since 914328a (Update tutorial., 2005-08-30).
And on the mailing list as far back as Oct 2005, we can find
Junio mentioning it as having "been deprecated for quite
some time."[2,3,4]. So it was old news then; cogito had
deprecated the transport in July of 2005[5] (though it did
come back briefly when Linus broke git-http-pull!).
Of course some people professed their love of rsync through
2006, but Linus clarified in his usual gentle manner[6]:
> Thanks! This is why I still use rsync, even though
> everybody and their mother tells me "Linus says rsync is
> deprecated."
No. You're using rsync because you're actively doing
something _wrong_.
The deprecation sentiment was reinforced in 2008, with a
mention that cloning via rsync is broken (with no fix)[7].
Even the commit porting rsync over to C from shell (cd547b4)
lists it as deprecated! So between the 10 years of informal
warnings, and the fact that it has been severely broken
since 2007, it's probably safe to simply remove it without
further deprecation warnings.
[1] http://article.gmane.org/gmane.comp.version-control.git/285101
[2] http://article.gmane.org/gmane.comp.version-control.git/10093
[3] http://article.gmane.org/gmane.comp.version-control.git/17734
[4] http://article.gmane.org/gmane.comp.version-control.git/18911
[5] http://article.gmane.org/gmane.comp.version-control.git/5617
[6] http://article.gmane.org/gmane.comp.version-control.git/19354
[7] http://article.gmane.org/gmane.comp.version-control.git/103635
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-01-30 08:21:26 +01:00
|
|
|
die("git-over-rsync is no longer supported");
|
2013-11-28 20:50:03 +01:00
|
|
|
} else if (url_is_local_not_ssh(url) && is_file(url) && is_bundle(url, 1)) {
|
2007-09-11 05:03:21 +02:00
|
|
|
struct bundle_transport_data *data = xcalloc(1, sizeof(*data));
|
2015-09-16 19:12:52 +02:00
|
|
|
transport_check_allowed("file");
|
2007-09-11 05:03:21 +02:00
|
|
|
ret->data = data;
|
2017-12-14 22:44:45 +01:00
|
|
|
ret->vtable = &bundle_vtable;
|
2009-12-09 16:26:30 +01:00
|
|
|
ret->smart_options = NULL;
|
2009-12-09 16:26:29 +01:00
|
|
|
} else if (!is_url(url)
|
2013-11-30 21:55:40 +01:00
|
|
|
|| starts_with(url, "file://")
|
|
|
|
|| starts_with(url, "git://")
|
|
|
|
|| starts_with(url, "ssh://")
|
2016-02-15 15:29:06 +01:00
|
|
|
|| starts_with(url, "git+ssh://") /* deprecated - do not use */
|
|
|
|
|| starts_with(url, "ssh+git://") /* deprecated - do not use */
|
|
|
|
) {
|
2015-09-16 19:12:52 +02:00
|
|
|
/*
|
|
|
|
* These are builtin smart transports; "allowed" transports
|
|
|
|
* will be checked individually in git_connect.
|
|
|
|
*/
|
2007-09-11 05:03:04 +02:00
|
|
|
struct git_transport_data *data = xcalloc(1, sizeof(*data));
|
|
|
|
ret->data = data;
|
2017-12-14 22:44:45 +01:00
|
|
|
ret->vtable = &builtin_smart_vtable;
|
2009-12-09 16:26:30 +01:00
|
|
|
ret->smart_options = &(data->options);
|
2007-09-19 06:49:31 +02:00
|
|
|
|
2008-02-04 19:26:23 +01:00
|
|
|
data->conn = NULL;
|
2009-12-09 16:26:31 +01:00
|
|
|
data->got_remote_heads = 0;
|
2009-12-09 16:26:29 +01:00
|
|
|
} else {
|
|
|
|
/* Unknown protocol in URL. Pass to external handler. */
|
|
|
|
int len = external_specification_len(url);
|
2014-12-24 01:18:31 +01:00
|
|
|
char *handler = xmemdupz(url, len);
|
2009-12-09 16:26:29 +01:00
|
|
|
transport_helper_init(ret, handler);
|
2007-09-11 05:03:04 +02:00
|
|
|
}
|
2007-09-15 09:23:07 +02:00
|
|
|
|
2009-12-09 16:26:30 +01:00
|
|
|
if (ret->smart_options) {
|
|
|
|
ret->smart_options->thin = 1;
|
|
|
|
ret->smart_options->uploadpack = "git-upload-pack";
|
|
|
|
if (remote->uploadpack)
|
|
|
|
ret->smart_options->uploadpack = remote->uploadpack;
|
|
|
|
ret->smart_options->receivepack = "git-receive-pack";
|
|
|
|
if (remote->receivepack)
|
|
|
|
ret->smart_options->receivepack = remote->receivepack;
|
|
|
|
}
|
|
|
|
|
2007-09-11 05:03:04 +02:00
|
|
|
return ret;
|
|
|
|
}
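A hedged summary of the URL dispatch above; the URLs are made-up
examples.
/*
 *     hg::https://example.com/repo     -> remote helper "hg"
 *     /path/to/snapshot.bundle         -> bundle transport (bundle_vtable)
 *     git://example.com/repo.git       -> builtin smart transport
 *     ssh://example.com/repo.git       -> builtin smart transport
 *     https://example.com/repo.git     -> remote helper "https" (remote-curl)
 */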
|
|
|
|
|
|
|
|
int transport_set_option(struct transport *transport,
|
|
|
|
const char *name, const char *value)
|
|
|
|
{
|
2009-12-09 16:26:30 +01:00
|
|
|
int git_reports = 1, protocol_reports = 1;
|
|
|
|
|
|
|
|
if (transport->smart_options)
|
|
|
|
git_reports = set_git_option(transport->smart_options,
|
|
|
|
name, value);
|
|
|
|
|
2017-12-14 22:44:45 +01:00
|
|
|
if (transport->vtable->set_option)
|
|
|
|
protocol_reports = transport->vtable->set_option(transport,
|
|
|
|
name, value);
|
2009-12-09 16:26:30 +01:00
|
|
|
|
|
|
|
/* If either report is 0, report 0 (success). */
|
|
|
|
if (!git_reports || !protocol_reports)
|
|
|
|
return 0;
|
|
|
|
/* If either reports -1 (invalid value), report -1. */
|
|
|
|
if ((git_reports == -1) || (protocol_reports == -1))
|
|
|
|
return -1;
|
|
|
|
/* Otherwise if both report unknown, report unknown. */
|
2007-09-18 10:54:57 +02:00
|
|
|
return 1;
|
2007-09-11 05:03:04 +02:00
|
|
|
}
|
|
|
|
|
2010-02-24 13:50:26 +01:00
|
|
|
void transport_set_verbosity(struct transport *transport, int verbosity,
|
|
|
|
int force_progress)
|
2010-02-24 13:50:23 +01:00
|
|
|
{
|
make "git push -v" actually verbose
Providing a single "-v" to "git push" currently does
nothing. Giving two flags ("git push -v -v") turns on the
first level of verbosity.
This is caused by a regression introduced in 8afd8dc (push:
support multiple levels of verbosity, 2010-02-24). Before
the series containing 8afd8dc, the verbosity handling for
fetching and pushing was completely separate. Commit bde873c
refactored the verbosity handling out of the fetch side, and
then 8afd8dc converted push to use the refactored code.
However, the fetch and push sides numbered and passed along
their verbosity levels differently. For both, a verbosity
level of "-1" meant "quiet", and "0" meant "default output".
But from there they differed.
For fetch, a verbosity level of "1" indicated to the "fetch"
program that it should make the status table slightly more
verbose, showing up-to-date entries. A verbosity level of
"2" meant that we should pass a verbose flag to the
transport; in the case of fetch-pack, this displays protocol
debugging information.
As a result, the refactored code in bde873c checks for
"verbosity >= 2", and only then passes it on to the
transport. From the transport code's perspective, a
verbosity of 0 or 1 both meant "0".
Push, on the other hand, does not show its own status table;
that is always handled by the transport layer or below
(originally send-pack itself, but these days it is done by
the transport code). So a verbosity level of 1 meant that we
should pass the verbose flag to send-pack, so that it knows
we want a verbose status table. However, once 8afd8dc
switched it to the refactored fetch code, a verbosity level
of 1 was now being ignored. Thus, you needed to
artificially bump the verbosity to 2 (via "-v -v") to have
any effect.
We can fix this by letting the transport code know about the
true verbosity level (i.e., let it distinguish level 0 or
1).
We then have to also make an adjustment to any transport
methods that assumed "verbose > 0" meant they could spew
lots of debugging information. Before, they could only get
"0" or "2", but now they will also receive "1". They need to
adjust their condition for turning on such spew from
"verbose > 0" to "verbose > 1".
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2011-12-17 10:37:15 +01:00
|
|
|
if (verbosity >= 1)
|
2010-02-24 13:50:23 +01:00
|
|
|
transport->verbose = verbosity <= 3 ? verbosity : 3;
|
|
|
|
if (verbosity < 0)
|
|
|
|
transport->verbose = -1;
|
2010-02-24 13:50:26 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Rules used to determine whether to report progress (processing aborts
|
|
|
|
* when a rule is satisfied):
|
|
|
|
*
|
2012-02-13 21:17:15 +01:00
|
|
|
* . Report progress, if force_progress is 1 (ie. --progress).
|
|
|
|
* . Don't report progress, if force_progress is 0 (ie. --no-progress).
|
|
|
|
* . Don't report progress, if verbosity < 0 (ie. -q/--quiet ).
|
|
|
|
* . Report progress if isatty(2) is 1.
|
2010-02-24 13:50:26 +01:00
|
|
|
**/
|
2012-02-13 21:17:15 +01:00
|
|
|
if (force_progress >= 0)
|
|
|
|
transport->progress = !!force_progress;
|
|
|
|
else
|
|
|
|
transport->progress = verbosity >= 0 && isatty(2);
|
2010-02-24 13:50:23 +01:00
|
|
|
}
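Hedged examples of how command-line flags typically reach this
function; the exact call sites vary and the flag-to-value mappings are
assumptions derived from the rules above.
/*
 *     -q / --quiet   verbosity = -1     -> verbose = -1, progress off
 *     (default)      verbosity =  0     -> verbose stays 0, progress iff stderr is a tty
 *     -v             verbosity =  1     -> verbose = 1 (status table)
 *     -v -v          verbosity =  2     -> verbose = 2 (protocol debugging)
 *     --progress     force_progress = 1 -> progress forced on regardless of verbosity
 */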
|
|
|
|
|
2012-03-29 09:21:23 +02:00
|
|
|
static void die_with_unpushed_submodules(struct string_list *needs_pushing)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2016-06-17 22:20:53 +02:00
|
|
|
fprintf(stderr, _("The following submodule paths contain changes that can\n"
|
|
|
|
"not be found on any remote:\n"));
|
2012-03-29 09:21:23 +02:00
|
|
|
for (i = 0; i < needs_pushing->nr; i++)
|
2016-08-23 23:40:08 +02:00
|
|
|
fprintf(stderr, " %s\n", needs_pushing->items[i].string);
|
2016-06-17 22:20:53 +02:00
|
|
|
fprintf(stderr, _("\nPlease try\n\n"
|
|
|
|
" git push --recurse-submodules=on-demand\n\n"
|
|
|
|
"or cd to the path and use\n\n"
|
|
|
|
" git push\n\n"
|
|
|
|
"to push them to a remote.\n\n"));
|
2012-03-29 09:21:23 +02:00
|
|
|
|
|
|
|
string_list_clear(needs_pushing, 0);
|
|
|
|
|
2016-06-17 22:20:53 +02:00
|
|
|
die(_("Aborting."));
|
2012-03-29 09:21:23 +02:00
|
|
|
}
|
|
|
|
|
2013-01-13 06:17:03 +01:00
|
|
|
static int run_pre_push_hook(struct transport *transport,
|
|
|
|
struct ref *remote_refs)
|
|
|
|
{
|
|
|
|
int ret = 0, x;
|
|
|
|
struct ref *r;
|
2014-08-19 21:09:35 +02:00
|
|
|
struct child_process proc = CHILD_PROCESS_INIT;
|
2013-01-13 06:17:03 +01:00
|
|
|
struct strbuf buf;
|
|
|
|
const char *argv[4];
|
|
|
|
|
|
|
|
if (!(argv[0] = find_hook("pre-push")))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
argv[1] = transport->remote->name;
|
|
|
|
argv[2] = transport->url;
|
|
|
|
argv[3] = NULL;
|
|
|
|
|
|
|
|
proc.argv = argv;
|
|
|
|
proc.in = -1;
|
|
|
|
|
|
|
|
if (start_command(&proc)) {
|
|
|
|
finish_command(&proc);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2015-11-16 09:05:58 +01:00
|
|
|
sigchain_push(SIGPIPE, SIG_IGN);
|
|
|
|
|
2013-01-13 06:17:03 +01:00
|
|
|
strbuf_init(&buf, 256);
|
|
|
|
|
|
|
|
for (r = remote_refs; r; r = r->next) {
|
|
|
|
if (!r->peer_ref) continue;
|
|
|
|
if (r->status == REF_STATUS_REJECT_NONFASTFORWARD) continue;
|
2013-07-08 23:42:40 +02:00
|
|
|
if (r->status == REF_STATUS_REJECT_STALE) continue;
|
2013-01-13 06:17:03 +01:00
|
|
|
if (r->status == REF_STATUS_UPTODATE) continue;
|
|
|
|
|
|
|
|
strbuf_reset(&buf);
|
|
|
|
strbuf_addf(&buf, "%s %s %s %s\n",
|
2015-11-10 03:22:20 +01:00
|
|
|
r->peer_ref->name, oid_to_hex(&r->new_oid),
|
|
|
|
r->name, oid_to_hex(&r->old_oid));
|
2013-01-13 06:17:03 +01:00
|
|
|
|
2015-11-16 09:05:58 +01:00
|
|
|
if (write_in_full(proc.in, buf.buf, buf.len) < 0) {
|
|
|
|
/* We do not mind if a hook does not read all refs. */
|
|
|
|
if (errno != EPIPE)
|
|
|
|
ret = -1;
|
2013-01-13 06:17:03 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
strbuf_release(&buf);
|
|
|
|
|
|
|
|
x = close(proc.in);
|
|
|
|
if (!ret)
|
|
|
|
ret = x;
|
|
|
|
|
2015-11-16 09:05:58 +01:00
|
|
|
sigchain_pop(SIGPIPE);
|
|
|
|
|
2013-01-13 06:17:03 +01:00
|
|
|
x = finish_command(&proc);
|
|
|
|
if (!ret)
|
|
|
|
ret = x;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
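A hedged illustration of the hook interface implemented above; the ref
names and object ids are made up.
/*
 * The hook is invoked as
 *
 *     .git/hooks/pre-push <remote-name> <remote-url>
 *
 * and receives one line per ref on its standard input:
 *
 *     <local ref> SP <local oid> SP <remote ref> SP <remote oid> LF
 *
 * for example "refs/heads/topic <new-oid> refs/heads/topic <old-oid>".
 * A non-zero exit from the hook aborts the push.
 */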
|
|
|
|
|
2007-09-11 05:03:04 +02:00
|
|
|
int transport_push(struct transport *transport,
|
2009-08-08 09:51:08 +02:00
|
|
|
int refspec_nr, const char **refspec, int flags,
|
2012-11-30 02:41:33 +01:00
|
|
|
unsigned int *reject_reasons)
|
2007-09-11 05:03:04 +02:00
|
|
|
{
|
2012-11-30 02:41:33 +01:00
|
|
|
*reject_reasons = 0;
|
2010-02-17 00:42:52 +01:00
|
|
|
transport_verify_remote_names(refspec_nr, refspec);
|
2009-03-09 02:06:07 +01:00
|
|
|
|
2017-12-14 22:44:45 +01:00
|
|
|
if (transport->vtable->push_refs) {
|
2014-03-05 20:04:54 +01:00
|
|
|
struct ref *remote_refs;
|
2009-03-09 02:06:07 +01:00
|
|
|
struct ref *local_refs = get_local_heads();
|
|
|
|
int match_flags = MATCH_REFS_NONE;
|
2010-02-24 13:50:24 +01:00
|
|
|
int verbose = (transport->verbose > 0);
|
|
|
|
int quiet = (transport->verbose < 0);
|
2009-06-23 03:10:01 +02:00
|
|
|
int porcelain = flags & TRANSPORT_PUSH_PORCELAIN;
|
2010-01-16 22:45:31 +01:00
|
|
|
int pretend = flags & TRANSPORT_PUSH_DRY_RUN;
|
2010-02-27 05:52:15 +01:00
|
|
|
int push_ret, ret, err;
|
2009-03-09 02:06:07 +01:00
|
|
|
|
2014-03-05 20:04:54 +01:00
|
|
|
if (check_push_refs(local_refs, refspec_nr, refspec) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2017-12-14 22:44:45 +01:00
|
|
|
remote_refs = transport->vtable->get_refs_list(transport, 1);
|
2014-03-05 20:04:54 +01:00
|
|
|
|
2009-03-09 02:06:07 +01:00
|
|
|
if (flags & TRANSPORT_PUSH_ALL)
|
|
|
|
match_flags |= MATCH_REFS_ALL;
|
|
|
|
if (flags & TRANSPORT_PUSH_MIRROR)
|
|
|
|
match_flags |= MATCH_REFS_MIRROR;
|
2012-02-22 23:43:41 +01:00
|
|
|
if (flags & TRANSPORT_PUSH_PRUNE)
|
|
|
|
match_flags |= MATCH_REFS_PRUNE;
|
2013-03-04 21:09:50 +01:00
|
|
|
if (flags & TRANSPORT_PUSH_FOLLOW_TAGS)
|
|
|
|
match_flags |= MATCH_REFS_FOLLOW_TAGS;
|
2009-03-09 02:06:07 +01:00
|
|
|
|
2011-09-09 20:54:58 +02:00
|
|
|
if (match_push_refs(local_refs, &remote_refs,
|
|
|
|
refspec_nr, refspec, match_flags)) {
|
2009-03-09 02:06:07 +01:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2013-07-09 20:01:06 +02:00
|
|
|
if (transport->smart_options &&
|
|
|
|
transport->smart_options->cas &&
|
|
|
|
!is_empty_cas(transport->smart_options->cas))
|
|
|
|
apply_push_cas(transport->smart_options->cas,
|
|
|
|
transport->remote, remote_refs);
|
|
|
|
|
2010-01-08 03:12:42 +01:00
|
|
|
set_ref_status_for_push(remote_refs,
|
|
|
|
flags & TRANSPORT_PUSH_MIRROR,
|
|
|
|
flags & TRANSPORT_PUSH_FORCE);
|
|
|
|
|
2013-01-13 06:17:03 +01:00
|
|
|
if (!(flags & TRANSPORT_PUSH_NO_HOOK))
|
|
|
|
if (run_pre_push_hook(transport, remote_refs))
|
|
|
|
return -1;
|
|
|
|
|
2016-12-19 19:25:33 +01:00
|
|
|
if ((flags & (TRANSPORT_RECURSE_SUBMODULES_ON_DEMAND |
|
|
|
|
TRANSPORT_RECURSE_SUBMODULES_ONLY)) &&
|
|
|
|
!is_bare_repository()) {
|
2011-08-20 00:08:47 +02:00
|
|
|
struct ref *ref = remote_refs;
|
2017-03-31 03:40:00 +02:00
|
|
|
struct oid_array commits = OID_ARRAY_INIT;
|
2016-11-16 16:11:05 +01:00
|
|
|
|
2011-08-20 00:08:47 +02:00
|
|
|
for (; ref; ref = ref->next)
|
2016-11-16 16:11:05 +01:00
|
|
|
if (!is_null_oid(&ref->new_oid))
|
2017-03-31 03:40:00 +02:00
|
|
|
oid_array_append(&commits,
|
2017-03-31 03:39:56 +02:00
|
|
|
&ref->new_oid);
|
2016-11-16 16:11:05 +01:00
|
|
|
|
2016-11-17 19:46:04 +01:00
|
|
|
if (!push_unpushed_submodules(&commits,
|
2017-04-05 19:47:19 +02:00
|
|
|
transport->remote,
|
|
|
|
refspec, refspec_nr,
|
2017-04-05 19:47:16 +02:00
|
|
|
transport->push_options,
|
2016-11-17 19:46:04 +01:00
|
|
|
pretend)) {
|
2017-03-31 03:40:00 +02:00
|
|
|
oid_array_clear(&commits);
|
2016-11-16 16:11:05 +01:00
|
|
|
die("Failed to push all needed submodules!");
|
|
|
|
}
|
2017-03-31 03:40:00 +02:00
|
|
|
oid_array_clear(&commits);
|
2012-03-29 09:21:24 +02:00
|
|
|
}
|
|
|
|
|
2016-11-17 19:46:04 +01:00
|
|
|
if (((flags & TRANSPORT_RECURSE_SUBMODULES_CHECK) ||
|
2016-12-19 19:25:33 +01:00
|
|
|
((flags & (TRANSPORT_RECURSE_SUBMODULES_ON_DEMAND |
|
|
|
|
TRANSPORT_RECURSE_SUBMODULES_ONLY)) &&
|
2016-11-17 19:46:04 +01:00
|
|
|
!pretend)) && !is_bare_repository()) {
|
2011-08-20 00:08:47 +02:00
|
|
|
struct ref *ref = remote_refs;
|
2014-07-18 11:19:00 +02:00
|
|
|
struct string_list needs_pushing = STRING_LIST_INIT_DUP;
|
2017-03-31 03:40:00 +02:00
|
|
|
struct oid_array commits = OID_ARRAY_INIT;
|
2012-03-29 09:21:23 +02:00
|
|
|
|
2011-08-20 00:08:47 +02:00
|
|
|
for (; ref; ref = ref->next)
|
2016-11-16 16:11:05 +01:00
|
|
|
if (!is_null_oid(&ref->new_oid))
|
2017-03-31 03:40:00 +02:00
|
|
|
oid_array_append(&commits,
|
2017-03-31 03:39:56 +02:00
|
|
|
&ref->new_oid);
|
2016-11-16 16:11:05 +01:00
|
|
|
|
|
|
|
if (find_unpushed_submodules(&commits, transport->remote->name,
|
|
|
|
&needs_pushing)) {
|
2017-03-31 03:40:00 +02:00
|
|
|
oid_array_clear(&commits);
|
2016-11-16 16:11:05 +01:00
|
|
|
die_with_unpushed_submodules(&needs_pushing);
|
|
|
|
}
|
|
|
|
string_list_clear(&needs_pushing, 0);
|
2017-03-31 03:40:00 +02:00
|
|
|
oid_array_clear(&commits);
|
2011-08-20 00:08:47 +02:00
|
|
|
}
|
|
|
|
|
2016-12-19 19:25:33 +01:00
|
|
|
if (!(flags & TRANSPORT_RECURSE_SUBMODULES_ONLY))
|
2017-12-14 22:44:45 +01:00
|
|
|
push_ret = transport->vtable->push_refs(transport, remote_refs, flags);
|
2016-12-19 19:25:33 +01:00
|
|
|
else
|
|
|
|
push_ret = 0;
|
2010-01-08 03:12:43 +01:00
|
|
|
err = push_had_errors(remote_refs);
|
2010-02-27 05:52:15 +01:00
|
|
|
ret = push_ret | err;
|
2009-03-09 02:06:07 +01:00
|
|
|
|
2010-01-08 03:12:43 +01:00
|
|
|
if (!quiet || err)
|
2010-02-17 00:42:52 +01:00
|
|
|
transport_print_push_status(transport->url, remote_refs,
|
2009-08-13 01:36:04 +02:00
|
|
|
verbose | porcelain, porcelain,
|
2012-11-30 02:41:33 +01:00
|
|
|
reject_reasons);
|
2009-03-09 02:06:07 +01:00
|
|
|
|
2010-01-16 22:45:31 +01:00
|
|
|
if (flags & TRANSPORT_PUSH_SET_UPSTREAM)
|
|
|
|
set_upstreams(transport, remote_refs, pretend);
|
|
|
|
|
2016-12-19 19:25:33 +01:00
|
|
|
if (!(flags & (TRANSPORT_PUSH_DRY_RUN |
|
|
|
|
TRANSPORT_RECURSE_SUBMODULES_ONLY))) {
|
2009-03-09 02:06:07 +01:00
|
|
|
struct ref *ref;
|
|
|
|
for (ref = remote_refs; ref; ref = ref->next)
|
2010-02-17 00:42:52 +01:00
|
|
|
transport_update_tracking_ref(transport->remote, ref, verbose);
|
2009-03-09 02:06:07 +01:00
|
|
|
}
|
|
|
|
|
2010-02-27 05:52:15 +01:00
|
|
|
if (porcelain && !push_ret)
|
|
|
|
puts("Done");
|
2010-03-15 08:58:24 +01:00
|
|
|
else if (!quiet && !ret && !transport_refs_pushed(remote_refs))
|
2009-03-09 02:06:07 +01:00
|
|
|
fprintf(stderr, "Everything up-to-date\n");
|
2010-02-27 05:52:15 +01:00
|
|
|
|
2009-03-09 02:06:07 +01:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
return 1;
|
2007-09-11 05:03:04 +02:00
|
|
|
}
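An illustrative caller sketch, not from this file; the refspec and flag
choices are assumptions, and the declarations from transport.h are
assumed to be in scope.
static int example_dry_run_push(struct remote *remote)
{
	const char *refspec[] = { "refs/heads/master:refs/heads/master" };
	unsigned int reject_reasons = 0;
	struct transport *t = transport_get(remote, NULL);
	int err;

	transport_set_verbosity(t, 0, -1);
	err = transport_push(t, 1, refspec, TRANSPORT_PUSH_DRY_RUN,
			     &reject_reasons);
	transport_disconnect(t);
	return err;
}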
|
|
|
|
|
2007-10-30 02:05:40 +01:00
|
|
|
const struct ref *transport_get_remote_refs(struct transport *transport)
|
2007-09-11 05:03:11 +02:00
|
|
|
{
|
2010-02-16 08:18:21 +01:00
|
|
|
if (!transport->got_remote_refs) {
|
2017-12-14 22:44:45 +01:00
|
|
|
transport->remote_refs = transport->vtable->get_refs_list(transport, 0);
|
2010-02-16 08:18:21 +01:00
|
|
|
transport->got_remote_refs = 1;
|
|
|
|
}
|
2009-12-09 16:26:31 +01:00
|
|
|
|
2007-09-11 05:03:11 +02:00
|
|
|
return transport->remote_refs;
|
|
|
|
}
|
|
|
|
|
2009-11-18 02:42:24 +01:00
|
|
|
int transport_fetch_refs(struct transport *transport, struct ref *refs)
|
2007-09-11 05:03:11 +02:00
|
|
|
{
|
2007-09-14 09:31:21 +02:00
|
|
|
int rc;
|
2009-08-24 06:04:09 +02:00
|
|
|
int nr_heads = 0, nr_alloc = 0, nr_refs = 0;
|
2009-11-18 02:42:24 +01:00
|
|
|
struct ref **heads = NULL;
|
|
|
|
struct ref *rm;
|
2007-09-11 05:03:11 +02:00
|
|
|
|
|
|
|
for (rm = refs; rm; rm = rm->next) {
|
2009-08-24 06:04:09 +02:00
|
|
|
nr_refs++;
|
2007-09-11 05:03:11 +02:00
|
|
|
if (rm->peer_ref &&
|
2015-11-10 03:22:20 +01:00
|
|
|
!is_null_oid(&rm->old_oid) &&
|
|
|
|
!oidcmp(&rm->peer_ref->old_oid, &rm->old_oid))
|
2007-09-11 05:03:11 +02:00
|
|
|
continue;
|
2007-09-14 09:31:18 +02:00
|
|
|
ALLOC_GROW(heads, nr_heads + 1, nr_alloc);
|
2007-09-14 09:31:21 +02:00
|
|
|
heads[nr_heads++] = rm;
|
2007-09-11 05:03:11 +02:00
|
|
|
}
|
|
|
|
|
2009-08-24 06:04:09 +02:00
|
|
|
if (!nr_heads) {
|
|
|
|
/*
|
|
|
|
* When deepening of a shallow repository is requested,
|
|
|
|
* then local and remote refs are likely to still be equal.
|
|
|
|
* Just feed them all to the fetch method in that case.
|
|
|
|
* This condition shouldn't be met in a non-deepening fetch
|
2013-06-18 19:44:58 +02:00
|
|
|
* (see builtin/fetch.c:quickfetch()).
|
2009-08-24 06:04:09 +02:00
|
|
|
*/
|
2016-02-22 23:44:25 +01:00
|
|
|
ALLOC_ARRAY(heads, nr_refs);
|
2009-08-24 06:04:09 +02:00
|
|
|
for (rm = refs; rm; rm = rm->next)
|
|
|
|
heads[nr_heads++] = rm;
|
|
|
|
}
|
|
|
|
|
2017-12-14 22:44:45 +01:00
|
|
|
rc = transport->vtable->fetch(transport, nr_heads, heads);
|
2009-12-09 16:26:31 +01:00
|
|
|
|
2007-09-11 05:03:11 +02:00
|
|
|
free(heads);
|
2007-09-14 09:31:21 +02:00
|
|
|
return rc;
|
2007-09-11 05:03:11 +02:00
|
|
|
}
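An illustrative caller sketch, not from this file, that fetches
everything the remote advertises; real callers build the list through
refspec matching. copy_ref_list() and free_refs() from remote.h are
assumed to be available.
static int example_fetch_everything(struct transport *t)
{
	const struct ref *remote_refs = transport_get_remote_refs(t);
	struct ref *to_fetch = copy_ref_list(remote_refs);
	int err = transport_fetch_refs(t, to_fetch);

	/* Drop the .keep lock on any pack we received. */
	transport_unlock_pack(t);
	free_refs(to_fetch);
	return err;
}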
|
|
|
|
|
2007-09-14 09:31:23 +02:00
|
|
|
void transport_unlock_pack(struct transport *transport)
|
|
|
|
{
|
|
|
|
if (transport->pack_lockfile) {
|
2009-04-29 23:22:56 +02:00
|
|
|
unlink_or_warn(transport->pack_lockfile);
|
2017-06-16 01:15:46 +02:00
|
|
|
FREE_AND_NULL(transport->pack_lockfile);
|
2007-09-14 09:31:23 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-12-09 16:26:33 +01:00
|
|
|
int transport_connect(struct transport *transport, const char *name,
|
|
|
|
const char *exec, int fd[2])
|
|
|
|
{
|
2017-12-14 22:44:45 +01:00
|
|
|
if (transport->vtable->connect)
|
|
|
|
return transport->vtable->connect(transport, name, exec, fd);
|
2009-12-09 16:26:33 +01:00
|
|
|
else
|
|
|
|
die("Operation not supported by protocol");
|
|
|
|
}
|
|
|
|
|
2007-09-11 05:03:04 +02:00
|
|
|
int transport_disconnect(struct transport *transport)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
2017-12-14 22:44:45 +01:00
|
|
|
if (transport->vtable->disconnect)
|
|
|
|
ret = transport->vtable->disconnect(transport);
|
2007-09-11 05:03:04 +02:00
|
|
|
free(transport);
|
|
|
|
return ret;
|
|
|
|
}
|
2009-04-17 10:20:11 +02:00
|
|
|
|
|
|
|
/*
|
2012-03-28 10:41:54 +02:00
|
|
|
* Strip username (and password) from a URL and return
|
2009-04-17 10:20:11 +02:00
|
|
|
* it in a newly allocated string.
|
|
|
|
*/
|
|
|
|
char *transport_anonymize_url(const char *url)
|
|
|
|
{
|
2016-02-22 23:45:05 +01:00
|
|
|
char *scheme_prefix, *anon_part;
|
2009-04-17 10:20:11 +02:00
|
|
|
size_t anon_len, prefix_len = 0;
|
|
|
|
|
|
|
|
anon_part = strchr(url, '@');
|
2013-11-28 20:50:03 +01:00
|
|
|
if (url_is_local_not_ssh(url) || !anon_part)
|
2009-04-17 10:20:11 +02:00
|
|
|
goto literal_copy;
|
|
|
|
|
|
|
|
anon_len = strlen(++anon_part);
|
|
|
|
scheme_prefix = strstr(url, "://");
|
|
|
|
if (!scheme_prefix) {
|
|
|
|
if (!strchr(anon_part, ':'))
|
|
|
|
/* cannot be "me@there:/path/name" */
|
|
|
|
goto literal_copy;
|
|
|
|
} else {
|
|
|
|
const char *cp;
|
|
|
|
/* make sure scheme is reasonable */
|
|
|
|
for (cp = url; cp < scheme_prefix; cp++) {
|
|
|
|
switch (*cp) {
|
|
|
|
/* RFC 1738 2.1 */
|
|
|
|
case '+': case '.': case '-':
|
|
|
|
break; /* ok */
|
|
|
|
default:
|
|
|
|
if (isalnum(*cp))
|
|
|
|
break;
|
|
|
|
/* it isn't */
|
|
|
|
goto literal_copy;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* @ past the first slash does not count */
|
|
|
|
cp = strchr(scheme_prefix + 3, '/');
|
|
|
|
if (cp && cp < anon_part)
|
|
|
|
goto literal_copy;
|
|
|
|
prefix_len = scheme_prefix - url + 3;
|
|
|
|
}
|
2016-02-22 23:45:05 +01:00
|
|
|
return xstrfmt("%.*s%.*s", (int)prefix_len, url,
|
|
|
|
(int)anon_len, anon_part);
|
2009-04-17 10:20:11 +02:00
|
|
|
literal_copy:
|
|
|
|
return xstrdup(url);
|
|
|
|
}
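Hedged examples of what the function above produces; the addresses are
invented.
/*
 *     https://user:secret@example.com/repo.git -> https://example.com/repo.git
 *     user@example.com:repo.git                -> example.com:repo.git
 *     /srv/git/repo.git                        -> /srv/git/repo.git (local path, copied literally)
 */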
|
2011-03-11 20:32:53 +01:00
|
|
|
|
for_each_alternate_ref: replace transport code with for-each-ref
The current method for getting the refs from an alternate is
to run upload-pack in the alternate and parse its output
using the normal transport code. This works and is
reasonably short, but it has a very bad memory footprint
when there are a lot of refs in the alternate. There are two
problems:
1. It reads in all of the refs before passing any back to
us. Which means that our peak memory usage has to store
every ref (including duplicates for peeled variants),
even if our callback could determine that some are not
interesting (e.g., because they point to the same sha1
as another ref).
2. It allocates a "struct ref" for each one. Among other
things, this contains 3 separate 20-byte oids, along
with the name and various pointers. That can add up,
especially if the callback is only interested in the
sha1 (which it can store in a sha1_array as just 20
bytes).
On a particularly pathological case, where the alternate had
over 80 million refs pointing to only around 60,000 unique
objects, the peak heap usage of "git clone --reference" grew
to over 25GB.
This patch instead calls git-for-each-ref in the alternate
repository, and passes each line to the callback as we read
it. That drops the peak heap of the same command to 50MB.
I considered and rejected a few alternatives.
We could read all of the refs in the alternate using our own
ref code, just as we do with submodules. However, as memory
footprint is one of the concerns here, we want to avoid
loading those refs into our own memory as a whole.
It's possible that this will be a better technique in the
future when the ref code can more easily iterate without
loading all of packed-refs into memory.
Another option is to keep calling upload-pack, and just
parse its output ourselves in a streaming fashion. Besides
for-each-ref being simpler (we get to define the format
ourselves, and don't have to deal with speaking the git
protocol), it's more flexible for possible future changes.
For instance, it might be useful for the caller to be able
to limit the set of "interesting" alternate refs. The
motivating example is one where many "forks" of a particular
repository share object storage, and the shared storage has
refs for each fork (which is why so many of the refs are
duplicates; each fork has the same tags). A plausible
future optimization would be to ask for the alternate refs
for just _one_ fork (if you had some out-of-band way of
knowing which was the most interesting or important for the
current operation).
Similarly, no callbacks actually care about the symref value
of alternate refs, and as before, this patch ignores them
entirely. However, if we wanted to add them, for-each-ref's
"%(symref)" is going to be more flexible than upload-pack,
because the latter only handles the HEAD symref due to
historical constraints.
There is one potential downside, though: unlike upload-pack,
our for-each-ref command doesn't report the peeled value of
refs. The existing code calls the alternate_ref_fn callback
twice for tags: once for the tag, and once for the peeled
value with the refname set to "ref^{}".
For the callers in fetch-pack, this doesn't matter at all.
We immediately peel each tag down to a commit either way (so
there's a slight improvement, as we do not bother passing the
redundant data over the pipe). For the caller in
receive-pack, it means we will not advertise the peeled
values of tags in our alternate. However, we also don't
advertise peeled values for our _own_ tags, so this is
actually making things more consistent.
It's unclear whether receive-pack advertising peeled values
is a win or not. On one hand, giving more information to the
other side may let it omit some objects from the push. On
the other hand, for tags which both sides have, they simply
bloat the advertisement. The upload-pack advertisement of
git.git is about 30% larger than the receive-pack
advertisement due to its peeled information.
This patch omits the peeled information from
for_each_alternate_ref entirely, and leaves it up to the
caller whether they want to dig up the information.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-02-08 21:53:00 +01:00
|
|
|
static void read_alternate_refs(const char *path,
|
|
|
|
alternate_ref_fn *cb,
|
|
|
|
void *data)
|
|
|
|
{
|
|
|
|
struct child_process cmd = CHILD_PROCESS_INIT;
|
|
|
|
struct strbuf line = STRBUF_INIT;
|
|
|
|
FILE *fh;
|
|
|
|
|
|
|
|
cmd.git_cmd = 1;
|
|
|
|
argv_array_pushf(&cmd.args, "--git-dir=%s", path);
|
|
|
|
argv_array_push(&cmd.args, "for-each-ref");
|
|
|
|
argv_array_push(&cmd.args, "--format=%(objectname) %(refname)");
|
|
|
|
cmd.env = local_repo_env;
|
|
|
|
cmd.out = -1;
|
|
|
|
|
|
|
|
if (start_command(&cmd))
|
|
|
|
return;
|
|
|
|
|
|
|
|
fh = xfdopen(cmd.out, "r");
|
|
|
|
while (strbuf_getline_lf(&line, fh) != EOF) {
|
|
|
|
struct object_id oid;
|
|
|
|
|
|
|
|
if (get_oid_hex(line.buf, &oid) ||
|
|
|
|
line.buf[GIT_SHA1_HEXSZ] != ' ') {
|
|
|
|
warning("invalid line while parsing alternate refs: %s",
|
|
|
|
line.buf);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
cb(line.buf + GIT_SHA1_HEXSZ + 1, &oid, data);
|
|
|
|
}
|
|
|
|
|
|
|
|
fclose(fh);
|
|
|
|
finish_command(&cmd);
|
|
|
|
}
|
|
|
|
|
refactor refs_from_alternate_cb to allow passing extra data
The foreach_alt_odb function triggers a callback for each
alternate object db we have, with room for a single void
pointer as data. Currently, we always call refs_from_alternate_cb
as the callback function, and then pass another callback (to
receive each ref individually) as the void pointer.
This has two problems:
1. C technically forbids stuffing a function pointer into
a "void *". In practice, this probably doesn't matter
on any architectures git runs on, but it never hurts to
follow the letter of the law.
2. There is no room for an extra data pointer. Indeed, the
alternate_ref_fn that refs_from_alternate_cb calls
takes a void* for data, but we always pass it NULL.
Instead, let's properly stuff our function pointer into a
data struct, which also leaves room for an extra
caller-supplied data pointer. And to keep things simple for
existing callers, let's make a for_each_alternate_ref
function that takes care of creating the extra struct.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2011-05-19 23:33:17 +02:00
|
|
|
struct alternate_refs_data {
|
|
|
|
alternate_ref_fn *fn;
|
|
|
|
void *data;
|
|
|
|
};
|
|
|
|
|
|
|
|
static int refs_from_alternate_cb(struct alternate_object_database *e,
|
|
|
|
void *data)
|
2011-03-11 20:32:53 +01:00
|
|
|
{
|
2017-02-08 21:52:54 +01:00
|
|
|
struct strbuf path = STRBUF_INIT;
|
|
|
|
size_t base_len;
|
2011-05-19 23:33:17 +02:00
|
|
|
struct alternate_refs_data *cb = data;
|
2011-03-11 20:32:53 +01:00
|
|
|
|
2017-02-08 21:52:54 +01:00
|
|
|
if (!strbuf_realpath(&path, e->path, 0))
|
|
|
|
goto out;
|
|
|
|
if (!strbuf_strip_suffix(&path, "/objects"))
|
2014-07-24 06:41:30 +02:00
|
|
|
goto out;
|
2017-02-08 21:52:54 +01:00
|
|
|
base_len = path.len;
|
|
|
|
|
2011-03-11 20:32:53 +01:00
|
|
|
/* Is this a git repository with refs? */
|
2017-02-08 21:52:54 +01:00
|
|
|
strbuf_addstr(&path, "/refs");
|
|
|
|
if (!is_directory(path.buf))
|
2014-07-24 06:41:30 +02:00
|
|
|
goto out;
|
2017-02-08 21:52:54 +01:00
|
|
|
strbuf_setlen(&path, base_len);
|
|
|
|
|
2017-02-08 21:53:00 +01:00
|
|
|
read_alternate_refs(path.buf, cb->fn, cb->data);
|
|
|
|
|
2014-07-24 06:41:30 +02:00
|
|
|
out:
|
2017-02-08 21:52:54 +01:00
|
|
|
strbuf_release(&path);
|
2011-03-11 20:32:53 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2011-05-19 23:33:17 +02:00
|
|
|
|
|
|
|
void for_each_alternate_ref(alternate_ref_fn fn, void *data)
|
|
|
|
{
|
|
|
|
struct alternate_refs_data cb;
|
|
|
|
cb.fn = fn;
|
|
|
|
cb.data = data;
|
|
|
|
foreach_alt_odb(refs_from_alternate_cb, &cb);
|
|
|
|
}
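An illustrative caller sketch, not part of this file: collecting each
alternate ref's object id into an oid_array. The callback signature
mirrors the invocation in read_alternate_refs() above and is assumed to
match the alternate_ref_fn typedef.
static void collect_one_alternate_oid(const char *refname,
				      const struct object_id *oid,
				      void *data)
{
	struct oid_array *extra = data;
	oid_array_append(extra, oid);
}

static void collect_alternate_oids(struct oid_array *extra)
{
	for_each_alternate_ref(collect_one_alternate_oid, extra);
}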
|