Fix sparse warnings
Fix warnings from 'make check'.
- These files don't include 'builtin.h', causing sparse to complain that
cmd_* isn't declared:
builtin/clone.c:364, builtin/fetch-pack.c:797,
builtin/fmt-merge-msg.c:34, builtin/hash-object.c:78,
builtin/merge-index.c:69, builtin/merge-recursive.c:22
builtin/merge-tree.c:341, builtin/mktag.c:156, builtin/notes.c:426
builtin/notes.c:822, builtin/pack-redundant.c:596,
builtin/pack-refs.c:10, builtin/patch-id.c:60, builtin/patch-id.c:149,
builtin/remote.c:1512, builtin/remote-ext.c:240,
builtin/remote-fd.c:53, builtin/reset.c:236, builtin/send-pack.c:384,
builtin/unpack-file.c:25, builtin/var.c:75
- These files have symbols which should be marked static since they're
only used at file scope:
submodule.c:12, diff.c:631, replace_object.c:92, submodule.c:13,
submodule.c:14, trace.c:78, transport.c:195, transport-helper.c:79,
unpack-trees.c:19, url.c:3, url.c:18, url.c:104, url.c:117, url.c:123,
url.c:129, url.c:136, thread-utils.c:21, thread-utils.c:48
- These files redeclare symbols to be different types:
builtin/index-pack.c:210, parse-options.c:564, parse-options.c:571,
usage.c:49, usage.c:58, usage.c:63, usage.c:72
- These files use a literal integer 0 when they really should use a NULL
pointer:
daemon.c:663, fast-import.c:2942, imap-send.c:1072, notes-merge.c:362
While we're in the area, clean up some unused #includes in builtin files
(mostly exec_cmd.h).
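
As a quick illustration, one way to reproduce these warnings (a sketch;
it assumes sparse/cgcc is installed, since 'make check' is what
surfaces them):

$ make check 2>&1 | grep 'warning:'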
Signed-off-by: Stephen Boyd <bebarino@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2011-03-22 08:51:05 +01:00

#include "builtin.h"
#include "pkt-line.h"
#include "fetch-pack.h"

fetch-pack: new --stdin option to read refs from stdin
If a remote repo has too many tags (or branches), cloning it over the
smart HTTP transport can fail because remote-curl.c puts all the refs
from the remote repo on the fetch-pack command line. This can make the
command line longer than the global OS command line limit, causing
fetch-pack to fail.
This is especially a problem on Windows where the command line limit is
orders of magnitude shorter than Linux. There are already real repos out
there that msysGit cannot clone over smart HTTP due to this problem.
Here is an easy way to trigger this problem:
git init too-many-refs
cd too-many-refs
echo bla > bla.txt
git add .
git commit -m test
sha=$(git rev-parse HEAD)
tag=$(perl -e 'print "bla" x 30')
for i in `seq 50000`; do
echo $sha refs/tags/$tag-$i >> .git/packed-refs
done
Then share this repo over the smart HTTP protocol and try cloning it:
$ git clone http://localhost/.../too-many-refs/.git
Cloning into 'too-many-refs'...
fatal: cannot exec 'fetch-pack': Argument list too long
50k tags is obviously an absurd number, but it is required to
demonstrate the problem on Linux because it has a much more generous
command line limit. On Windows the clone fails with as few as 500
tags in the above loop, which is getting uncomfortably close to the
number of tags you might see in real long lived repos.
This is not just theoretical, msysGit is already failing to clone our
company repo due to this. It's a large repo converted from CVS, nearly
10 years of history.
Four possible solutions were discussed on the Git mailing list (in no
particular order):
1) Call fetch-pack multiple times with smaller batches of refs.
This was dismissed as inefficient and inelegant.
2) Add option --refs-fd=$n to pass an fd from which to read the refs.
This was rejected because inheriting descriptors other than
stdin/stdout/stderr through exec() is apparently problematic on Windows,
plus it would require changes to the run-command API to open extra
pipes.
3) Add option --refs-from=$tmpfile to pass the refs using a temp file.
This was not favored because of the temp file requirement.
4) Add option --stdin to pass the refs on stdin, one per line.
In the end this option was chosen as the most efficient and most
desirable from a scripting perspective.
There was however a small complication when using stdin to pass refs to
fetch-pack. The --stateless-rpc option to fetch-pack also uses stdin for
communication with the remote server.
If we are going to sneak refs on stdin line by line, it would have to be
done very carefully in the presence of --stateless-rpc, because when
reading refs line by line we might read ahead too much data into our
buffer and eat some of the remote protocol data which is also coming on
stdin.
One way to solve this would be to refactor get_remote_heads() in
fetch-pack.c to accept a residual buffer from our stdin line parsing
above, but this function is used in several places so other callers
would be burdened by this residual buffer interface even when most of
them don't need it.
In the end we settled on the following solution:
If --stdin is specified without --stateless-rpc, fetch-pack would read
the refs from stdin one per line, in a script-friendly format.
However if --stdin is specified together with --stateless-rpc,
fetch-pack would read the refs from stdin in packetized format
(pkt-line) with a flush packet terminating the list of refs. This way we
can read the exact number of bytes that we need from stdin, and then
get_remote_heads() can continue reading from the same fd without losing
a single byte of remote protocol data.
This way the --stdin option only loses generality and scriptability when
used together with --stateless-rpc, which is not easily scriptable
anyway because it also uses pkt-line when talking to the remote server.
Signed-off-by: Ivan Todoroski <grnch@gmx.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2012-04-02 17:13:48 +02:00
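
For illustration, a plausible invocation of the plain --stdin mode (the
host and path here are made up):

$ printf '%s\n' refs/heads/master refs/tags/v1.0 |
  git fetch-pack --stdin host.example.com:/repo.git

With --stateless-rpc the same list would instead be packetized: each
ref arrives as a length-prefixed pkt-line, and a '0000' flush packet
terminates the list.
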
static const char fetch_pack_usage[] =
"git fetch-pack [--all] [--stdin] [--quiet|-q] [--keep|-k] [--thin] "
"[--include-tag] [--upload-pack=<git-upload-pack>] [--depth=<n>] "
"[--no-progress] [-v] [<host>:]<directory> [<refs>...]";

static void add_sought_entry_mem(struct ref ***sought, int *nr, int *alloc,
				 const char *name, int namelen)
{
	struct ref *ref = xcalloc(1, sizeof(*ref) + namelen + 1);

	memcpy(ref->name, name, namelen);
	ref->name[namelen] = '\0';
	(*nr)++;
	ALLOC_GROW(*sought, *nr, *alloc);
	(*sought)[*nr - 1] = ref;
}

static void add_sought_entry(struct ref ***sought, int *nr, int *alloc,
			     const char *string)
{
	add_sought_entry_mem(sought, nr, alloc, string, strlen(string));
}
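
/*
 * Entry point for "git fetch-pack": parse options, collect the refs
 * to fetch from argv and/or stdin, run the fetch-pack machinery over
 * the connection, then report the fetched refs on stdout.
 */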
int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
{
	int i, ret;
	struct ref *ref = NULL;
	const char *dest = NULL;
	struct ref **sought = NULL;
	int nr_sought = 0, alloc_sought = 0;
	int fd[2];
	char *pack_lockfile = NULL;
	char **pack_lockfile_ptr = NULL;
	struct child_process *conn;
	struct fetch_pack_args args;

	packet_trace_identity("fetch-pack");

	memset(&args, 0, sizeof(args));
	args.uploadpack = "git-upload-pack";

	for (i = 1; i < argc && *argv[i] == '-'; i++) {
		const char *arg = argv[i];

		if (!prefixcmp(arg, "--upload-pack=")) {
			args.uploadpack = arg + 14;
			continue;
		}
		if (!prefixcmp(arg, "--exec=")) {
			args.uploadpack = arg + 7;
			continue;
		}
		if (!strcmp("--quiet", arg) || !strcmp("-q", arg)) {
			args.quiet = 1;
			continue;
		}
		if (!strcmp("--keep", arg) || !strcmp("-k", arg)) {
			args.lock_pack = args.keep_pack;
			args.keep_pack = 1;
			continue;
		}
		if (!strcmp("--thin", arg)) {
			args.use_thin_pack = 1;
			continue;
		}
		if (!strcmp("--include-tag", arg)) {
			args.include_tag = 1;
			continue;
		}
		if (!strcmp("--all", arg)) {
			args.fetch_all = 1;
			continue;
		}
		if (!strcmp("--stdin", arg)) {
			args.stdin_refs = 1;
			continue;
		}
		if (!strcmp("-v", arg)) {
			args.verbose = 1;
			continue;
		}
		if (!prefixcmp(arg, "--depth=")) {
			args.depth = strtol(arg + 8, NULL, 0);
			continue;
		}
		if (!strcmp("--no-progress", arg)) {
			args.no_progress = 1;
			continue;
		}
		if (!strcmp("--stateless-rpc", arg)) {
			args.stateless_rpc = 1;
			continue;
		}
		if (!strcmp("--lock-pack", arg)) {
			args.lock_pack = 1;
			pack_lockfile_ptr = &pack_lockfile;
			continue;
		}
		usage(fetch_pack_usage);
	}

	if (i < argc)
		dest = argv[i++];
	else
		usage(fetch_pack_usage);

	/*
	 * Copy refs from cmdline to growable list, then append any
	 * refs from the standard input:
	 */
	for (; i < argc; i++)
		add_sought_entry(&sought, &nr_sought, &alloc_sought, argv[i]);
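
	/*
	 * Append any refs arriving on stdin: packetized pkt-lines under
	 * --stateless-rpc, one ref per line otherwise.
	 */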
	if (args.stdin_refs) {
		if (args.stateless_rpc) {
			/* in stateless RPC mode we use pkt-line to read
			 * from stdin, until we get a flush packet
			 */
			static char line[1000];
			for (;;) {
				int n = packet_read_line(0, line, sizeof(line));
				if (!n)
					break;
				if (line[n-1] == '\n')
					n--;
				add_sought_entry_mem(&sought, &nr_sought, &alloc_sought, line, n);
			}
		}
		else {
			/* read from stdin one ref per line, until EOF */
			struct strbuf line = STRBUF_INIT;
			while (strbuf_getline(&line, stdin, '\n') != EOF)
				add_sought_entry(&sought, &nr_sought, &alloc_sought, line.buf);
			strbuf_release(&line);
		}
	}

	if (args.stateless_rpc) {
		conn = NULL;
		fd[0] = 0;
		fd[1] = 1;
	} else {
		conn = git_connect(fd, dest, args.uploadpack,
				   args.verbose ? CONNECT_VERBOSE : 0);
	}

	get_remote_heads(fd[0], &ref, 0, NULL);

	ref = fetch_pack(&args, fd, conn, ref, dest,
			 sought, nr_sought, pack_lockfile_ptr);
	if (pack_lockfile) {
		printf("lock %s\n", pack_lockfile);
		fflush(stdout);
	}
	close(fd[0]);
	close(fd[1]);
	if (finish_connect(conn))
		return 1;

	ret = !ref;

	/*
	 * If the heads to pull were given, we should have consumed
	 * all of them by matching the remote.  Otherwise, 'git fetch
	 * remote no-such-ref' would silently succeed without issuing
	 * an error.
	 */
	for (i = 0; i < nr_sought; i++) {
		if (!sought[i] || sought[i]->matched)
			continue;
		error("no such remote ref %s", sought[i]->name);
		ret = 1;
	}

	while (ref) {
		printf("%s %s\n",
		       sha1_to_hex(ref->old_sha1), ref->name);
		ref = ref->next;
	}

	return ret;
}