#include "cache.h"
#include "refs.h"
#include "pkt-line.h"
#include "sideband.h"
#include "tag.h"
#include "object.h"
#include "commit.h"
#include "exec_cmd.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"

static const char upload_pack_usage[] = "git-upload-pack [--strict] [--timeout=nn] <dir>";

/* bits #0..7 in revision.h, #8..10 in commit.c */
#define THEY_HAVE	(1u << 11)
#define OUR_REF		(1u << 12)
#define WANTED		(1u << 13)
#define COMMON_KNOWN	(1u << 14)
#define REACHABLE	(1u << 15)

#define SHALLOW		(1u << 16)
#define NOT_SHALLOW	(1u << 17)
#define CLIENT_SHALLOW	(1u << 18)

static unsigned long oldest_have;

static int multi_ack, nr_our_refs;
static int use_thin_pack, use_ofs_delta;
static struct object_array have_obj;
static struct object_array want_obj;
static unsigned int timeout;
/* 0 for no sideband,
 * otherwise maximum packet size (up to 65520 bytes).
 */
static int use_sideband;

static void reset_timeout(void)
{
	alarm(timeout);
}

static int strip(char *line, int len)
{
	if (len && line[len-1] == '\n')
		line[--len] = 0;
	return len;
}

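/*
 * Route output to the client.  fd 1 carries pack data, fd 2 progress
 * messages, and fd 3 an error message that aborts the transfer.  With
 * side-band enabled everything is multiplexed over fd 1 as sideband
 * packets using the fd number as the band; otherwise fd 3 falls back
 * to fd 2, i.e. our own standard error.
 */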
static ssize_t send_client_data(int fd, const char *data, ssize_t sz)
{
	if (use_sideband)
		return send_sideband(1, fd, data, sz, use_sideband);
	if (fd == 3)
		/* emergency quit */
		fd = 2;
	if (fd == 2) {
		/* XXX: are we happy to lose stuff here? */
		xwrite(fd, data, sz);
		return sz;
	}
	return safe_write(fd, data, sz);
}

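/*
 * Callbacks used by the forked revision walker below: every object
 * that should go into the pack is written to pack_pipe as
 * "<sha1> <name>", one per line, which is what git-pack-objects reads
 * on its stdin.  Edge commits for thin packs get a leading '-'.
 */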
FILE *pack_pipe = NULL;
static void show_commit(struct commit *commit)
{
	if (commit->object.flags & BOUNDARY)
		fputc('-', pack_pipe);
	if (fputs(sha1_to_hex(commit->object.sha1), pack_pipe) < 0)
		die("broken output pipe");
	fputc('\n', pack_pipe);
	fflush(pack_pipe);
	free(commit->buffer);
	commit->buffer = NULL;
}

static void show_object(struct object_array_entry *p)
{
	/* An object with name "foo\n0000000..." can be used to
	 * confuse downstream git-pack-objects very badly.
	 */
	const char *ep = strchr(p->name, '\n');
	if (ep) {
		fprintf(pack_pipe, "%s %.*s\n", sha1_to_hex(p->item->sha1),
			(int) (ep - p->name),
			p->name);
	}
	else
		fprintf(pack_pipe, "%s %s\n",
			sha1_to_hex(p->item->sha1), p->name);
}

static void show_edge(struct commit *commit)
{
	fprintf(pack_pipe, "-%s\n", sha1_to_hex(commit->object.sha1));
}

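/*
 * Generate the pack and stream it to the client.  One forked child
 * walks the revision graph between want_obj and have_obj (the internal
 * equivalent of git-rev-list) and feeds the object list to a second
 * child running git-pack-objects --stdout; the parent poll()s the pack
 * and progress pipes, relays both via send_client_data(), and aborts
 * if either child dies with an error.
 */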
static void create_pack_file(void)
{
	/* Pipes between rev-list to pack-objects, pack-objects to us
	 * and pack-objects error stream for progress bar.
	 */
	int lp_pipe[2], pu_pipe[2], pe_pipe[2];
	pid_t pid_rev_list, pid_pack_objects;
	int create_full_pack = (nr_our_refs == want_obj.nr && !have_obj.nr);
	char data[8193], progress[128];
	char abort_msg[] = "aborting due to possible repository "
		"corruption on the remote side.";
	int buffered = -1;

	if (pipe(lp_pipe) < 0)
		die("git-upload-pack: unable to create pipe");
	pid_rev_list = fork();
	if (pid_rev_list < 0)
		die("git-upload-pack: unable to fork git-rev-list");

	if (!pid_rev_list) {
		int i;
		struct rev_info revs;

		pack_pipe = fdopen(lp_pipe[1], "w");

		if (create_full_pack)
			use_thin_pack = 0; /* no point doing it */
		init_revisions(&revs, NULL);
		revs.tag_objects = 1;
		revs.tree_objects = 1;
		revs.blob_objects = 1;
		if (use_thin_pack)
			revs.edge_hint = 1;

		if (create_full_pack) {
			const char *args[] = {"rev-list", "--all", NULL};
			setup_revisions(2, args, &revs, NULL);
		} else {
			for (i = 0; i < want_obj.nr; i++) {
				struct object *o = want_obj.objects[i].item;
				/* why??? */
				o->flags &= ~UNINTERESTING;
				add_pending_object(&revs, o, NULL);
			}
			for (i = 0; i < have_obj.nr; i++) {
				struct object *o = have_obj.objects[i].item;
				o->flags |= UNINTERESTING;
				add_pending_object(&revs, o, NULL);
			}
			setup_revisions(0, NULL, &revs, NULL);
		}
		prepare_revision_walk(&revs);
		mark_edges_uninteresting(revs.commits, &revs, show_edge);
		traverse_commit_list(&revs, show_commit, show_object);
		exit(0);
	}

	if (pipe(pu_pipe) < 0)
		die("git-upload-pack: unable to create pipe");
	if (pipe(pe_pipe) < 0)
		die("git-upload-pack: unable to create pipe");
	pid_pack_objects = fork();
	if (pid_pack_objects < 0) {
		/* daemon sets things up to ignore TERM */
		kill(pid_rev_list, SIGKILL);
		die("git-upload-pack: unable to fork git-pack-objects");
	}
	if (!pid_pack_objects) {
		dup2(lp_pipe[0], 0);
		dup2(pu_pipe[1], 1);
		dup2(pe_pipe[1], 2);

		close(lp_pipe[0]);
		close(lp_pipe[1]);
		close(pu_pipe[0]);
		close(pu_pipe[1]);
		close(pe_pipe[0]);
		close(pe_pipe[1]);
		execl_git_cmd("pack-objects", "--stdout", "--progress",
			      use_ofs_delta ? "--delta-base-offset" : NULL,
			      NULL);
		kill(pid_rev_list, SIGKILL);
		die("git-upload-pack: unable to exec git-pack-objects");
	}

	close(lp_pipe[0]);
	close(lp_pipe[1]);

	/* We read from pe_pipe[0] to capture stderr output for
	 * progress bar, and pu_pipe[0] to capture the pack data.
	 */
	close(pe_pipe[1]);
	close(pu_pipe[1]);

	while (1) {
		const char *who;
		struct pollfd pfd[2];
		pid_t pid;
		int status;
		ssize_t sz;
		int pe, pu, pollsize;

		reset_timeout();

		pollsize = 0;
		pe = pu = -1;

		if (0 <= pu_pipe[0]) {
			pfd[pollsize].fd = pu_pipe[0];
			pfd[pollsize].events = POLLIN;
			pu = pollsize;
			pollsize++;
		}
		if (0 <= pe_pipe[0]) {
			pfd[pollsize].fd = pe_pipe[0];
			pfd[pollsize].events = POLLIN;
			pe = pollsize;
			pollsize++;
		}

		if (pollsize) {
			if (poll(pfd, pollsize, -1) < 0) {
				if (errno != EINTR) {
					error("poll failed, resuming: %s",
					      strerror(errno));
					sleep(1);
				}
				continue;
			}
			if (0 <= pu && (pfd[pu].revents & (POLLIN|POLLHUP))) {
				/* Data ready; we keep the last byte
				 * to ourselves in case we detect
				 * broken rev-list, so that we can
				 * leave the stream corrupted.  This
				 * is unfortunate -- unpack-objects
				 * would happily accept a valid pack
				 * data with trailing garbage, so
				 * appending garbage after we pass all
				 * the pack data is not good enough to
				 * signal breakage to downstream.
				 */
				char *cp = data;
				ssize_t outsz = 0;
				if (0 <= buffered) {
					*cp++ = buffered;
					outsz++;
				}
				sz = xread(pu_pipe[0], cp,
					   sizeof(data) - outsz);
				if (0 < sz)
					;
				else if (sz == 0) {
					close(pu_pipe[0]);
					pu_pipe[0] = -1;
				}
				else
					goto fail;
				sz += outsz;
				if (1 < sz) {
					buffered = data[sz-1] & 0xFF;
					sz--;
				}
				else
					buffered = -1;
				sz = send_client_data(1, data, sz);
				if (sz < 0)
					goto fail;
			}
			if (0 <= pe && (pfd[pe].revents & (POLLIN|POLLHUP))) {
				/* Status ready; we ship that in the side-band
				 * or dump to the standard error.
				 */
				sz = xread(pe_pipe[0], progress,
					   sizeof(progress));
				if (0 < sz)
					send_client_data(2, progress, sz);
				else if (sz == 0) {
					close(pe_pipe[0]);
					pe_pipe[0] = -1;
				}
				else
					goto fail;
			}
		}

		/* See if the children are still there */
		if (pid_rev_list || pid_pack_objects) {
			pid = waitpid(-1, &status, WNOHANG);
			if (!pid)
				continue;
			who = ((pid == pid_rev_list) ? "git-rev-list" :
			       (pid == pid_pack_objects) ? "git-pack-objects" :
			       NULL);
			if (!who) {
				if (pid < 0) {
					error("git-upload-pack: %s",
					      strerror(errno));
					goto fail;
				}
				error("git-upload-pack: we weren't "
				      "waiting for %d", pid);
				continue;
			}
			if (!WIFEXITED(status) || WEXITSTATUS(status) > 0) {
				error("git-upload-pack: %s died with error.",
				      who);
				goto fail;
			}
			if (pid == pid_rev_list)
				pid_rev_list = 0;
			if (pid == pid_pack_objects)
				pid_pack_objects = 0;
			if (pid_rev_list || pid_pack_objects)
				continue;
		}

		/* both died happily */
		if (pollsize)
			continue;

		/* flush the data */
		if (0 <= buffered) {
			data[0] = buffered;
			sz = send_client_data(1, data, 1);
			if (sz < 0)
				goto fail;
			fprintf(stderr, "flushed.\n");
		}
		if (use_sideband)
			packet_flush(1);
		return;
	}
 fail:
	if (pid_pack_objects)
		kill(pid_pack_objects, SIGKILL);
	if (pid_rev_list)
		kill(pid_rev_list, SIGKILL);
	send_client_data(3, abort_msg, sizeof(abort_msg));
	die("git-upload-pack: %s", abort_msg);
}

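/*
 * Process one "have <sha1>" line from the client.  Returns -1 if we do
 * not have the object, 0 if we already knew they had this commit, and
 * 1 otherwise (the object is then added to have_obj).  For commits,
 * the parents are also marked THEY_HAVE and oldest_have tracks the
 * oldest commit date reported so far.
 */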
static int got_sha1(char *hex, unsigned char *sha1)
{
	struct object *o;
	int we_knew_they_have = 0;

	if (get_sha1_hex(hex, sha1))
		die("git-upload-pack: expected SHA1 object, got '%s'", hex);
	if (!has_sha1_file(sha1))
		return -1;

	o = lookup_object(sha1);
	if (!(o && o->parsed))
		o = parse_object(sha1);
	if (!o)
		die("oops (%s)", sha1_to_hex(sha1));
	if (o->type == OBJ_COMMIT) {
		struct commit_list *parents;
		struct commit *commit = (struct commit *)o;
		if (o->flags & THEY_HAVE)
			we_knew_they_have = 1;
		else
			o->flags |= THEY_HAVE;
		if (!oldest_have || (commit->date < oldest_have))
			oldest_have = commit->date;
		for (parents = commit->parents;
		     parents;
		     parents = parents->next)
			parents->item->object.flags |= THEY_HAVE;
	}
	if (!we_knew_they_have) {
		add_object_array(o, NULL, &have_obj);
		return 1;
	}
	return 0;
}

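/*
 * Check whether 'want' is reachable from something the client said it
 * has.  The walk runs in commit-date order and does not expand parents
 * of commits older than oldest_have, so only history that could still
 * contain a common commit is explored; REACHABLE marks are cleared
 * again before returning.
 */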
static int reachable(struct commit *want)
{
	struct commit_list *work = NULL;

	insert_by_date(want, &work);
	while (work) {
		struct commit_list *list = work->next;
		struct commit *commit = work->item;
		free(work);
		work = list;

		if (commit->object.flags & THEY_HAVE) {
			want->object.flags |= COMMON_KNOWN;
			break;
		}
		if (!commit->object.parsed)
			parse_object(commit->object.sha1);
		if (commit->object.flags & REACHABLE)
			continue;
		commit->object.flags |= REACHABLE;
		if (commit->date < oldest_have)
			continue;
		for (list = commit->parents; list; list = list->next) {
			struct commit *parent = list->item;
			if (!(parent->object.flags & REACHABLE))
				insert_by_date(parent, &work);
		}
	}
	want->object.flags |= REACHABLE;
	clear_commit_marks(want, REACHABLE);
	free_commit_list(work);
	return (want->object.flags & COMMON_KNOWN);
}

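/*
 * Decide whether we can stop asking for more "have" lines: true once
 * every wanted commit is known to be reachable from the common commits
 * the client reported.  Non-commit wants cannot be judged from the
 * ancestry chain and are simply marked COMMON_KNOWN.
 */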
static int ok_to_give_up(void)
{
	int i;

	if (!have_obj.nr)
		return 0;

	for (i = 0; i < want_obj.nr; i++) {
		struct object *want = want_obj.objects[i].item;

		if (want->flags & COMMON_KNOWN)
			continue;
		want = deref_tag(want, "a want line", 0);
		if (!want || want->type != OBJ_COMMIT) {
			/* no way to tell if this is reachable by
			 * looking at the ancestry chain alone, so
			 * leave a note to ourselves not to worry about
			 * this object anymore.
			 */
			want_obj.objects[i].item->flags |= COMMON_KNOWN;
			continue;
		}
		if (!reachable((struct commit *)want))
			return 0;
	}
	return 1;
}

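/*
 * Negotiate the common history.  The client sends "have <sha1>" lines
 * followed by "done"; we answer with ACK/NAK, using the
 * "ACK <sha1> continue" form when multi_ack is in effect so the client
 * knows it may keep walking its other branches.
 */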
static int get_common_commits(void)
{
	static char line[1000];
	unsigned char sha1[20];
	char hex[41], last_hex[41];
	int len;

	track_object_refs = 0;
	save_commit_buffer = 0;

	for (;;) {
		len = packet_read_line(0, line, sizeof(line));
		reset_timeout();

		if (!len) {
			if (have_obj.nr == 0 || multi_ack)
				packet_write(1, "NAK\n");
			continue;
		}
		len = strip(line, len);
		if (!prefixcmp(line, "have ")) {
			switch (got_sha1(line+5, sha1)) {
			case -1: /* they have what we do not */
				if (multi_ack && ok_to_give_up())
					packet_write(1, "ACK %s continue\n",
						     sha1_to_hex(sha1));
				break;
			default:
				memcpy(hex, sha1_to_hex(sha1), 41);
				if (multi_ack) {
					const char *msg = "ACK %s continue\n";
					packet_write(1, msg, hex);
					memcpy(last_hex, hex, 41);
				}
				else if (have_obj.nr == 1)
					packet_write(1, "ACK %s\n", hex);
				break;
			}
			continue;
		}
		if (!strcmp(line, "done")) {
			if (have_obj.nr > 0) {
				if (multi_ack)
					packet_write(1, "ACK %s\n", last_hex);
				return 0;
			}
			packet_write(1, "NAK\n");
			return -1;
		}
		die("git-upload-pack: expected SHA1 list, got '%s'", line);
	}
}

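/*
 * Read the client's request: "want <sha1> [capabilities]" lines plus
 * optional "shallow <sha1>" and "deepen <depth>" lines, terminated by
 * a flush packet.  Capability strings (multi_ack, thin-pack,
 * ofs-delta, side-band[-64k]) are recognized after the object name on
 * the want lines.  When a positive depth was requested, the new
 * shallow boundary is computed and "shallow"/"unshallow" lines are
 * sent back before a final flush.
 */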
static void receive_needs(void)
{
	struct object_array shallows = {0, 0, NULL};
	static char line[1000];
	int len, depth = 0;

	for (;;) {
		struct object *o;
		unsigned char sha1_buf[20];
		len = packet_read_line(0, line, sizeof(line));
		reset_timeout();
		if (!len)
			break;

		if (!prefixcmp(line, "shallow ")) {
			unsigned char sha1[20];
			struct object *object;
			use_thin_pack = 0;
			if (get_sha1(line + 8, sha1))
				die("invalid shallow line: %s", line);
			object = parse_object(sha1);
			if (!object)
				die("did not find object for %s", line);
			object->flags |= CLIENT_SHALLOW;
			add_object_array(object, NULL, &shallows);
			continue;
		}
		if (!prefixcmp(line, "deepen ")) {
			char *end;
			use_thin_pack = 0;
			depth = strtol(line + 7, &end, 0);
			if (end == line + 7 || depth <= 0)
				die("Invalid deepen: %s", line);
			continue;
		}
		if (prefixcmp(line, "want ") ||
		    get_sha1_hex(line+5, sha1_buf))
			die("git-upload-pack: protocol error, "
			    "expected to get sha, not '%s'", line);
		if (strstr(line+45, "multi_ack"))
			multi_ack = 1;
		if (strstr(line+45, "thin-pack"))
			use_thin_pack = 1;
		if (strstr(line+45, "ofs-delta"))
			use_ofs_delta = 1;
		if (strstr(line+45, "side-band-64k"))
			use_sideband = LARGE_PACKET_MAX;
		else if (strstr(line+45, "side-band"))
			use_sideband = DEFAULT_PACKET_MAX;

		/* We have sent all our refs already, and the other end
		 * should have chosen out of them; otherwise they are
		 * asking for nonsense.
		 *
		 * Hmph.  We may later want to allow "want" line that
		 * asks for something like "master~10" (symbolic)...
		 * would it make sense?  I don't know.
		 */
		o = lookup_object(sha1_buf);
		if (!o || !(o->flags & OUR_REF))
			die("git-upload-pack: not our ref %s", line+5);
		if (!(o->flags & WANTED)) {
			o->flags |= WANTED;
			add_object_array(o, NULL, &want_obj);
		}
	}
	if (depth == 0 && shallows.nr == 0)
		return;
	if (depth > 0) {
		struct commit_list *result, *backup;
		int i;
		backup = result = get_shallow_commits(&want_obj, depth,
						      SHALLOW, NOT_SHALLOW);
		while (result) {
			struct object *object = &result->item->object;
			if (!(object->flags & (CLIENT_SHALLOW|NOT_SHALLOW))) {
				packet_write(1, "shallow %s",
					     sha1_to_hex(object->sha1));
				register_shallow(object->sha1);
			}
			result = result->next;
		}
		free_commit_list(backup);
		for (i = 0; i < shallows.nr; i++) {
			struct object *object = shallows.objects[i].item;
			if (object->flags & NOT_SHALLOW) {
				struct commit_list *parents;
				packet_write(1, "unshallow %s",
					     sha1_to_hex(object->sha1));
				object->flags &= ~CLIENT_SHALLOW;
				/* make sure the real parents are parsed */
				unregister_shallow(object->sha1);
				object->parsed = 0;
				parse_commit((struct commit *)object);
				parents = ((struct commit *)object)->parents;
				while (parents) {
					add_object_array(&parents->item->object,
							 NULL, &want_obj);
					parents = parents->next;
				}
			}
			/* make sure commit traversal conforms to client */
			register_shallow(object->sha1);
		}
		packet_flush(1);
	} else
		if (shallows.nr > 0) {
			int i;
			for (i = 0; i < shallows.nr; i++)
				register_shallow(shallows.objects[i].item->sha1);
		}
	free(shallows.objects);
}

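/*
 * Advertise one ref to the client.  The very first line carries our
 * capability list after a NUL byte; subsequent calls send plain
 * "<sha1> <refname>" lines, and annotated tags are followed by a
 * peeled "<sha1> <refname>^{}" line.  Advertised refs are flagged
 * OUR_REF so receive_needs() can reject wants for anything else.
 */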
static int send_ref(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
	static const char *capabilities = "multi_ack thin-pack side-band"
		" side-band-64k ofs-delta shallow";
	struct object *o = parse_object(sha1);

	if (!o)
		die("git-upload-pack: cannot find object %s:", sha1_to_hex(sha1));

	if (capabilities)
		packet_write(1, "%s %s%c%s\n", sha1_to_hex(sha1), refname,
			     0, capabilities);
	else
		packet_write(1, "%s %s\n", sha1_to_hex(sha1), refname);
	capabilities = NULL;
	if (!(o->flags & OUR_REF)) {
		o->flags |= OUR_REF;
		nr_our_refs++;
	}
	if (o->type == OBJ_TAG) {
		o = deref_tag(o, refname, 0);
		packet_write(1, "%s %s^{}\n", sha1_to_hex(o->sha1), refname);
	}
	return 0;
}

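/*
 * Top-level protocol driver: advertise refs, read the client's wants,
 * negotiate common commits, and finally stream the pack.
 */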
static void upload_pack(void)
{
	reset_timeout();
	head_ref(send_ref, NULL);
	for_each_ref(send_ref, NULL);
	packet_flush(1);
	receive_needs();
	if (want_obj.nr) {
		get_common_commits();
		create_pack_file();
	}
}

int main(int argc, char **argv)
{
	char *dir;
	int i;
	int strict = 0;

	for (i = 1; i < argc; i++) {
		char *arg = argv[i];

		if (arg[0] != '-')
			break;
		if (!strcmp(arg, "--strict")) {
			strict = 1;
			continue;
		}
		if (!prefixcmp(arg, "--timeout=")) {
			timeout = atoi(arg+10);
			continue;
		}
		if (!strcmp(arg, "--")) {
			i++;
			break;
		}
	}

	if (i != argc-1)
		usage(upload_pack_usage);
	dir = argv[i];

	if (!enter_repo(dir, strict))
		die("'%s': unable to chdir or not a git archive", dir);
	if (is_repository_shallow())
		die("attempt to fetch/clone from a shallow repository");
	upload_pack();
	return 0;
}