/*
 * Copyright (c) 2006 Franck Bui-Huu
 */
#include "cache.h"
#include "builtin.h"
#include "archive.h"
#include "pkt-line.h"
#include "sideband.h"
#include "run-command.h"
#include "argv-array.h"

static const char upload_archive_usage[] =
	"git upload-archive <repo>";

static const char deadchild[] =
"git upload-archive: archiver died with error";
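
/*
 * Cap the number of "argument" packets we accept so a client cannot
 * make us queue unbounded data: at most 64 arguments of at most
 * LARGE_PACKET_MAX bytes each.
 */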
#define MAX_ARGS (64)
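
/*
 * Backend of "git upload-archive": collect the client's "argument"
 * packets from stdin and hand them to write_archive(), which streams
 * the archive to stdout.
 */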
int cmd_upload_archive_writer(int argc, const char **argv, const char *prefix)
{
	struct argv_array sent_argv = ARGV_ARRAY_INIT;
	const char *arg_cmd = "argument ";

	if (argc != 2 || !strcmp(argv[1], "-h"))
		usage(upload_archive_usage);

	if (!enter_repo(argv[1], 0))
		die("'%s' does not appear to be a git repository", argv[1]);

	/* put received options in sent_argv[] */
	argv_array_push(&sent_argv, "git-upload-archive");
	for (;;) {
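		/*
		 * Each argument arrives in its own pkt-line; packet_read_line()
		 * returns the line read into a static LARGE_PACKET_MAX buffer,
		 * or NULL once the terminating flush packet is seen.
		 */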
		char *buf = packet_read_line(0, NULL);
		if (!buf)
			break;	/* got a flush */
		if (sent_argv.argc > MAX_ARGS)
			die("Too many options (>%d)", MAX_ARGS - 1);

		if (!starts_with(buf, arg_cmd))
			die("'argument' token or flush expected");
		argv_array_push(&sent_argv, buf + strlen(arg_cmd));
	}

	/* parse all options sent by the client */
	return write_archive(sent_argv.argc, sent_argv.argv, prefix,
			     the_repository, NULL, 1);
}
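
/*
 * Report an error to the client over sideband channel #3, then die.
 */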
__attribute__((format (printf, 1, 2)))
static void error_clnt(const char *fmt, ...)
{
	struct strbuf buf = STRBUF_INIT;
	va_list params;

	va_start(params, fmt);
	strbuf_vaddf(&buf, fmt, params);
	va_end(params);
	send_sideband(1, 3, buf.buf, buf.len, LARGE_PACKET_MAX);
	die("sent error to the client: %s", buf.buf);
}
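
/*
 * Copy a chunk of the child's output from child_fd to the client,
 * prefixed with the given sideband channel number.
 */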
static ssize_t process_input(int child_fd, int band)
{
	char buf[16384];
	ssize_t sz = read(child_fd, buf, sizeof(buf));
	if (sz < 0) {
		if (errno != EAGAIN && errno != EINTR)
			error_clnt("read error: %s\n", strerror(errno));
		return sz;
	}
	send_sideband(1, band, buf, sz, LARGE_PACKET_MAX);
	return sz;
}
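
/*
 * Frontend of "git upload-archive": spawn the writer as a subprocess
 * and relay its output to the client over the pkt-line sideband.
 */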
int cmd_upload_archive(int argc, const char **argv, const char *prefix)
{
	struct child_process writer = { argv };

	if (argc == 2 && !strcmp(argv[1], "-h"))
		usage(upload_archive_usage);

	/*
	 * Set up sideband subprocess.
	 *
	 * We (parent) monitor and read from child, sending its fd#1 and fd#2
	 * multiplexed out to our fd#1. If the child dies, we tell the other
	 * end over channel #3.
	 */
	argv[0] = "upload-archive--writer";
	writer.out = writer.err = -1;
	writer.git_cmd = 1;
	if (start_command(&writer)) {
		int err = errno;
		packet_write_fmt(1, "NACK unable to spawn subprocess\n");
		die("upload-archive: %s", strerror(err));
	}

	packet_write_fmt(1, "ACK\n");
	packet_flush(1);
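
	/*
	 * Relay the writer's stdout (data) and stderr (status) to the
	 * client until it exits, then report its exit status.
	 */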
	while (1) {
		struct pollfd pfd[2];

		pfd[0].fd = writer.out;
		pfd[0].events = POLLIN;
		pfd[1].fd = writer.err;
		pfd[1].events = POLLIN;
		if (poll(pfd, 2, -1) < 0) {
			if (errno != EINTR) {
				error_errno("poll failed resuming");
				sleep(1);
			}
			continue;
		}
		if (pfd[1].revents & POLLIN)
			/* Status stream ready */
			if (process_input(pfd[1].fd, 2))
				continue;
		if (pfd[0].revents & POLLIN)
			/* Data stream ready */
			if (process_input(pfd[0].fd, 1))
				continue;

		if (finish_command(&writer))
			error_clnt("%s", deadchild);
		packet_flush(1);
		break;
	}
	return 0;
}