Merge branch 'jk/fast-import-cleanup'
Code clean-up.

* jk/fast-import-cleanup:
  pack.h: define largest possible encoded object size
  encode_in_pack_object_header: respect output buffer length
  fast-import: use xsnprintf for formatting headers
  fast-import: use xsnprintf for writing sha1s
commit 53a0f9f7ad
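The series gives encode_in_pack_object_header an explicit output-buffer bound and names the largest buffer it can ever need (MAX_PACK_OBJECT_HEADER). A minimal standalone sketch of the bounded encoding, assuming simplified stand-ins for git's die() and object-type constants (encode_pack_header and its error handling are illustrative, not git's actual API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PACK_OBJECT_HEADER 10	/* see the pack.h hunk below */

/*
 * First byte: high bit = "size continues", 3 type bits, low 4 size bits.
 * Each following byte: high bit = "size continues", 7 more size bits.
 */
static int encode_pack_header(unsigned char *hdr, int hdr_len,
			      unsigned type, uintmax_t size)
{
	int n = 1;
	unsigned char c = (type << 4) | (size & 15);

	size >>= 4;
	while (size) {
		if (n == hdr_len) {
			fprintf(stderr, "object size is too enormous to format\n");
			exit(128);	/* stand-in for git's die() */
		}
		*hdr++ = c | 0x80;
		c = size & 0x7f;
		size >>= 7;
		n++;
	}
	*hdr = c;
	return n;
}

int main(void)
{
	unsigned char hdr[MAX_PACK_OBJECT_HEADER];
	/* 3 is OBJ_BLOB in the pack format; a 1 GiB blob needs a 5-byte header */
	int n = encode_pack_header(hdr, sizeof(hdr), 3, (uintmax_t)1 << 30);

	printf("header length: %d bytes\n", n);
	return 0;
}

The hunks below update every caller to pass the buffer and its size, and teach the encoder itself to die rather than overrun.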
@@ -239,7 +239,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
 				unsigned long limit, int usable_delta)
 {
 	unsigned long size, datalen;
-	unsigned char header[10], dheader[10];
+	unsigned char header[MAX_PACK_OBJECT_HEADER],
+		      dheader[MAX_PACK_OBJECT_HEADER];
 	unsigned hdrlen;
 	enum object_type type;
 	void *buf;
@@ -286,7 +287,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
 	 * The object header is a byte of 'type' followed by zero or
 	 * more bytes of length.
 	 */
-	hdrlen = encode_in_pack_object_header(type, size, header);
+	hdrlen = encode_in_pack_object_header(header, sizeof(header),
+					      type, size);
 
 	if (type == OBJ_OFS_DELTA) {
 		/*
@@ -352,13 +354,15 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
 	off_t offset;
 	enum object_type type = entry->type;
 	off_t datalen;
-	unsigned char header[10], dheader[10];
+	unsigned char header[MAX_PACK_OBJECT_HEADER],
+		      dheader[MAX_PACK_OBJECT_HEADER];
 	unsigned hdrlen;
 
 	if (entry->delta)
 		type = (allow_ofs_delta && entry->delta->idx.offset) ?
 				OBJ_OFS_DELTA : OBJ_REF_DELTA;
-	hdrlen = encode_in_pack_object_header(type, entry->size, header);
+	hdrlen = encode_in_pack_object_header(header, sizeof(header),
+					      type, entry->size);
 
 	offset = entry->in_pack_offset;
 	revidx = find_pack_revindex(p, offset);
@@ -105,7 +105,7 @@ static int stream_to_pack(struct bulk_checkin_state *state,
 
 	git_deflate_init(&s, pack_compression_level);
 
-	hdrlen = encode_in_pack_object_header(type, size, obuf);
+	hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), type, size);
 	s.next_out = obuf + hdrlen;
 	s.avail_out = sizeof(obuf) - hdrlen;
 
@@ -1173,7 +1173,8 @@ static int store_object(
 		delta_count_by_type[type]++;
 		e->depth = last->depth + 1;
 
-		hdrlen = encode_in_pack_object_header(OBJ_OFS_DELTA, deltalen, hdr);
+		hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
+						      OBJ_OFS_DELTA, deltalen);
 		sha1write(pack_file, hdr, hdrlen);
 		pack_size += hdrlen;
 
@@ -1184,7 +1185,8 @@ static int store_object(
 		pack_size += sizeof(hdr) - pos;
 	} else {
 		e->depth = 0;
-		hdrlen = encode_in_pack_object_header(type, dat->len, hdr);
+		hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
+						      type, dat->len);
 		sha1write(pack_file, hdr, hdrlen);
 		pack_size += hdrlen;
 	}
@@ -1237,9 +1239,7 @@ static void stream_blob(uintmax_t len, unsigned char *sha1out, uintmax_t mark)
 	sha1file_checkpoint(pack_file, &checkpoint);
 	offset = checkpoint.offset;
 
-	hdrlen = snprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
-	if (out_sz <= hdrlen)
-		die("impossibly large object header");
+	hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
 
 	git_SHA1_Init(&c);
 	git_SHA1_Update(&c, out_buf, hdrlen);
@@ -1248,9 +1248,7 @@ static void stream_blob(uintmax_t len, unsigned char *sha1out, uintmax_t mark)
 
 	git_deflate_init(&s, pack_compression_level);
 
-	hdrlen = encode_in_pack_object_header(OBJ_BLOB, len, out_buf);
-	if (out_sz <= hdrlen)
-		die("impossibly large object header");
+	hdrlen = encode_in_pack_object_header(out_buf, out_sz, OBJ_BLOB, len);
 
 	s.next_out = out_buf + hdrlen;
 	s.avail_out = out_sz - hdrlen;
@@ -3003,7 +3001,7 @@ static void parse_get_mark(const char *p)
 	if (!oe)
 		die("Unknown mark: %s", command_buf.buf);
 
-	snprintf(output, sizeof(output), "%s\n", sha1_to_hex(oe->idx.sha1));
+	xsnprintf(output, sizeof(output), "%s\n", sha1_to_hex(oe->idx.sha1));
 	cat_blob_write(output, 41);
 }
 
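In stream_blob, the snprintf call becomes xsnprintf and the explicit "impossibly large object header" check goes away because xsnprintf dies instead of silently truncating; the encode_in_pack_object_header call loses its check because that bound is now enforced inside the encoder. A minimal sketch of the xsnprintf behavior, with a simplified error path (xsnprintf_sketch is illustrative; git's real helper dies with a BUG-style message):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Format into a fixed-size buffer; abort rather than truncate on overflow. */
static int xsnprintf_sketch(char *dst, size_t max, const char *fmt, ...)
{
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = vsnprintf(dst, max, fmt, ap);
	va_end(ap);

	if (len < 0 || (size_t)len >= max) {
		fprintf(stderr, "fatal: attempt to snprintf into too-small buffer\n");
		exit(128);	/* stand-in for git's die() */
	}
	return len;
}

int main(void)
{
	char out_buf[32];
	/* mirrors the "blob <len>" header stream_blob formats before deflating */
	int hdrlen = xsnprintf_sketch(out_buf, sizeof(out_buf), "blob %lu", 12345UL) + 1;

	printf("%s (%d bytes including NUL)\n", out_buf, hdrlen);
	return 0;
}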
@@ -304,7 +304,8 @@ char *index_pack_lockfile(int ip_out)
  * - each byte afterwards: low seven bits are size continuation,
  *   with the high bit being "size continues"
  */
-int encode_in_pack_object_header(enum object_type type, uintmax_t size, unsigned char *hdr)
+int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
+				 enum object_type type, uintmax_t size)
 {
 	int n = 1;
 	unsigned char c;
@@ -315,6 +316,8 @@ int encode_in_pack_object_header(enum object_type type, uintmax_t size, unsigned
 	c = (type << 4) | (size & 15);
 	size >>= 4;
 	while (size) {
+		if (n == hdr_len)
+			die("object size is too enormous to format");
 		*hdr++ = c | 0x80;
 		c = size & 0x7f;
 		size >>= 7;
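A worked example of the encoding implemented above (my arithmetic, not part of the patch): an OBJ_BLOB (type 3) of size 1000 fits in two header bytes.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned type = 3, size = 1000;				/* OBJ_BLOB, 0x3e8 */
	unsigned char b0 = 0x80 | (type << 4) | (size & 15);	/* 0xb8: "more" bit, type, low 4 size bits */
	unsigned char b1 = (size >> 4) & 0x7f;			/* 0x3e: last byte, high bit clear */

	assert(b0 == 0xb8 && b1 == 0x3e);
	printf("%02x %02x\n", b0, b1);
	return 0;
}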
pack.h
@@ -84,7 +84,14 @@ extern int verify_pack(struct packed_git *, verify_fn fn, struct progress *, uin
 extern off_t write_pack_header(struct sha1file *f, uint32_t);
 extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t);
 extern char *index_pack_lockfile(int fd);
-extern int encode_in_pack_object_header(enum object_type, uintmax_t, unsigned char *);
+
+/*
+ * The "hdr" output buffer should be at least this big, which will handle sizes
+ * up to 2^67.
+ */
+#define MAX_PACK_OBJECT_HEADER 10
+extern int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
+					 enum object_type, uintmax_t);
 
 #define PH_ERROR_EOF (-1)
 #define PH_ERROR_PACK_SIGNATURE (-2)
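The 2^67 figure in the new comment follows from the bit budget (my arithmetic, not part of the patch): the first byte carries 4 size bits and each of the remaining 9 bytes carries 7 more, so a 10-byte header covers 4 + 9*7 = 67 bits. A hypothetical helper expressing the same bound:

#include <stdio.h>

/* Size bits an hdr_len-byte pack object header can carry (hypothetical helper). */
static int header_size_bits(int hdr_len)
{
	return 4 + 7 * (hdr_len - 1);
}

int main(void)
{
	printf("%d\n", header_size_bits(10));	/* prints 67, hence "up to 2^67" */
	return 0;
}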