2007-05-02 18:13:14 +02:00
|
|
|
#include "cache.h"
|
|
|
|
#include "pack.h"
|
2007-06-01 21:18:05 +02:00
|
|
|
#include "csum-file.h"
|
|
|
|
|
2011-02-26 00:43:25 +01:00
|
|
|
void reset_pack_idx_option(struct pack_idx_option *opts)
|
|
|
|
{
|
|
|
|
memset(opts, 0, sizeof(*opts));
|
|
|
|
opts->version = 2;
|
|
|
|
opts->off32_limit = 0x7fffffff;
|
|
|
|
}
|
2007-06-01 21:18:05 +02:00
|
|
|
|
|
|
|
static int sha1_compare(const void *_a, const void *_b)
|
|
|
|
{
|
|
|
|
struct pack_idx_entry *a = *(struct pack_idx_entry **)_a;
|
|
|
|
struct pack_idx_entry *b = *(struct pack_idx_entry **)_b;
|
|
|
|
return hashcmp(a->sha1, b->sha1);
|
|
|
|
}
|
|
|
|
|
2011-02-26 01:55:26 +01:00
|
|
|
/*
 * bsearch()/qsort() comparator for plain uint32_t values.
 * Returns negative, zero, or positive for a < b, a == b, a > b.
 */
static int cmp_uint32(const void *lhs, const void *rhs)
{
	uint32_t a = *(const uint32_t *)lhs;
	uint32_t b = *(const uint32_t *)rhs;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}
|
|
|
|
|
2011-02-26 01:54:00 +01:00
|
|
|
/*
 * Decide whether an object's pack offset must be stored in the
 * 64-bit ("large offset") table of a v2 index rather than the
 * 32-bit table.
 */
static int need_large_offset(off_t offset, const struct pack_idx_option *opts)
{
	uint32_t ofsval;

	/*
	 * Offsets that do not fit in 31 bits, or that exceed the
	 * configured 32-bit limit, always need the large table.
	 */
	if ((offset >> 31) || (opts->off32_limit < offset))
		return 1;

	/* Without recorded anomalies, a small offset stays small. */
	if (!opts->anomaly_nr)
		return 0;

	/*
	 * Some existing indexes store small offsets in the large
	 * table anyway; reproduce that if this offset is listed as
	 * such an anomaly.
	 */
	ofsval = offset;
	return bsearch(&ofsval, opts->anomaly, opts->anomaly_nr,
		       sizeof(ofsval), cmp_uint32) != NULL;
}
|
|
|
|
|
2007-06-01 21:18:05 +02:00
|
|
|
/*
 * On entry *sha1 contains the pack content SHA1 hash, on exit it is
 * the SHA1 hash of sorted object names. The objects array passed in
 * will be sorted by SHA1 on exit.
 *
 * Writes the .idx file for a pack to index_name (or to a temporary
 * file in the object database when index_name is NULL) and returns
 * the path actually used. With WRITE_IDX_VERIFY in opts->flags, the
 * data is checksummed against an existing file instead of written.
 */
const char *write_idx_file(const char *index_name, struct pack_idx_entry **objects,
			   int nr_objects, const struct pack_idx_option *opts,
			   const unsigned char *sha1)
{
	struct sha1file *f;
	struct pack_idx_entry **sorted_by_sha, **list, **last;
	off_t last_obj_offset = 0;	/* largest offset seen; decides v1 vs v2 */
	uint32_t array[256];		/* first-level fan-out table */
	int i, fd;
	uint32_t index_version;

	if (nr_objects) {
		/* Sort in place; we scan for the max offset first. */
		sorted_by_sha = objects;
		list = sorted_by_sha;
		last = sorted_by_sha + nr_objects;
		for (i = 0; i < nr_objects; ++i) {
			if (objects[i]->offset > last_obj_offset)
				last_obj_offset = objects[i]->offset;
		}
		qsort(sorted_by_sha, nr_objects, sizeof(sorted_by_sha[0]),
		      sha1_compare);
	}
	else
		sorted_by_sha = list = last = NULL;

	if (opts->flags & WRITE_IDX_VERIFY) {
		/* Verify mode needs an existing file to check against. */
		assert(index_name);
		f = sha1fd_check(index_name);
	} else {
		if (!index_name) {
			/*
			 * NOTE: static buffer -- odb_mkstemp fills it, and we
			 * immediately duplicate it, so reentrancy is not a
			 * concern for this single-threaded writer.
			 */
			static char tmp_file[PATH_MAX];
			fd = odb_mkstemp(tmp_file, sizeof(tmp_file), "pack/tmp_idx_XXXXXX");
			index_name = xstrdup(tmp_file);
		} else {
			/* Replace any stale index at the requested path. */
			unlink(index_name);
			fd = open(index_name, O_CREAT|O_EXCL|O_WRONLY, 0600);
		}
		if (fd < 0)
			die_errno("unable to create '%s'", index_name);
		f = sha1fd(fd, index_name);
	}

	/* if last object's offset is >= 2^31 we should use index V2 */
	index_version = need_large_offset(last_obj_offset, opts) ? 2 : opts->version;

	/* index versions 2 and above need a header */
	if (index_version >= 2) {
		struct pack_idx_header hdr;
		hdr.idx_signature = htonl(PACK_IDX_SIGNATURE);
		hdr.idx_version = htonl(index_version);
		sha1write(f, &hdr, sizeof(hdr));
	}

	/*
	 * Write the first-level table (the list is sorted,
	 * but we use a 256-entry lookup to be able to avoid
	 * having to do eight extra binary search iterations).
	 * array[i] holds the count of objects whose first SHA1
	 * byte is <= i (cumulative fan-out, in network order).
	 */
	for (i = 0; i < 256; i++) {
		struct pack_idx_entry **next = list;
		while (next < last) {
			struct pack_idx_entry *obj = *next;
			if (obj->sha1[0] != i)
				break;
			next++;
		}
		array[i] = htonl(next - sorted_by_sha);
		list = next;
	}
	sha1write(f, array, 256 * 4);

	/*
	 * Write the actual SHA1 entries..
	 * For v1, each entry is a 4-byte offset followed by the SHA1;
	 * for v2, the offsets live in separate tables written below.
	 */
	list = sorted_by_sha;
	for (i = 0; i < nr_objects; i++) {
		struct pack_idx_entry *obj = *list++;
		if (index_version < 2) {
			uint32_t offset = htonl(obj->offset);
			sha1write(f, &offset, 4);
		}
		sha1write(f, obj->sha1, 20);
		/* Sorted order puts duplicates adjacent; list[-2] is the previous entry. */
		if ((opts->flags & WRITE_IDX_STRICT) &&
		    (i && !hashcmp(list[-2]->sha1, obj->sha1)))
			die("The same object %s appears twice in the pack",
			    sha1_to_hex(obj->sha1));
	}

	if (index_version >= 2) {
		unsigned int nr_large_offset = 0;

		/* write the crc32 table */
		list = sorted_by_sha;
		for (i = 0; i < nr_objects; i++) {
			struct pack_idx_entry *obj = *list++;
			uint32_t crc32_val = htonl(obj->crc32);
			sha1write(f, &crc32_val, 4);
		}

		/*
		 * write the 32-bit offset table; entries too big for
		 * 32 bits store the MSB flag plus an index into the
		 * large offset table instead.
		 */
		list = sorted_by_sha;
		for (i = 0; i < nr_objects; i++) {
			struct pack_idx_entry *obj = *list++;
			uint32_t offset;

			offset = (need_large_offset(obj->offset, opts)
				  ? (0x80000000 | nr_large_offset++)
				  : obj->offset);
			offset = htonl(offset);
			sha1write(f, &offset, 4);
		}

		/* write the large offset table */
		list = sorted_by_sha;
		while (nr_large_offset) {
			struct pack_idx_entry *obj = *list++;
			uint64_t offset = obj->offset;
			uint32_t split[2];

			if (!need_large_offset(offset, opts))
				continue;
			/* Big-endian 64-bit value, written high word first. */
			split[0] = htonl(offset >> 32);
			split[1] = htonl(offset & 0xffffffff);
			sha1write(f, split, 8);
			nr_large_offset--;
		}
	}

	/* Trailer: copy of the pack's own SHA1, then the idx checksum. */
	sha1write(f, sha1, 20);
	sha1close(f, NULL, ((opts->flags & WRITE_IDX_VERIFY)
			    ? CSUM_CLOSE : CSUM_FSYNC));
	return index_name;
}
|
2007-05-02 18:13:14 +02:00
|
|
|
|
2011-10-28 20:40:48 +02:00
|
|
|
/*
 * Emit the fixed-size pack header ("PACK", version, object count)
 * into the checksummed output stream and report how many bytes
 * were written.
 */
off_t write_pack_header(struct sha1file *f, uint32_t nr_entries)
{
	struct pack_header header;

	/* All header fields are stored in network byte order. */
	header.hdr_signature = htonl(PACK_SIGNATURE);
	header.hdr_version = htonl(PACK_VERSION);
	header.hdr_entries = htonl(nr_entries);

	sha1write(f, &header, sizeof(header));
	return sizeof(header);
}
|
|
|
|
|
2008-08-29 22:07:59 +02:00
|
|
|
/*
 * Update pack header with object_count and compute new SHA1 for pack data
 * associated to pack_fd, and write that SHA1 at the end. That new SHA1
 * is also returned in new_pack_sha1.
 *
 * If partial_pack_sha1 is non null, then the SHA1 of the existing pack
 * (without the header update) is computed and validated against the
 * one provided in partial_pack_sha1. The validation is performed at
 * partial_pack_offset bytes in the pack file. The SHA1 of the remaining
 * data (i.e. from partial_pack_offset to the end) is then computed and
 * returned in partial_pack_sha1.
 *
 * Note that new_pack_sha1 is updated last, so both new_pack_sha1 and
 * partial_pack_sha1 can refer to the same buffer if the caller is not
 * interested in the resulting SHA1 of pack data above partial_pack_offset.
 */
void fixup_pack_header_footer(int pack_fd,
			 unsigned char *new_pack_sha1,
			 const char *pack_name,
			 uint32_t object_count,
			 unsigned char *partial_pack_sha1,
			 off_t partial_pack_offset)
{
	int aligned_sz, buf_sz = 8 * 1024;
	/* old_: hash of the pack as-is; new_: hash with updated header. */
	git_SHA_CTX old_sha1_ctx, new_sha1_ctx;
	struct pack_header hdr;
	char *buf;

	git_SHA1_Init(&old_sha1_ctx);
	git_SHA1_Init(&new_sha1_ctx);

	/* Re-read the existing header so we can patch its entry count. */
	if (lseek(pack_fd, 0, SEEK_SET) != 0)
		die_errno("Failed seeking to start of '%s'", pack_name);
	if (read_in_full(pack_fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		die_errno("Unable to reread header of '%s'", pack_name);
	if (lseek(pack_fd, 0, SEEK_SET) != 0)
		die_errno("Failed seeking to start of '%s'", pack_name);
	/* The old hash covers the original header... */
	git_SHA1_Update(&old_sha1_ctx, &hdr, sizeof(hdr));
	hdr.hdr_entries = htonl(object_count);
	/* ...the new hash covers the header with the updated count. */
	git_SHA1_Update(&new_sha1_ctx, &hdr, sizeof(hdr));
	write_or_die(pack_fd, &hdr, sizeof(hdr));
	/* partial_pack_offset is measured from the start of the file. */
	partial_pack_offset -= sizeof(hdr);

	buf = xmalloc(buf_sz);
	/* First chunk is short by a header so later reads stay buf_sz-aligned. */
	aligned_sz = buf_sz - sizeof(hdr);
	for (;;) {
		ssize_t m, n;
		/* Stop a read exactly at partial_pack_offset when validating. */
		m = (partial_pack_sha1 && partial_pack_offset < aligned_sz) ?
			partial_pack_offset : aligned_sz;
		n = xread(pack_fd, buf, m);
		if (!n)
			break;
		if (n < 0)
			die_errno("Failed to checksum '%s'", pack_name);
		git_SHA1_Update(&new_sha1_ctx, buf, n);

		aligned_sz -= n;
		if (!aligned_sz)
			aligned_sz = buf_sz;

		if (!partial_pack_sha1)
			continue;

		git_SHA1_Update(&old_sha1_ctx, buf, n);
		partial_pack_offset -= n;
		if (partial_pack_offset == 0) {
			/* Reached the validation point: check the caller's hash. */
			unsigned char sha1[20];
			git_SHA1_Final(sha1, &old_sha1_ctx);
			if (hashcmp(sha1, partial_pack_sha1) != 0)
				die("Unexpected checksum for %s "
				    "(disk corruption?)", pack_name);
			/*
			 * Now let's compute the SHA1 of the remainder of the
			 * pack, which also means making partial_pack_offset
			 * big enough not to matter anymore.
			 */
			git_SHA1_Init(&old_sha1_ctx);
			/* Set it to the largest positive off_t value. */
			partial_pack_offset = ~partial_pack_offset;
			partial_pack_offset -= MSB(partial_pack_offset, 1);
		}
	}
	free(buf);

	/* Hand back the hash of the data past the validation point. */
	if (partial_pack_sha1)
		git_SHA1_Final(partial_pack_sha1, &old_sha1_ctx);
	git_SHA1_Final(new_pack_sha1, &new_sha1_ctx);
	/* Append the new trailer and make sure it hits the disk. */
	write_or_die(pack_fd, new_pack_sha1, 20);
	fsync_or_die(pack_fd, pack_name);
}
|
2007-09-14 09:31:16 +02:00
|
|
|
|
|
|
|
char *index_pack_lockfile(int ip_out)
|
|
|
|
{
|
|
|
|
char packname[46];
|
|
|
|
|
|
|
|
/*
|
2009-02-25 08:11:29 +01:00
|
|
|
* The first thing we expect from index-pack's output
|
2007-09-14 09:31:16 +02:00
|
|
|
* is "pack\t%40s\n" or "keep\t%40s\n" (46 bytes) where
|
|
|
|
* %40s is the newly created pack SHA1 name. In the "keep"
|
|
|
|
* case, we need it to remove the corresponding .keep file
|
|
|
|
* later on. If we don't get that then tough luck with it.
|
|
|
|
*/
|
2008-05-03 15:27:26 +02:00
|
|
|
if (read_in_full(ip_out, packname, 46) == 46 && packname[45] == '\n' &&
|
|
|
|
memcmp(packname, "keep\t", 5) == 0) {
|
2007-09-14 09:31:16 +02:00
|
|
|
char path[PATH_MAX];
|
|
|
|
packname[45] = 0;
|
|
|
|
snprintf(path, sizeof(path), "%s/pack/pack-%s.keep",
|
|
|
|
get_object_directory(), packname + 5);
|
|
|
|
return xstrdup(path);
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
2010-02-23 21:02:37 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The per-object header is a pretty dense thing, which is
|
|
|
|
* - first byte: low four bits are "size", then three bits of "type",
|
|
|
|
* and the high bit is "size continues".
|
|
|
|
* - each byte afterwards: low seven bits are size continuation,
|
|
|
|
* with the high bit being "size continues"
|
|
|
|
*/
|
|
|
|
int encode_in_pack_object_header(enum object_type type, uintmax_t size, unsigned char *hdr)
|
|
|
|
{
|
|
|
|
int n = 1;
|
|
|
|
unsigned char c;
|
|
|
|
|
|
|
|
if (type < OBJ_COMMIT || type > OBJ_REF_DELTA)
|
|
|
|
die("bad type %d", type);
|
|
|
|
|
|
|
|
c = (type << 4) | (size & 15);
|
|
|
|
size >>= 4;
|
|
|
|
while (size) {
|
|
|
|
*hdr++ = c | 0x80;
|
|
|
|
c = size & 0x7f;
|
|
|
|
size >>= 7;
|
|
|
|
n++;
|
|
|
|
}
|
|
|
|
*hdr = c;
|
|
|
|
return n;
|
|
|
|
}
|
2011-10-28 20:52:14 +02:00
|
|
|
|
|
|
|
struct sha1file *create_tmp_packfile(char **pack_tmp_name)
|
|
|
|
{
|
|
|
|
char tmpname[PATH_MAX];
|
|
|
|
int fd;
|
|
|
|
|
|
|
|
fd = odb_mkstemp(tmpname, sizeof(tmpname), "pack/tmp_pack_XXXXXX");
|
|
|
|
*pack_tmp_name = xstrdup(tmpname);
|
|
|
|
return sha1fd(fd, *pack_tmp_name);
|
|
|
|
}
|
2011-10-28 21:34:09 +02:00
|
|
|
|
2014-03-03 10:24:29 +01:00
|
|
|
/*
 * Move a finished temporary packfile (and its freshly written index)
 * into place under their final, trailer-hash-derived names.
 * name_buffer holds the destination directory/prefix on entry and is
 * restored to that prefix before returning; sha1 is the pack trailer
 * hash used to name both files.
 */
void finish_tmp_packfile(struct strbuf *name_buffer,
			 const char *pack_tmp_name,
			 struct pack_idx_entry **written_list,
			 uint32_t nr_written,
			 struct pack_idx_option *pack_idx_opts,
			 unsigned char sha1[])
{
	const char *idx_tmp_name;
	/* Remember the prefix length so we can reuse the buffer per suffix. */
	int basename_len = name_buffer->len;

	if (adjust_shared_perm(pack_tmp_name))
		die_errno("unable to make temporary pack file readable");

	/* Write the .idx to a temp file; NULL lets it pick its own name. */
	idx_tmp_name = write_idx_file(NULL, written_list, nr_written,
				      pack_idx_opts, sha1);
	if (adjust_shared_perm(idx_tmp_name))
		die_errno("unable to make temporary index file readable");

	strbuf_addf(name_buffer, "%s.pack", sha1_to_hex(sha1));
	/* Drop any open handle on an identically named pack before rename. */
	free_pack_by_name(name_buffer->buf);

	if (rename(pack_tmp_name, name_buffer->buf))
		die_errno("unable to rename temporary pack file");

	/* Rewind to the prefix and build the .idx destination path. */
	strbuf_setlen(name_buffer, basename_len);

	strbuf_addf(name_buffer, "%s.idx", sha1_to_hex(sha1));
	if (rename(idx_tmp_name, name_buffer->buf))
		die_errno("unable to rename temporary index file");

	/* Leave the buffer as the caller handed it to us. */
	strbuf_setlen(name_buffer, basename_len);

	free((void *)idx_tmp_name);
}
|