git-commit-vandalism/pack-objects.c

#include "cache.h"
#include "object.h"
#include "pack.h"
#include "pack-objects.h"
#include "packfile.h"
#include "config.h"

/*
 * The hash table index[] stores (object position + 1) so that a zero
 * entry means "empty slot".  Look up sha1 with open addressing and
 * linear probing; on a miss, clear *found and return the slot where
 * the entry would be inserted.
 */
static uint32_t locate_object_entry_hash(struct packing_data *pdata,
					 const unsigned char *sha1,
					 int *found)
{
	uint32_t i, mask = (pdata->index_size - 1);

	i = sha1hash(sha1) & mask;

	while (pdata->index[i] > 0) {
		uint32_t pos = pdata->index[i] - 1;

		if (!hashcmp(sha1, pdata->objects[pos].idx.oid.hash)) {
			*found = 1;
			return i;
		}

		i = (i + 1) & mask;
	}

	*found = 0;
	return i;
}

/*
 * Round v up to the nearest power of two (e.g. 1000 -> 1024);
 * a value that is already a power of two is returned unchanged.
 */
static inline uint32_t closest_pow2(uint32_t v)
{
	v = v - 1;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}

static void rehash_objects(struct packing_data *pdata)
{
	uint32_t i;
	struct object_entry *entry;

	/*
	 * Size the table at three times the number of objects (rounded
	 * up to a power of two, minimum 1024) so the load factor stays
	 * low after the rehash.
	 */
	pdata->index_size = closest_pow2(pdata->nr_objects * 3);
	if (pdata->index_size < 1024)
		pdata->index_size = 1024;

	free(pdata->index);
	pdata->index = xcalloc(pdata->index_size, sizeof(*pdata->index));

	entry = pdata->objects;

	for (i = 0; i < pdata->nr_objects; i++) {
		int found;
		uint32_t ix = locate_object_entry_hash(pdata,
						       entry->idx.oid.hash,
						       &found);

		if (found)
			BUG("Duplicate object in hash");

		pdata->index[ix] = i + 1;
		entry++;
	}
}

struct object_entry *packlist_find(struct packing_data *pdata,
				   const unsigned char *sha1,
				   uint32_t *index_pos)
{
	uint32_t i;
	int found;

	if (!pdata->index_size)
		return NULL;

	i = locate_object_entry_hash(pdata, sha1, &found);

	/*
	 * Report the probed slot even on a miss, so the caller can
	 * pass it straight to packlist_alloc() as the insert position.
	 */
	if (index_pos)
		*index_pos = i;

	if (!found)
		return NULL;

	return &pdata->objects[pdata->index[i] - 1];
}

static void prepare_in_pack_by_idx(struct packing_data *pdata)
{
	struct packed_git **mapping, *p;
	int cnt = 0, nr = 1U << OE_IN_PACK_BITS;

	ALLOC_ARRAY(mapping, nr);
	/*
	 * oe_in_pack() on an all-zero'd object_entry
	 * (i.e. in_pack_idx also zero) should return NULL.
	 */
	mapping[cnt++] = NULL;
	for (p = get_packed_git(the_repository); p; p = p->next, cnt++) {
		if (cnt == nr) {
			/*
			 * Too many packs to fit in the index space:
			 * leave in_pack_by_idx unset so oe_in_pack()
			 * falls back to the full in_pack[] array.
			 */
			free(mapping);
			return;
		}
		p->index = cnt;
		mapping[cnt] = p;
	}
	pdata->in_pack_by_idx = mapping;
}

/*
 * A new pack appears after prepare_in_pack_by_idx() has been
 * run. This is likely a race.
 *
 * We could map this new pack into the in_pack_by_idx[] array, but
 * then we would have to handle a full array anyway. And since this
 * fallback code is hard to test, keep things simple and fall back
 * to using the in_pack[] array.
 */
void oe_map_new_pack(struct packing_data *pack,
		     struct packed_git *p)
{
	uint32_t i;

	REALLOC_ARRAY(pack->in_pack, pack->nr_alloc);

	for (i = 0; i < pack->nr_objects; i++)
		pack->in_pack[i] = oe_in_pack(pack, pack->objects + i);

	FREE_AND_NULL(pack->in_pack_by_idx);
}
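
For context, here is a minimal sketch of how oe_in_pack() resolves an
entry's pack under the two representations used above: the compact
in_pack_by_idx[] table and the full in_pack[] fallback. It is modeled
on the accessor in pack-objects.h of the same era; treat the exact
field names (in_pack_idx in particular) as assumptions taken from that
header rather than definitions in this file.

/*
 * Sketch (assumed from pack-objects.h of this era): with the compact
 * representation, in_pack_idx indexes into in_pack_by_idx[]; slot 0 is
 * NULL, so an all-zero'd entry maps to "no pack".  If the compact
 * table was never built (or was dropped by oe_map_new_pack()), fall
 * back to the per-object in_pack[] array.
 */
static inline struct packed_git *oe_in_pack(const struct packing_data *pack,
					    const struct object_entry *e)
{
	if (pack->in_pack_by_idx)
		return pack->in_pack_by_idx[e->in_pack_idx];
	else
		return pack->in_pack[e - pack->objects];
}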

/* assume pdata is already zero'd by caller */
void prepare_packing_data(struct packing_data *pdata)
{
	if (git_env_bool("GIT_TEST_FULL_IN_PACK_ARRAY", 0)) {
		/*
		 * do not initialize in_pack_by_idx[] to force the
		 * slow path in oe_in_pack()
		 */
	} else {
		prepare_in_pack_by_idx(pdata);
	}

	pdata->oe_size_limit = git_env_ulong("GIT_TEST_OE_SIZE",
					     1U << OE_SIZE_BITS);
	pdata->oe_delta_size_limit = git_env_ulong("GIT_TEST_OE_DELTA_SIZE",
						   1UL << OE_DELTA_SIZE_BITS);
}

Blame note for the oe_delta_size_limit initialization above, from the
commit dated 2018-07-22 10:04:21 +02:00:

pack-objects: fix performance issues on packing large deltas

Let's start with some background about oe_delta_size() and
oe_set_delta_size(). If you already know, skip the next paragraph.

These two were added in 0aca34e826 (pack-objects: shrink delta_size
field in struct object_entry - 2018-04-14) to help reduce the size of
'struct object_entry'. The delta size field in this struct was reduced
to hold at most 1 MB. So if any new delta is produced and is larger
than 1 MB, it is dropped, because we have nowhere to store such a
large size. A fallback is provided for the case where existing
packfiles already have large deltas: we can retrieve the size from the
pack itself.

While this should help small machines repacking large repos without
large deltas (i.e. less memory pressure), dropping large deltas during
the delta selection process can produce worse packfiles. And if
existing packfiles already contain deltas over 1 MB and pack-objects
is instructed not to reuse deltas, all of them are dropped on the
floor, and the resulting pack is definitely bigger.

There is also a regression in terms of CPU/IO if we have large on-disk
deltas, because the fallback code has to parse the pack every time the
delta size is needed, and even just accessing the mmap'd pack data is
enough to cause extra page faults when memory is under pressure. Both
of these issues were reported on the mailing list. Here are some
numbers for comparison.

    Version  Pack (MB)  MaxRSS (kB)  Time (s)
    -------  ---------  -----------  --------
    2.17.0        5498     43513628   2494.85
    2.18.0       10531     40449596   4168.94

This patch provides a better fallback that is

- cheaper in terms of CPU and IO, because we no longer have to read
  existing pack files as much;

- better in terms of pack size, because the pack heuristics are back
  to their 2.17.0 behavior: we do not drop large deltas at all.

If we encounter any delta (on-disk or created during the try_delta
phase) that is larger than the limit, we stop using the delta_size_
field for it, because that field cannot hold such a size anyway. A new
array of delta sizes is dynamically allocated and can hold all the
deltas that 2.17.0 could; it only contains sizes that delta_size_
cannot. With this, we no longer have to drop deltas in try_delta().

The downside, of course, is that we use slightly more memory, even
compared to 2.17.0. But since this is considered an uncommon case, a
bit more memory consumption should not be a problem. The delta size
limit is also raised from 1 MB to 16 MB to better cover the common
case and avoid that extra memory consumption (99.999% of deltas in the
reported repo are under 12 MB; Jeff noted binary artifacts topped out
at about 3 MB in some other private repos). Other fields are shuffled
around to keep the struct packed tight, so we do not use more memory
in the common case even with this limit update.

A note about thread synchronization: since this code can run in
parallel during the delta searching phase, we need a mutex. The
realloc part in packlist_alloc() is not protected, because it only
happens during the object counting phase, which is always
single-threaded. Access to e->delta_size_ (and by extension
pack->delta_size[e - pack->objects]) is unprotected as before; the
thread scheduler in pack-objects must make sure "e" is never updated
by two different threads. The area under the new lock is as small as
possible, avoiding locking at all in the common case, since lock
contention with a high thread count could be expensive (most blobs are
small enough that delta compute time is short, so we would end up
taking the lock very often). A previous attempt that always held a
lock in oe_delta_size() and oe_set_delta_size() increased execution
time by 33% when repacking linux.git with 40 threads.

Reported-by: Elijah Newren <newren@gmail.com>
Helped-by: Elijah Newren <newren@gmail.com>
Helped-by: Jeff King <peff@peff.net>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
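
To make the mechanism described in that commit message concrete, here
is a minimal sketch of the two accessors it discusses, modeled on
pack-objects.h from the same series. The field names (delta_size_,
delta_size_valid) and the packing_data_lock()/packing_data_unlock()
helpers are assumptions taken from that header, not definitions in
this file.

/*
 * Sketch, modeled on pack-objects.h from this series.  Small delta
 * sizes live in the bitfield e->delta_size_; anything at or over
 * oe_delta_size_limit spills into the dynamically allocated
 * pack->delta_size[] overflow array, indexed by object position.
 */
static inline unsigned long oe_delta_size(struct packing_data *pack,
					  const struct object_entry *e)
{
	if (e->delta_size_valid)
		return e->delta_size_;
	/* Spilled: the size lives in the overflow array. */
	return pack->delta_size[e - pack->objects];
}

static inline void oe_set_delta_size(struct packing_data *pack,
				     struct object_entry *e,
				     unsigned long size)
{
	if (size < pack->oe_delta_size_limit) {
		e->delta_size_ = size;
		e->delta_size_valid = 1;
	} else {
		/*
		 * Take the lock only for the rare, one-time allocation
		 * of the overflow array; the per-entry store itself is
		 * unsynchronized, as the commit message explains.
		 */
		packing_data_lock(pack);
		if (!pack->delta_size)
			ALLOC_ARRAY(pack->delta_size, pack->nr_alloc);
		packing_data_unlock(pack);

		pack->delta_size[e - pack->objects] = size;
		e->delta_size_valid = 0;
	}
}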

struct object_entry *packlist_alloc(struct packing_data *pdata,
				    const unsigned char *sha1,
				    uint32_t index_pos)
{
	struct object_entry *new_entry;

	if (pdata->nr_objects >= pdata->nr_alloc) {
		pdata->nr_alloc = (pdata->nr_alloc + 1024) * 3 / 2;
		REALLOC_ARRAY(pdata->objects, pdata->nr_alloc);

		if (!pdata->in_pack_by_idx)
			REALLOC_ARRAY(pdata->in_pack, pdata->nr_alloc);
		if (pdata->delta_size)
			REALLOC_ARRAY(pdata->delta_size, pdata->nr_alloc);
	}

	new_entry = pdata->objects + pdata->nr_objects++;

	memset(new_entry, 0, sizeof(*new_entry));
	hashcpy(new_entry->idx.oid.hash, sha1);

	/* Rehash once the hash table is at least 3/4 full. */
	if (pdata->index_size * 3 <= pdata->nr_objects * 4)
		rehash_objects(pdata);
	else
		pdata->index[index_pos] = pdata->nr_objects;

	if (pdata->in_pack)
		pdata->in_pack[pdata->nr_objects - 1] = NULL;

	return new_entry;
}
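
Finally, a hedged sketch of the intended calling pattern, loosely
following how builtin/pack-objects.c of this era adds objects: probe
with packlist_find(), and on a miss reuse the reported slot for
packlist_alloc(). The find_or_add() wrapper and its names are
illustrative assumptions, not part of this file.

/*
 * Illustrative caller (hypothetical wrapper).  packlist_find()
 * reports the probe slot via index_pos even on a miss, so the slot
 * can be handed straight to packlist_alloc() without a second hash
 * lookup.  If packlist_alloc() triggers a rehash, the slot it was
 * given is ignored and recomputed internally.
 */
static struct object_entry *find_or_add(struct packing_data *pdata,
					const struct object_id *oid)
{
	uint32_t index_pos = 0;
	struct object_entry *entry;

	entry = packlist_find(pdata, oid->hash, &index_pos);
	if (!entry) {
		entry = packlist_alloc(pdata, oid->hash, index_pos);
		/* the new entry is zeroed except for its object id */
	}
	return entry;
}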