#include "builtin.h"
#include "config.h"
#include "delta.h"
#include "pack.h"
#include "csum-file.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "progress.h"
#include "fsck.h"
#include "exec-cmd.h"
#include "streaming.h"
#include "thread-utils.h"
#include "packfile.h"
#include "object-store.h"
#include "promisor-remote.h"

static const char index_pack_usage[] =
"git index-pack [-v] [-o <index-file>] [--keep | --keep=<msg>] [--verify] [--strict] (<pack-file> | --stdin [--fix-thin] [<pack-file>])";

struct object_entry {
	struct pack_idx_entry idx;
	unsigned long size;
	unsigned char hdr_size;
	signed char type;
	signed char real_type;
};

struct object_stat {
	unsigned delta_depth;
	int base_object_no;
};

struct base_data {
	struct base_data *base;
	struct base_data *child;
	struct object_entry *obj;
	void *data;
	unsigned long size;
	int ref_first, ref_last;
	int ofs_first, ofs_last;
};

struct thread_local {
	pthread_t thread;
	struct base_data *base_cache;
	size_t base_cache_used;
	int pack_fd;
};

/* Remember to update object flag allocation in object.h */
#define FLAG_LINK (1u<<20)
#define FLAG_CHECKED (1u<<21)

/*
 * The delta bookkeeping is kept in two arrays instead of one array
 * of entries with a union for the base: an ofs-delta base needs only
 * an 8-byte offset while a ref-delta base needs a full object name,
 * so the union wasted (20 - 8) bytes per entry.  Packers prefer
 * ofs-delta, so ref_deltas[] typically stays small or entirely
 * unallocated; it is grown on demand as ref-deltas are seen.
 */
struct ofs_delta_entry {
	off_t offset;
	int obj_no;
};

struct ref_delta_entry {
	struct object_id oid;
	int obj_no;
};

static struct object_entry *objects;
static struct object_stat *obj_stat;
static struct ofs_delta_entry *ofs_deltas;
static struct ref_delta_entry *ref_deltas;
static struct thread_local nothread_data;
static int nr_objects;
static int nr_ofs_deltas;
static int nr_ref_deltas;
static int ref_deltas_alloc;
static int nr_resolved_deltas;
static int nr_threads;

static int from_stdin;
static int strict;
static int do_fsck_object;
static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
static int verbose;
/*
 * Kept separate from "verbose" so that receive-pack can ask for only
 * the "Resolving deltas" meter: on a half-duplex connection such as
 * git-over-http, nothing beyond a small fixed amount of output may be
 * written before the whole pack has been received, or the response
 * can get stuck in (and even deadlock on) a webserver buffer.
 */
static int show_resolving_progress;
static int show_stat;
static int check_self_contained_and_connected;

static struct progress *progress;

/* We always read in 4kB chunks. */
static unsigned char input_buffer[4096];
static unsigned int input_offset, input_len;
static off_t consumed_bytes;
static off_t max_input_size;
static unsigned deepest_delta;
static git_hash_ctx input_ctx;
static uint32_t input_crc32;
static int input_fd, output_fd;
static const char *curr_pack;

static struct thread_local *thread_data;
static int nr_dispatched;
static int threads_active;
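
/*
 * Delta resolution may run on nr_threads worker threads.  All of the
 * locking below goes through lock_mutex()/unlock_mutex(), which are
 * no-ops while threads_active is false, so the same code paths also
 * serve the single-threaded case.
 */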
static pthread_mutex_t read_mutex;
#define read_lock() lock_mutex(&read_mutex)
#define read_unlock() unlock_mutex(&read_mutex)

static pthread_mutex_t counter_mutex;
#define counter_lock() lock_mutex(&counter_mutex)
#define counter_unlock() unlock_mutex(&counter_mutex)

static pthread_mutex_t work_mutex;
#define work_lock() lock_mutex(&work_mutex)
#define work_unlock() unlock_mutex(&work_mutex)

static pthread_mutex_t deepest_delta_mutex;
#define deepest_delta_lock() lock_mutex(&deepest_delta_mutex)
#define deepest_delta_unlock() unlock_mutex(&deepest_delta_mutex)

/*
 * If the same base object appears more than once in the pack, the
 * deltas depending on it are visited once per copy, so two threads
 * can race when comparing and flipping a delta's real_type from
 * OBJ_{REF,OFS}_DELTA to the resolved type.  Claiming a delta is
 * therefore done as a mutex-protected compare-and-swap; hardware
 * c-a-s primitives were judged not worth the portability burden
 * since lock contention here is insignificant.  Only the
 * OBJ_REF_DELTA path needs this: an OBJ_OFS_DELTA names its base by
 * offset and so has exactly one base.
 */
static pthread_mutex_t type_cas_mutex;
#define type_cas_lock() lock_mutex(&type_cas_mutex)
#define type_cas_unlock() unlock_mutex(&type_cas_mutex)

static pthread_key_t key;

static inline void lock_mutex(pthread_mutex_t *mutex)
{
	if (threads_active)
		pthread_mutex_lock(mutex);
}

static inline void unlock_mutex(pthread_mutex_t *mutex)
{
	if (threads_active)
		pthread_mutex_unlock(mutex);
}

/*
 * Mutexes and condition variables can't be statically initialized
 * on Windows.
 */
static void init_thread(void)
{
	int i;
	init_recursive_mutex(&read_mutex);
	pthread_mutex_init(&counter_mutex, NULL);
	pthread_mutex_init(&work_mutex, NULL);
	pthread_mutex_init(&type_cas_mutex, NULL);
	if (show_stat)
		pthread_mutex_init(&deepest_delta_mutex, NULL);
	pthread_key_create(&key, NULL);
	thread_data = xcalloc(nr_threads, sizeof(*thread_data));
	for (i = 0; i < nr_threads; i++) {
		/* each thread needs its own fd to pread() the pack safely */
		thread_data[i].pack_fd = open(curr_pack, O_RDONLY);
		if (thread_data[i].pack_fd == -1)
			die_errno(_("unable to open %s"), curr_pack);
	}

	threads_active = 1;
}

static void cleanup_thread(void)
{
	int i;
	if (!threads_active)
		return;
	threads_active = 0;
	pthread_mutex_destroy(&read_mutex);
	pthread_mutex_destroy(&counter_mutex);
	pthread_mutex_destroy(&work_mutex);
	pthread_mutex_destroy(&type_cas_mutex);
	if (show_stat)
		pthread_mutex_destroy(&deepest_delta_mutex);
	for (i = 0; i < nr_threads; i++)
		close(thread_data[i].pack_fd);
	pthread_key_delete(key);
	free(thread_data);
}
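
/*
 * mark_link() is an fsck walk callback (its caller is outside this
 * excerpt): each object referenced by a newly indexed object gets
 * tagged FLAG_LINK, and check_object()/check_objects() below verify
 * that every tagged link is satisfied locally, counting the
 * "foreign" ones that are not.
 */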
static int mark_link(struct object *obj, int type, void *data, struct fsck_options *options)
{
	if (!obj)
		return -1;

	if (type != OBJ_ANY && obj->type != type)
		die(_("object type mismatch at %s"), oid_to_hex(&obj->oid));

	obj->flags |= FLAG_LINK;
	return 0;
}

/*
 * The content of each linked object must have been checked
 * or it must already be present in the object database.
 */
static unsigned check_object(struct object *obj)
{
	if (!obj)
		return 0;

	if (!(obj->flags & FLAG_LINK))
		return 0;

	if (!(obj->flags & FLAG_CHECKED)) {
		unsigned long size;
		int type = oid_object_info(the_repository, &obj->oid, &size);
		if (type <= 0)
			die(_("did not receive expected object %s"),
			    oid_to_hex(&obj->oid));
		if (type != obj->type)
			die(_("object %s: expected type %s, found %s"),
			    oid_to_hex(&obj->oid),
			    type_name(obj->type), type_name(type));
		obj->flags |= FLAG_CHECKED;
		return 1;
	}

	return 0;
}

static unsigned check_objects(void)
{
	unsigned i, max, foreign_nr = 0;

	max = get_max_object_index();

	if (verbose)
		progress = start_delayed_progress(_("Checking objects"), max);

	for (i = 0; i < max; i++) {
		foreign_nr += check_object(get_indexed_object(i));
		display_progress(progress, i + 1);
	}

	stop_progress(&progress);
	return foreign_nr;
}
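
/*
 * Input streaming layer: the pack is read through input_buffer in
 * 4kB chunks.  flush() retires the consumed prefix, appending it to
 * the output pack (if any) and folding it into the whole-pack hash;
 * fill(n) guarantees at least n readable bytes; use(n) marks n bytes
 * consumed while updating the per-object CRC32 and the running byte
 * count.  The typical caller peeks, then consumes:
 *
 *	p = fill(1);
 *	c = *p;
 *	use(1);
 */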
/* Discard the consumed part of the input buffer. */
static void flush(void)
{
	if (input_offset) {
		if (output_fd >= 0)
			write_or_die(output_fd, input_buffer, input_offset);
		the_hash_algo->update_fn(&input_ctx, input_buffer, input_offset);
		memmove(input_buffer, input_buffer + input_offset, input_len);
		input_offset = 0;
	}
}

/*
 * Make sure at least "min" bytes are available in the buffer, and
 * return the pointer to the buffer.
 */
static void *fill(int min)
{
	if (min <= input_len)
		return input_buffer + input_offset;
	if (min > sizeof(input_buffer))
		die(Q_("cannot fill %d byte",
		       "cannot fill %d bytes",
		       min), min);
	flush();
	do {
		ssize_t ret = xread(input_fd, input_buffer + input_len,
				sizeof(input_buffer) - input_len);
		if (ret <= 0) {
			if (!ret)
				die(_("early EOF"));
			die_errno(_("read error on input"));
		}
		input_len += ret;
		if (from_stdin)
			display_throughput(progress, consumed_bytes + input_len);
	} while (input_len < min);
	return input_buffer;
}

static void use(int bytes)
{
	if (bytes > input_len)
		die(_("used more bytes than were available"));
	input_crc32 = crc32(input_crc32, input_buffer + input_offset, bytes);
	input_len -= bytes;
	input_offset += bytes;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(consumed_bytes, bytes))
		die(_("pack too large for current definition of off_t"));
	consumed_bytes += bytes;
	if (max_input_size && consumed_bytes > max_input_size)
		die(_("pack exceeds maximum allowed size"));
}

static const char *open_pack_file(const char *pack_name)
{
	if (from_stdin) {
		input_fd = 0;
		if (!pack_name) {
			struct strbuf tmp_file = STRBUF_INIT;
			output_fd = odb_mkstemp(&tmp_file,
						"pack/tmp_pack_XXXXXX");
			pack_name = strbuf_detach(&tmp_file, NULL);
		} else {
			output_fd = open(pack_name, O_CREAT|O_EXCL|O_RDWR, 0600);
			if (output_fd < 0)
				die_errno(_("unable to create '%s'"), pack_name);
		}
		nothread_data.pack_fd = output_fd;
	} else {
		input_fd = open(pack_name, O_RDONLY);
		if (input_fd < 0)
			die_errno(_("cannot open packfile '%s'"), pack_name);
		output_fd = -1;
		nothread_data.pack_fd = input_fd;
	}
	the_hash_algo->init_fn(&input_ctx);
	return pack_name;
}
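
/*
 * A pack stream begins with a 12-byte header: the signature "PACK",
 * a version number and the object count, each a 4-byte field in
 * network byte order (hence the ntohl() calls below).
 */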
static void parse_pack_header(void)
{
	struct pack_header *hdr = fill(sizeof(struct pack_header));

	/* Header consistency check */
	if (hdr->hdr_signature != htonl(PACK_SIGNATURE))
		die(_("pack signature mismatch"));
	if (!pack_version_ok(hdr->hdr_version))
		die(_("pack version %"PRIu32" unsupported"),
			ntohl(hdr->hdr_version));

	nr_objects = ntohl(hdr->hdr_entries);
	use(sizeof(struct pack_header));
}

static NORETURN void bad_object(off_t offset, const char *format, ...)
	__attribute__((format (printf, 2, 3)));

static NORETURN void bad_object(off_t offset, const char *format, ...)
{
	va_list params;
	char buf[1024];

	va_start(params, format);
	vsnprintf(buf, sizeof(buf), format, params);
	va_end(params);
	die(_("pack has bad object at offset %"PRIuMAX": %s"),
	    (uintmax_t)offset, buf);
}
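
/*
 * Per-thread state hangs off a pthread key; when threading is not
 * active (or not compiled in), nothread_data stands in so callers
 * do not need to care which mode they are running in.
 */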
static inline struct thread_local *get_thread_data(void)
{
	if (HAVE_THREADS) {
		if (threads_active)
			return pthread_getspecific(key);
		assert(!threads_active &&
		       "This should only be reached when all threads are gone");
	}
	return &nothread_data;
}

static void set_thread_data(struct thread_local *data)
{
	if (threads_active)
		pthread_setspecific(key, data);
}
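
/*
 * base_data nodes form a chain from a root object down through the
 * deltas currently being resolved against it.  Each thread owns one
 * chain, and prune_base_data() evicts cached (re-inflatable) base
 * buffers once base_cache_used exceeds delta_base_cache_limit.
 */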
static struct base_data *alloc_base_data(void)
{
	struct base_data *base = xcalloc(1, sizeof(struct base_data));
	base->ref_last = -1;
	base->ofs_last = -1;
	return base;
}

static void free_base_data(struct base_data *c)
{
	if (c->data) {
		FREE_AND_NULL(c->data);
		get_thread_data()->base_cache_used -= c->size;
	}
}

static void prune_base_data(struct base_data *retain)
{
	struct base_data *b;
	struct thread_local *data = get_thread_data();
	for (b = data->base_cache;
	     data->base_cache_used > delta_base_cache_limit && b;
	     b = b->child) {
		if (b->data && b != retain)
			free_base_data(b);
	}
}

static void link_base_data(struct base_data *base, struct base_data *c)
{
	if (base)
		base->child = c;
	else
		get_thread_data()->base_cache = c;

	c->base = base;
	c->child = NULL;
	if (c->data)
		get_thread_data()->base_cache_used += c->size;
	prune_base_data(c);
}

static void unlink_base_data(struct base_data *c)
{
	struct base_data *base = c->base;
	if (base)
		base->child = NULL;
	else
		get_thread_data()->base_cache = NULL;
	free_base_data(c);
}

static int is_delta_type(enum object_type type)
{
	return (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA);
}
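
/*
 * Inflate the current entry's data from the input stream, hashing it
 * as it streams by (for non-delta objects).  The data is returned in
 * a freshly allocated buffer, except that blobs larger than
 * big_file_threshold are inflated through a small fixed buffer and
 * NULL is returned: only their hash is needed at this stage.
 */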
static void *unpack_entry_data(off_t offset, unsigned long size,
			       enum object_type type, struct object_id *oid)
{
	static char fixed_buf[8192];
	int status;
	git_zstream stream;
	void *buf;
	git_hash_ctx c;
	char hdr[32];
	int hdrlen;

	if (!is_delta_type(type)) {
		hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX,
				   type_name(type), (uintmax_t)size) + 1;
		the_hash_algo->init_fn(&c);
		the_hash_algo->update_fn(&c, hdr, hdrlen);
	} else
		oid = NULL;
	if (type == OBJ_BLOB && size > big_file_threshold)
		buf = fixed_buf;
	else
		buf = xmallocz(size);

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	stream.next_out = buf;
	stream.avail_out = buf == fixed_buf ? sizeof(fixed_buf) : size;

	do {
		unsigned char *last_out = stream.next_out;
		stream.next_in = fill(1);
		stream.avail_in = input_len;
		status = git_inflate(&stream, 0);
		use(input_len - stream.avail_in);
		if (oid)
			the_hash_algo->update_fn(&c, last_out, stream.next_out - last_out);
		if (buf == fixed_buf) {
			stream.next_out = buf;
			stream.avail_out = sizeof(fixed_buf);
		}
	} while (status == Z_OK);
	if (stream.total_out != size || status != Z_STREAM_END)
		bad_object(offset, _("inflate returned %d"), status);
	git_inflate_end(&stream);
	if (oid)
		the_hash_algo->final_fn(oid->hash, &c);
	return buf == fixed_buf ? NULL : buf;
}
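
/*
 * Each pack entry opens with a variable-length header: bits 6-4 of
 * the first byte hold the object type and its low nibble the low 4
 * bits of the size; while the MSB is set, every following byte adds
 * 7 more size bits.  OBJ_REF_DELTA is then followed by the base's
 * object name, OBJ_OFS_DELTA by a big-endian base-128 number (with a
 * +1 bias applied per continuation byte) giving the distance back to
 * the base entry.
 */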
static void *unpack_raw_entry(struct object_entry *obj,
			      off_t *ofs_offset,
			      struct object_id *ref_oid,
			      struct object_id *oid)
{
	unsigned char *p;
	/*
	 * "c" is unsigned long rather than unsigned char: integer
	 * promotion turns a byte shifted left by 24 or more bits into
	 * a signed int, which could sign-extend when converted to a
	 * wider unsigned type.
	 */
	unsigned long size, c;
	off_t base_offset;
	unsigned shift;
	void *data;

	obj->idx.offset = consumed_bytes;
	input_crc32 = crc32(0, NULL, 0);

	p = fill(1);
	c = *p;
	use(1);
	obj->type = (c >> 4) & 7;
	size = (c & 15);
	shift = 4;
	while (c & 0x80) {
		p = fill(1);
		c = *p;
		use(1);
		size += (c & 0x7f) << shift;
		shift += 7;
	}
	obj->size = size;

	switch (obj->type) {
	case OBJ_REF_DELTA:
		hashcpy(ref_oid->hash, fill(the_hash_algo->rawsz));
		use(the_hash_algo->rawsz);
		break;
	case OBJ_OFS_DELTA:
		p = fill(1);
		c = *p;
		use(1);
		base_offset = c & 127;
		while (c & 128) {
			base_offset += 1;
			if (!base_offset || MSB(base_offset, 7))
				bad_object(obj->idx.offset, _("offset value overflow for delta base object"));
			p = fill(1);
			c = *p;
			use(1);
			base_offset = (base_offset << 7) + (c & 127);
		}
		*ofs_offset = obj->idx.offset - base_offset;
		if (*ofs_offset <= 0 || *ofs_offset >= obj->idx.offset)
			bad_object(obj->idx.offset, _("delta base offset is out of bound"));
		break;
	case OBJ_COMMIT:
	case OBJ_TREE:
	case OBJ_BLOB:
	case OBJ_TAG:
		break;
	default:
		bad_object(obj->idx.offset, _("unknown object type %d"), obj->type);
	}
	obj->hdr_size = consumed_bytes - obj->idx.offset;

	data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid);
	obj->idx.crc32 = input_crc32;
	return data;
}
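
/*
 * Re-read and inflate an object's compressed bytes from the
 * already-received pack data, using the calling thread's private
 * pack_fd (xpread() makes this safe without seeking).  With a
 * consume() callback the output is streamed through a 64K window;
 * without one, the fully inflated object is returned.
 */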
static void *unpack_data(struct object_entry *obj,
			 int (*consume)(const unsigned char *, unsigned long, void *),
			 void *cb_data)
{
	off_t from = obj[0].idx.offset + obj[0].hdr_size;
	off_t len = obj[1].idx.offset - from;
	unsigned char *data, *inbuf;
	git_zstream stream;
	int status;

	data = xmallocz(consume ? 64*1024 : obj->size);
	inbuf = xmalloc((len < 64*1024) ? (int)len : 64*1024);

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	stream.next_out = data;
	stream.avail_out = consume ? 64*1024 : obj->size;

	do {
		ssize_t n = (len < 64*1024) ? (ssize_t)len : 64*1024;
		n = xpread(get_thread_data()->pack_fd, inbuf, n, from);
		if (n < 0)
			die_errno(_("cannot pread pack file"));
		if (!n)
			die(Q_("premature end of pack file, %"PRIuMAX" byte missing",
			       "premature end of pack file, %"PRIuMAX" bytes missing",
			       (unsigned int)len),
			    (uintmax_t)len);
		from += n;
		len -= n;
		stream.next_in = inbuf;
		stream.avail_in = n;
		if (!consume)
			status = git_inflate(&stream, 0);
		else {
			/*
			 * The 64K output window can fill up well before
			 * a 64K input chunk is used up, so keep
			 * inflating, handing each output chunk to
			 * consume(), until the input buffer is drained.
			 */
			do {
				status = git_inflate(&stream, 0);
				if (consume(data, stream.next_out - data, cb_data)) {
					free(inbuf);
					free(data);
					return NULL;
				}
				stream.next_out = data;
				stream.avail_out = 64*1024;
			} while (status == Z_OK && stream.avail_in);
		}
	} while (len && status == Z_OK && !stream.avail_in);

	/* This has been inflated OK when first encountered, so... */
	if (status != Z_STREAM_END || stream.total_out != obj->size)
		die(_("serious inflate inconsistency"));

	git_inflate_end(&stream);
	free(inbuf);
	if (consume) {
		FREE_AND_NULL(data);
	}
	return data;
}

static void *get_data_from_pack(struct object_entry *obj)
{
	return unpack_data(obj, NULL, NULL);
}

static int compare_ofs_delta_bases(off_t offset1, off_t offset2,
				   enum object_type type1,
				   enum object_type type2)
{
	int cmp = type1 - type2;
	if (cmp)
		return cmp;
	/*
	 * Compare the offsets directly and return constants: returning
	 * "offset1 - offset2" would truncate off_t to int and produce
	 * nonsense whenever the difference exceeds what an int can
	 * hold, e.g. in packs larger than 2G.
	 */
	return offset1 < offset2 ? -1 :
	       offset1 > offset2 ?  1 :
	       0;
}
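
/*
 * Binary search ofs_deltas[], assumed sorted by base type and then
 * base offset (see compare_ofs_delta_bases() above), for a delta
 * that depends on the given base.
 */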
|
|
|
|
|
2015-04-18 12:47:05 +02:00
|
|
|
static int find_ofs_delta(const off_t offset, enum object_type type)
|
2005-10-12 21:01:31 +02:00
|
|
|
{
|
2015-04-18 12:47:05 +02:00
|
|
|
int first = 0, last = nr_ofs_deltas;
|
|
|
|
|
|
|
|
while (first < last) {
|
2017-10-08 20:29:37 +02:00
|
|
|
int next = first + (last - first) / 2;
|
2015-04-18 12:47:05 +02:00
|
|
|
struct ofs_delta_entry *delta = &ofs_deltas[next];
|
|
|
|
int cmp;
|
|
|
|
|
|
|
|
cmp = compare_ofs_delta_bases(offset, delta->offset,
|
|
|
|
type, objects[delta->obj_no].type);
|
|
|
|
if (!cmp)
|
|
|
|
return next;
|
|
|
|
if (cmp < 0) {
|
|
|
|
last = next;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
first = next+1;
|
|
|
|
}
|
|
|
|
return -first-1;
|
2005-10-12 21:01:31 +02:00
|
|
|
}
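find_ofs_delta() follows the classic binary-search return convention: a non-negative value is the index of a hit, and a negative value encodes the insertion point. A caller can decode it like this (sketch):

    int pos = find_ofs_delta(offset, type);
    if (pos >= 0) {
            /* hit: ofs_deltas[pos] carries this base offset and type */
    } else {
            int insertion_point = -pos - 1; /* where such an entry would sort */
    }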
|
|
|
|
|
2015-04-18 12:47:05 +02:00
|
|
|
static void find_ofs_delta_children(off_t offset,
|
|
|
|
int *first_index, int *last_index,
|
|
|
|
enum object_type type)
|
2005-10-12 21:01:31 +02:00
|
|
|
{
|
2015-04-18 12:47:05 +02:00
|
|
|
int first = find_ofs_delta(offset, type);
|
2005-10-12 21:01:31 +02:00
|
|
|
int last = first;
|
2015-04-18 12:47:05 +02:00
|
|
|
int end = nr_ofs_deltas - 1;
|
2005-10-12 21:01:31 +02:00
|
|
|
|
2008-10-17 21:57:58 +02:00
|
|
|
if (first < 0) {
|
|
|
|
*first_index = 0;
|
|
|
|
*last_index = -1;
|
|
|
|
return;
|
|
|
|
}
|
2015-04-18 12:47:05 +02:00
|
|
|
while (first > 0 && ofs_deltas[first - 1].offset == offset)
|
2005-10-12 21:01:31 +02:00
|
|
|
--first;
|
2015-04-18 12:47:05 +02:00
|
|
|
while (last < end && ofs_deltas[last + 1].offset == offset)
|
|
|
|
++last;
|
|
|
|
*first_index = first;
|
|
|
|
*last_index = last;
|
|
|
|
}
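find_ofs_delta_children() widens a hit into the inclusive range [first, last] of all deltas sharing the same base offset; on a miss it sets first = 0 and last = -1, so a plain loop naturally does nothing. A sketch of the intended use (base_offset, base_type and resolve_one() are illustrative placeholders, not names from this file):

    int first, last, i;

    find_ofs_delta_children(base_offset, &first, &last, base_type);
    for (i = first; i <= last; i++)
            resolve_one(&ofs_deltas[i]);    /* stands in for the resolve step */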
|
|
|
|
|
2018-03-12 03:27:37 +01:00
|
|
|
static int compare_ref_delta_bases(const struct object_id *oid1,
|
|
|
|
const struct object_id *oid2,
|
2015-04-18 12:47:05 +02:00
|
|
|
enum object_type type1,
|
|
|
|
enum object_type type2)
|
|
|
|
{
|
|
|
|
int cmp = type1 - type2;
|
|
|
|
if (cmp)
|
|
|
|
return cmp;
|
2018-03-12 03:27:37 +01:00
|
|
|
return oidcmp(oid1, oid2);
|
2015-04-18 12:47:05 +02:00
|
|
|
}
|
|
|
|
|
2018-03-12 03:27:37 +01:00
|
|
|
static int find_ref_delta(const struct object_id *oid, enum object_type type)
|
2015-04-18 12:47:05 +02:00
|
|
|
{
|
|
|
|
int first = 0, last = nr_ref_deltas;
|
|
|
|
|
|
|
|
while (first < last) {
|
2017-10-08 20:29:37 +02:00
|
|
|
int next = first + (last - first) / 2;
|
2015-04-18 12:47:05 +02:00
|
|
|
struct ref_delta_entry *delta = &ref_deltas[next];
|
|
|
|
int cmp;
|
|
|
|
|
2018-03-12 03:27:37 +01:00
|
|
|
cmp = compare_ref_delta_bases(oid, &delta->oid,
|
2015-04-18 12:47:05 +02:00
|
|
|
type, objects[delta->obj_no].type);
|
|
|
|
if (!cmp)
|
|
|
|
return next;
|
|
|
|
if (cmp < 0) {
|
|
|
|
last = next;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
first = next+1;
|
|
|
|
}
|
|
|
|
return -first-1;
|
|
|
|
}
|
|
|
|
|
2018-03-12 03:27:37 +01:00
|
|
|
static void find_ref_delta_children(const struct object_id *oid,
|
2015-04-18 12:47:05 +02:00
|
|
|
int *first_index, int *last_index,
|
|
|
|
enum object_type type)
|
|
|
|
{
|
2018-03-12 03:27:37 +01:00
|
|
|
int first = find_ref_delta(oid, type);
|
2015-04-18 12:47:05 +02:00
|
|
|
int last = first;
|
|
|
|
int end = nr_ref_deltas - 1;
|
|
|
|
|
|
|
|
if (first < 0) {
|
|
|
|
*first_index = 0;
|
|
|
|
*last_index = -1;
|
|
|
|
return;
|
|
|
|
}
|
convert "oidcmp() == 0" to oideq()
Using the more restrictive oideq() should, in the long run,
give the compiler more opportunities to optimize these
callsites. For now, this conversion should be a complete
noop with respect to the generated code.
The result is also perhaps a little more readable, as it
avoids the "zero is equal" idiom. Since it's so prevalent in
C, I think seasoned programmers tend not to even notice it
anymore, but it can sometimes make for awkward double
negations (e.g., we can drop a few !!oidcmp() instances
here).
This patch was generated almost entirely by the included
coccinelle patch. This mechanical conversion should be
completely safe, because we check explicitly for cases where
oidcmp() is compared to 0, which is what oideq() is doing
under the hood. Note that we don't have to catch "!oidcmp()"
separately; coccinelle's standard isomorphisms make sure the
two are treated equivalently.
I say "almost" because I did hand-edit the coccinelle output
to fix up a few style violations (it mostly keeps the
original formatting, but sometimes unwraps long lines).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-08-28 23:22:40 +02:00
|
|
|
while (first > 0 && oideq(&ref_deltas[first - 1].oid, oid))
|
2015-04-18 12:47:05 +02:00
|
|
|
--first;
|
convert "oidcmp() == 0" to oideq()
Using the more restrictive oideq() should, in the long run,
give the compiler more opportunities to optimize these
callsites. For now, this conversion should be a complete
noop with respect to the generated code.
The result is also perhaps a little more readable, as it
avoids the "zero is equal" idiom. Since it's so prevalent in
C, I think seasoned programmers tend not to even notice it
anymore, but it can sometimes make for awkward double
negations (e.g., we can drop a few !!oidcmp() instances
here).
This patch was generated almost entirely by the included
coccinelle patch. This mechanical conversion should be
completely safe, because we check explicitly for cases where
oidcmp() is compared to 0, which is what oideq() is doing
under the hood. Note that we don't have to catch "!oidcmp()"
separately; coccinelle's standard isomorphisms make sure the
two are treated equivalently.
I say "almost" because I did hand-edit the coccinelle output
to fix up a few style violations (it mostly keeps the
original formatting, but sometimes unwraps long lines).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-08-28 23:22:40 +02:00
|
|
|
while (last < end && oideq(&ref_deltas[last + 1].oid, oid))
|
2005-10-12 21:01:31 +02:00
|
|
|
++last;
|
|
|
|
*first_index = first;
|
|
|
|
*last_index = last;
|
|
|
|
}
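The oideq() conversion annotated above is purely an idiom change. Shown side by side on the scan from this function (sketch):

    /* before: "zero means equal", easy to misread */
    while (last < end && !oidcmp(&ref_deltas[last + 1].oid, oid))
            ++last;

    /* after: oideq() reads as the equality it tests */
    while (last < end && oideq(&ref_deltas[last + 1].oid, oid))
            ++last;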
|
|
|
|
|
2012-05-24 15:55:44 +02:00
|
|
|
struct compare_data {
|
|
|
|
struct object_entry *entry;
|
|
|
|
struct git_istream *st;
|
|
|
|
unsigned char *buf;
|
|
|
|
unsigned long buf_size;
|
|
|
|
};
|
|
|
|
|
|
|
|
static int compare_objects(const unsigned char *buf, unsigned long size,
|
|
|
|
void *cb_data)
|
|
|
|
{
|
|
|
|
struct compare_data *data = cb_data;
|
|
|
|
|
|
|
|
if (data->buf_size < size) {
|
|
|
|
free(data->buf);
|
|
|
|
data->buf = xmalloc(size);
|
|
|
|
data->buf_size = size;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (size) {
|
|
|
|
ssize_t len = read_istream(data->st, data->buf, size);
|
|
|
|
if (len == 0)
|
|
|
|
die(_("SHA1 COLLISION FOUND WITH %s !"),
|
2017-05-07 00:10:11 +02:00
|
|
|
oid_to_hex(&data->entry->idx.oid));
|
2012-05-24 15:55:44 +02:00
|
|
|
if (len < 0)
|
|
|
|
die(_("unable to read %s"),
|
2017-05-07 00:10:11 +02:00
|
|
|
oid_to_hex(&data->entry->idx.oid));
|
2012-05-24 15:55:44 +02:00
|
|
|
if (memcmp(buf, data->buf, len))
|
|
|
|
die(_("SHA1 COLLISION FOUND WITH %s !"),
|
2017-05-07 00:10:11 +02:00
|
|
|
oid_to_hex(&data->entry->idx.oid));
|
2012-05-24 15:55:44 +02:00
|
|
|
size -= len;
|
|
|
|
buf += len;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int check_collison(struct object_entry *entry)
|
|
|
|
{
|
|
|
|
struct compare_data data;
|
|
|
|
enum object_type type;
|
|
|
|
unsigned long size;
|
|
|
|
|
|
|
|
if (entry->size <= big_file_threshold || entry->type != OBJ_BLOB)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
memset(&data, 0, sizeof(data));
|
|
|
|
data.entry = entry;
|
2020-01-30 21:32:20 +01:00
|
|
|
data.st = open_istream(the_repository, &entry->idx.oid, &type, &size,
|
|
|
|
NULL);
|
2012-05-24 15:55:44 +02:00
|
|
|
if (!data.st)
|
|
|
|
return -1;
|
|
|
|
if (size != entry->size || type != entry->type)
|
|
|
|
die(_("SHA1 COLLISION FOUND WITH %s !"),
|
2017-05-07 00:10:11 +02:00
|
|
|
oid_to_hex(&entry->idx.oid));
|
2012-05-24 15:55:44 +02:00
|
|
|
unpack_data(entry, compare_objects, &data);
|
|
|
|
close_istream(data.st);
|
|
|
|
free(data.buf);
|
|
|
|
return 0;
|
|
|
|
}
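The streaming comparison above exists so that a huge blob never needs a second full in-memory copy. The core idea, reduced to a standalone sketch with stdio standing in for the git_istream API (an assumption made purely for illustration):

    #include <stdio.h>
    #include <string.h>

    /* Compare an in-memory buffer against a streamed source, chunk by chunk. */
    static int chunked_equal(const unsigned char *buf, size_t size, FILE *stream)
    {
            unsigned char chunk[4096];

            while (size) {
                    size_t want = size < sizeof(chunk) ? size : sizeof(chunk);
                    size_t len = fread(chunk, 1, want, stream);

                    if (!len)
                            return 0;       /* stream ended early: not equal */
                    if (memcmp(buf, chunk, len))
                            return 0;       /* first differing chunk found */
                    buf += len;
                    size -= len;
            }
            return 1;
    }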
|
|
|
|
|
2012-05-23 16:09:47 +02:00
|
|
|
static void sha1_object(const void *data, struct object_entry *obj_entry,
|
|
|
|
unsigned long size, enum object_type type,
|
2017-05-07 00:10:13 +02:00
|
|
|
const struct object_id *oid)
|
2005-10-12 21:01:31 +02:00
|
|
|
{
|
2012-05-23 16:09:47 +02:00
|
|
|
void *new_data = NULL;
|
2016-12-16 22:43:22 +01:00
|
|
|
int collision_test_needed = 0;
|
2012-05-23 16:09:47 +02:00
|
|
|
|
|
|
|
assert(data || obj_entry);
|
|
|
|
|
2016-12-16 22:43:22 +01:00
|
|
|
if (startup_info->have_repository) {
|
|
|
|
read_lock();
|
2017-06-22 02:40:24 +02:00
|
|
|
collision_test_needed =
|
2019-01-07 09:37:54 +01:00
|
|
|
has_object_file_with_flags(oid, OBJECT_INFO_QUICK);
|
2016-12-16 22:43:22 +01:00
|
|
|
read_unlock();
|
|
|
|
}
|
2012-05-24 15:55:44 +02:00
|
|
|
|
|
|
|
if (collision_test_needed && !data) {
|
|
|
|
read_lock();
|
|
|
|
if (!check_collison(obj_entry))
|
|
|
|
collision_test_needed = 0;
|
|
|
|
read_unlock();
|
|
|
|
}
|
|
|
|
if (collision_test_needed) {
|
2007-03-20 20:32:35 +01:00
|
|
|
void *has_data;
|
|
|
|
enum object_type has_type;
|
|
|
|
unsigned long has_size;
|
2012-05-24 15:55:44 +02:00
|
|
|
read_lock();
|
2018-04-25 20:20:59 +02:00
|
|
|
has_type = oid_object_info(the_repository, oid, &has_size);
|
index-pack: detect local corruption in collision check
When we notice that we have a local copy of an incoming
object, we compare the two objects to make sure we haven't
found a collision. Before we get to the actual object
bytes, though, we compare the type and size from
sha1_object_info().
If our local object is corrupted, then the type will be
OBJ_BAD, which obviously will not match the incoming type,
and we'll report "SHA1 COLLISION FOUND" (with capital
letters and everything). This is confusing, as the problem
is not a collision but rather local corruption. We should
report that instead (just like we do if reading the rest of
the object content fails a few lines later).
Note that we _could_ just ignore the error and mark it as a
non-collision. That would let you "git fetch" to replace a
corrupted object. But it's not a very reliable method for
repairing a repository. The earlier want/have negotiation
tries to get the other side to omit objects we already have,
and it would not realize that we are "missing" this
corrupted object. So we're better off complaining loudly
when we see corruption, and letting the user take more
drastic measures to repair (like making a full clone
elsewhere and copying the pack into place).
Note that the test sets transfer.unpackLimit in the
receiving repository so that we use index-pack (which is
what does the collision check). Normally for such a small
push we'd use unpack-objects, which would simply try to
write the loose object, and discard the new one when we see
that there's already an old one.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-04-01 10:09:32 +02:00
|
|
|
if (has_type < 0)
|
2017-05-07 00:10:13 +02:00
|
|
|
die(_("cannot read existing object info %s"), oid_to_hex(oid));
|
2012-05-24 15:55:44 +02:00
|
|
|
if (has_type != type || has_size != size)
|
2017-05-07 00:10:13 +02:00
|
|
|
die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
|
sha1_file: convert read_sha1_file to struct object_id
Convert read_sha1_file to take a pointer to struct object_id and rename
it read_object_file. Do the same for read_sha1_file_extended.
Convert one use in grep.c to use the new function without any other code
change, since the pointer being passed is a void pointer that is already
initialized with a pointer to struct object_id. Update the declaration
and definitions of the modified functions, and apply the following
semantic patch to convert the remaining callers:
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1.hash, E2, E3)
+ read_object_file(&E1, E2, E3)
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1->hash, E2, E3)
+ read_object_file(E1, E2, E3)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1.hash, E2, E3, E4)
+ read_object_file_extended(&E1, E2, E3, E4)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1->hash, E2, E3, E4)
+ read_object_file_extended(E1, E2, E3, E4)
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-12 03:27:53 +01:00
|
|
|
has_data = read_object_file(oid, &has_type, &has_size);
|
2012-05-06 14:31:55 +02:00
|
|
|
read_unlock();
|
2012-05-24 15:55:44 +02:00
|
|
|
if (!data)
|
|
|
|
data = new_data = get_data_from_pack(obj_entry);
|
2007-03-20 20:32:35 +01:00
|
|
|
if (!has_data)
|
2017-05-07 00:10:13 +02:00
|
|
|
die(_("cannot read existing object %s"), oid_to_hex(oid));
|
2007-03-20 20:32:35 +01:00
|
|
|
if (size != has_size || type != has_type ||
|
|
|
|
memcmp(data, has_data, size) != 0)
|
2017-05-07 00:10:13 +02:00
|
|
|
die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
|
2007-04-03 18:33:46 +02:00
|
|
|
free(has_data);
|
2012-05-24 15:55:44 +02:00
|
|
|
}
|
2012-05-06 14:31:55 +02:00
|
|
|
|
2018-03-14 19:42:40 +01:00
|
|
|
if (strict || do_fsck_object) {
|
2012-05-06 14:31:55 +02:00
|
|
|
read_lock();
|
2008-02-25 22:46:12 +01:00
|
|
|
if (type == OBJ_BLOB) {
|
2018-06-29 03:21:55 +02:00
|
|
|
struct blob *blob = lookup_blob(the_repository, oid);
|
2008-02-25 22:46:12 +01:00
|
|
|
if (blob)
|
|
|
|
blob->object.flags |= FLAG_CHECKED;
|
|
|
|
else
|
2017-05-07 00:10:13 +02:00
|
|
|
die(_("invalid blob object %s"), oid_to_hex(oid));
|
index-pack: check .gitmodules files with --strict
Now that the internal fsck code has all of the plumbing we
need, we can start checking incoming .gitmodules files.
Naively, it seems like we would just need to add a call to
fsck_finish() after we've processed all of the objects. And
that would be enough to cover the initial test included
here. But there are two extra bits:
1. We currently don't bother calling fsck_object() at all
for blobs, since it has traditionally been a noop. We'd
actually catch these blobs in fsck_finish() at the end,
but it's more efficient to check them when we already
have the object loaded in memory.
2. The second pass done by fsck_finish() needs to access
the objects, but we're actually indexing the pack in
this process. In theory we could give the fsck code a
special callback for accessing the in-pack data, but
it's actually quite tricky:
a. We don't have an internal efficient index mapping
oids to packfile offsets. We only generate it on
the fly as part of writing out the .idx file.
b. We'd still have to reconstruct deltas, which means
we'd basically have to replicate all of the
reading logic in packfile.c.
Instead, let's avoid running fsck_finish() until after
we've written out the .idx file, and then just add it
to our internal packed_git list.
This does mean that the objects are "in the repository"
before we finish our fsck checks. But unpack-objects
already exhibits this same behavior, and it's an
acceptable tradeoff here for the same reason: the
quarantine mechanism means that pushes will be
fully protected.
In addition to a basic push test in t7415, we add a sneaky
pack that reverses the usual object order in the pack,
requiring that index-pack access the tree and blob during
the "finish" step.
This already works for unpack-objects (since it will have
written out loose objects), but we'll check it with this
sneaky pack for good measure.
Signed-off-by: Jeff King <peff@peff.net>
2018-05-05 01:45:01 +02:00
|
|
|
if (do_fsck_object &&
|
|
|
|
fsck_object(&blob->object, (void *)data, size, &fsck_options))
|
|
|
|
die(_("fsck error in packed object"));
|
2008-02-25 22:46:12 +01:00
|
|
|
} else {
|
|
|
|
struct object *obj;
|
|
|
|
int eaten;
|
|
|
|
void *buf = (void *) data;
|
|
|
|
|
2013-05-26 03:16:16 +02:00
|
|
|
assert(data && "data can only be NULL for large _blobs_");
|
2012-05-23 16:09:47 +02:00
|
|
|
|
2008-02-25 22:46:12 +01:00
|
|
|
/*
|
|
|
|
* we do not need to free the memory here, as the
|
|
|
|
* buf is deleted by the caller.
|
|
|
|
*/
|
2018-06-29 03:21:53 +02:00
|
|
|
obj = parse_object_buffer(the_repository, oid, type,
|
|
|
|
size, buf,
|
object: convert parse_object* to take struct object_id
Make parse_object, parse_object_or_die, and parse_object_buffer take a
pointer to struct object_id. Remove the temporary variables inserted
earlier, since they are no longer necessary. Transform all of the
callers using the following semantic patch:
@@
expression E1;
@@
- parse_object(E1.hash)
+ parse_object(&E1)
@@
expression E1;
@@
- parse_object(E1->hash)
+ parse_object(E1)
@@
expression E1, E2;
@@
- parse_object_or_die(E1.hash, E2)
+ parse_object_or_die(&E1, E2)
@@
expression E1, E2;
@@
- parse_object_or_die(E1->hash, E2)
+ parse_object_or_die(E1, E2)
@@
expression E1, E2, E3, E4, E5;
@@
- parse_object_buffer(E1.hash, E2, E3, E4, E5)
+ parse_object_buffer(&E1, E2, E3, E4, E5)
@@
expression E1, E2, E3, E4, E5;
@@
- parse_object_buffer(E1->hash, E2, E3, E4, E5)
+ parse_object_buffer(E1, E2, E3, E4, E5)
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-05-07 00:10:38 +02:00
|
|
|
&eaten);
|
2008-02-25 22:46:12 +01:00
|
|
|
if (!obj)
|
2018-02-14 19:59:24 +01:00
|
|
|
die(_("invalid %s"), type_name(type));
|
2013-05-26 03:16:17 +02:00
|
|
|
if (do_fsck_object &&
|
2015-06-22 17:25:00 +02:00
|
|
|
fsck_object(obj, buf, size, &fsck_options))
|
2018-05-02 22:37:09 +02:00
|
|
|
die(_("fsck error in packed object"));
|
2018-03-14 19:42:40 +01:00
|
|
|
if (strict && fsck_walk(obj, NULL, &fsck_options))
|
2015-11-10 03:22:28 +01:00
|
|
|
die(_("Not all child objects of %s are reachable"), oid_to_hex(&obj->oid));
|
2008-02-25 22:46:12 +01:00
|
|
|
|
|
|
|
if (obj->type == OBJ_TREE) {
|
|
|
|
struct tree *item = (struct tree *) obj;
|
|
|
|
item->buffer = NULL;
|
2013-06-06 00:37:39 +02:00
|
|
|
obj->parsed = 0;
|
2008-02-25 22:46:12 +01:00
|
|
|
}
|
|
|
|
if (obj->type == OBJ_COMMIT) {
|
|
|
|
struct commit *commit = (struct commit *) obj;
|
2014-06-10 23:44:13 +02:00
|
|
|
if (detach_commit_buffer(commit, NULL) != data)
|
2018-05-02 11:38:39 +02:00
|
|
|
BUG("parse_object_buffer transmogrified our buffer");
|
2008-02-25 22:46:12 +01:00
|
|
|
}
|
|
|
|
obj->flags |= FLAG_CHECKED;
|
|
|
|
}
|
2012-05-06 14:31:55 +02:00
|
|
|
read_unlock();
|
2008-02-25 22:46:12 +01:00
|
|
|
}
|
2012-05-23 16:09:47 +02:00
|
|
|
|
|
|
|
free(new_data);
|
2005-10-12 21:01:31 +02:00
|
|
|
}
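To summarize the collision logic just shown, a comment-only sketch:

    /*
     * sha1_object() collision flow:
     *   object already exists locally?      -> a collision test is needed
     *   large blob with no in-core data?    -> stream-compare (check_collison)
     *   otherwise                           -> read the local copy; compare
     *                                          type, size, then a full memcmp
     *   any mismatch                        -> die("SHA1 COLLISION FOUND ...")
     */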
|
|
|
|
|
2012-01-14 13:19:55 +01:00
|
|
|
/*
|
|
|
|
* This function is part of find_unresolved_deltas(). There are two
|
|
|
|
* walkers going in opposite directions.
|
|
|
|
*
|
|
|
|
* The first one in find_unresolved_deltas() traverses down from
|
|
|
|
* parent node to children, deflating nodes along the way. However,
|
|
|
|
* memory for deflated nodes is limited by delta_base_cache_limit, so
|
|
|
|
* at some point parent node's deflated content may be freed.
|
|
|
|
*
|
|
|
|
* The second walker is this function, which goes from current node up
|
|
|
|
* to top parent if necessary to deflate the node. In normal
|
|
|
|
* situation, its parent node is already deflated, so it just
|
|
|
|
* needs to apply delta.
|
|
|
|
*
|
|
|
|
* In the worst case scenario, parent node is no longer deflated because
|
|
|
|
* we're running out of delta_base_cache_limit; we need to re-deflate
|
|
|
|
* parents, possibly up to the top base.
|
|
|
|
*
|
|
|
|
* All deflated objects here are subject to be freed if we exceed
|
|
|
|
* delta_base_cache_limit, just like in find_unresolved_deltas(); we
|
|
|
|
* just need to make sure the last node is not freed.
|
|
|
|
*/
|
2008-07-15 06:45:34 +02:00
|
|
|
static void *get_base_data(struct base_data *c)
|
|
|
|
{
|
|
|
|
if (!c->data) {
|
|
|
|
struct object_entry *obj = c->obj;
|
2012-01-14 13:19:55 +01:00
|
|
|
struct base_data **delta = NULL;
|
|
|
|
int delta_nr = 0, delta_alloc = 0;
|
2008-07-15 06:45:34 +02:00
|
|
|
|
2012-01-14 13:19:55 +01:00
|
|
|
while (is_delta_type(c->obj->type) && !c->data) {
|
|
|
|
ALLOC_GROW(delta, delta_nr + 1, delta_alloc);
|
|
|
|
delta[delta_nr++] = c;
|
|
|
|
c = c->base;
|
|
|
|
}
|
|
|
|
if (!delta_nr) {
|
|
|
|
c->data = get_data_from_pack(obj);
|
|
|
|
c->size = obj->size;
|
2012-05-06 14:31:55 +02:00
|
|
|
get_thread_data()->base_cache_used += c->size;
|
2012-01-14 13:19:55 +01:00
|
|
|
prune_base_data(c);
|
|
|
|
}
|
|
|
|
for (; delta_nr > 0; delta_nr--) {
|
|
|
|
void *base, *raw;
|
|
|
|
c = delta[delta_nr - 1];
|
|
|
|
obj = c->obj;
|
|
|
|
base = get_base_data(c->base);
|
|
|
|
raw = get_data_from_pack(obj);
|
2008-07-15 06:45:34 +02:00
|
|
|
c->data = patch_delta(
|
|
|
|
base, c->base->size,
|
|
|
|
raw, obj->size,
|
|
|
|
&c->size);
|
|
|
|
free(raw);
|
|
|
|
if (!c->data)
|
2012-04-23 14:30:29 +02:00
|
|
|
bad_object(obj->idx.offset, _("failed to apply delta"));
|
2012-05-06 14:31:55 +02:00
|
|
|
get_thread_data()->base_cache_used += c->size;
|
2012-01-14 13:19:55 +01:00
|
|
|
prune_base_data(c);
|
2008-10-17 21:57:57 +02:00
|
|
|
}
|
2012-01-14 13:19:55 +01:00
|
|
|
free(delta);
|
2008-07-15 06:45:34 +02:00
|
|
|
}
|
|
|
|
return c->data;
|
|
|
|
}
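What get_base_data() does on a delta chain, as a comment-only illustration:

    /*
     *   base (full object) <- d1 <- d2 <- d3 (the node asked for)
     *
     * Walk up from d3 collecting nodes whose ->data has been pruned, stop
     * at the first node still holding data (or at the full base), then
     * re-apply patch_delta() back down the chain. Each reconstructed
     * result is charged to base_cache_used, and prune_base_data() keeps
     * the total under delta_base_cache_limit while sparing the last node.
     */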
|
|
|
|
|
static void resolve_delta(struct object_entry *delta_obj,
                          struct base_data *base, struct base_data *result)
{
        void *base_data, *delta_data;

        if (show_stat) {
                int i = delta_obj - objects;
                int j = base->obj - objects;
                obj_stat[i].delta_depth = obj_stat[j].delta_depth + 1;
                deepest_delta_lock();
                if (deepest_delta < obj_stat[i].delta_depth)
                        deepest_delta = obj_stat[i].delta_depth;
                deepest_delta_unlock();
                obj_stat[i].base_object_no = j;
        }
        delta_data = get_data_from_pack(delta_obj);
        base_data = get_base_data(base);
        result->obj = delta_obj;
        result->data = patch_delta(base_data, base->size,
                                   delta_data, delta_obj->size, &result->size);
        free(delta_data);
        if (!result->data)
                bad_object(delta_obj->idx.offset, _("failed to apply delta"));
        hash_object_file(the_hash_algo, result->data, result->size,
                         type_name(delta_obj->real_type), &delta_obj->idx.oid);
        sha1_object(result->data, NULL, result->size, delta_obj->real_type,
                    &delta_obj->idx.oid);
        counter_lock();
        nr_resolved_deltas++;
        counter_unlock();
}

index-pack: fix race condition with duplicate bases

When we are resolving deltas in an indexed pack, we do it by
first selecting a potential base (either one stored in full
in the pack, or one created by resolving another delta), and
then resolving any deltas that use that base. When we
resolve a particular delta, we flip its "real_type" field
from OBJ_{REF,OFS}_DELTA to whatever the real type is.

We assume that traversing the objects this way will visit
each delta only once. This is correct for most packs; we
visit the delta only when we process its base, and each
object (and thus each base) appears only once. However, if a
base object appears multiple times in the pack, we will try
to resolve any deltas based on it once for each instance.

We can detect this case by noting that a delta we are about
to resolve has already had its real_type field flipped, and
we already do so with an assert(). However, if multiple
threads are in use, we may race with another thread on
comparing and flipping the field. We need to synchronize the
access.

The right mechanism for doing this is a compare-and-swap (we
atomically "claim" the delta as our own and find out
whether our claim was successful). We can implement this
in C by using a pthread mutex to protect the operation. This
is not the fastest way of doing a compare-and-swap; many
processors provide instructions for this, and gcc and other
compilers provide builtins to access them. However, some
experiments showed that lock contention does not cause a
significant slowdown here. Adding c-a-s support for many
compilers would increase the maintenance burden (and we
would still end up including the pthread version as a
fallback).

Note that we only need to touch the OBJ_REF_DELTA codepath
here. An OBJ_OFS_DELTA object points to its base using an
offset, and therefore has only one base, even if another
copy of that base object appears in the pack (we do still
touch it briefly because the setting of real_type is
factored out of resolve_delta()).

Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

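The message above deliberately rejects compiler builtins in favor of a
mutex. For comparison only, a lock-free variant built on the GCC/Clang
__atomic builtins might look like the sketch below; this illustrates the
alternative being discussed, and is not code from index-pack:

/*
 * Hypothetical lock-free compare-and-swap using the GCC/Clang __atomic
 * builtins; index-pack intentionally uses the mutex version instead.
 */
static int compare_and_swap_type_lockfree(signed char *type,
                                          enum object_type want,
                                          enum object_type set)
{
        signed char expected = want;
        /* Atomically: if (*type == expected) { *type = set; return 1; } */
        return __atomic_compare_exchange_n(type, &expected, (signed char)set,
                                           0, __ATOMIC_SEQ_CST,
                                           __ATOMIC_SEQ_CST);
}
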
/*
 * Standard boolean compare-and-swap: atomically check whether "*type" is
 * "want"; if so, swap in "set" and return true. Otherwise, leave it untouched
 * and return false.
 */
static int compare_and_swap_type(signed char *type,
                                 enum object_type want,
                                 enum object_type set)
{
        enum object_type old;

        type_cas_lock();
        old = *type;
        if (old == want)
                *type = set;
        type_cas_unlock();

        return old == want;
}

static struct base_data *find_unresolved_deltas_1(struct base_data *base,
                                                  struct base_data *prev_base)
{
        if (base->ref_last == -1 && base->ofs_last == -1) {
                find_ref_delta_children(&base->obj->idx.oid,
                                        &base->ref_first, &base->ref_last,
                                        OBJ_REF_DELTA);

                find_ofs_delta_children(base->obj->idx.offset,
                                        &base->ofs_first, &base->ofs_last,
                                        OBJ_OFS_DELTA);

                if (base->ref_last == -1 && base->ofs_last == -1) {
                        free(base->data);
                        return NULL;
                }

                link_base_data(prev_base, base);
        }

        if (base->ref_first <= base->ref_last) {
                struct object_entry *child = objects + ref_deltas[base->ref_first].obj_no;
                struct base_data *result = alloc_base_data();

index-pack: downgrade twice-resolved REF_DELTA to die()

When we're resolving a REF_DELTA, we compare-and-swap its type from
REF_DELTA to whatever real type the base object has, as discussed in
ab791dd138 (index-pack: fix race condition with duplicate bases,
2014-08-29). If the old type wasn't a REF_DELTA, we consider that a
BUG(). But as discussed in that commit, we might see this case whenever
we try to resolve an object twice, which may happen because we have
multiple copies of the base object.

So this isn't a bug at all, but rather a sign that the input pack is
broken. And indeed, this case is triggered already in t5309.5 and
t5309.6, which create packs with delta cycles and duplicate bases. But
we never noticed because those tests are marked expect_failure.

Those tests were added by b2ef3d9ebb (test index-pack on packs with
recoverable delta cycles, 2013-08-23), which was leaving the door open
for cases that we theoretically _could_ handle. And when we see an
already-resolved object like this, in theory we could keep going after
confirming that the previously resolved child->real_type matches
base->obj->real_type. But:

  - enforcing the "only resolve once" rule here saves us from an
    infinite loop in other parts of the code. If we keep going, then
    the delta cycle in t5309.5 causes us to loop infinitely, as
    find_ref_delta_children() doesn't realize which objects have
    already been resolved. So there would be more changes needed to
    make this case work, and in the meantime we'd be worse off.

  - any pack that triggers this is broken anyway. It either has a
    duplicate base object, or it has a cycle which causes us to bring
    in a duplicate via --fix-thin. In either case, we'd end up
    rejecting the pack in write_idx_file(), which also detects
    duplicates.

So the tests have little value in documenting what we _could_ be doing
(and have been neglected for 6+ years). Let's switch them to confirming
that we handle this case cleanly (and switch out the BUG() for a more
informative die() so that we do so).

Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

                if (!compare_and_swap_type(&child->real_type, OBJ_REF_DELTA,
                                           base->obj->real_type))
                        die("REF_DELTA at offset %"PRIuMAX" already resolved (duplicate base %s?)",
                            (uintmax_t)child->idx.offset,
                            oid_to_hex(&base->obj->idx.oid));

                resolve_delta(child, base, result);
                if (base->ref_first == base->ref_last && base->ofs_last == -1)
                        free_base_data(base);

                base->ref_first++;
                return result;
        }

        if (base->ofs_first <= base->ofs_last) {
                struct object_entry *child = objects + ofs_deltas[base->ofs_first].obj_no;
                struct base_data *result = alloc_base_data();

                assert(child->real_type == OBJ_OFS_DELTA);
                child->real_type = base->obj->real_type;
                resolve_delta(child, base, result);
                if (base->ofs_first == base->ofs_last)
                        free_base_data(base);

                base->ofs_first++;
                return result;
        }

        unlink_base_data(base);
        return NULL;
}

static void find_unresolved_deltas(struct base_data *base)
{
        struct base_data *new_base, *prev_base = NULL;
        for (;;) {
                new_base = find_unresolved_deltas_1(base, prev_base);

                if (new_base) {
                        prev_base = base;
                        base = new_base;
                } else {
                        free(base);
                        base = prev_base;
                        if (!base)
                                return;
                        prev_base = base->base;
                }
        }
}

index-pack: fix truncation of off_t in comparison

Commit c6458e6 (index-pack: kill union delta_base to save
memory, 2015-04-18) refactored the comparison functions used
in sorting and binary searching our delta list. The
resulting code does something like:

    int cmp_offsets(off_t a, off_t b)
    {
            return a - b;
    }

This works most of the time, but produces nonsensical
results when the difference between the two offsets is
larger than what can be stored in an "int". This can lead to
unresolved deltas if the packsize is larger than 2G (even on
64-bit systems, an int is still typically 32 bits):

    $ git clone git://github.com/mozilla/gecko-dev
    Cloning into 'gecko-dev'...
    remote: Counting objects: 4800161, done.
    remote: Compressing objects: 100% (178/178), done.
    remote: Total 4800161 (delta 88), reused 0 (delta 0), pack-reused 4799978
    Receiving objects: 100% (4800161/4800161), 2.21 GiB | 3.26 MiB/s, done.
    Resolving deltas: 99% (3808820/3811944), completed with 0 local objects.
    fatal: pack has 3124 unresolved deltas
    fatal: index-pack failed

We can fix it by doing direct comparisons between the
offsets and returning constants; the callers only care about
the sign of the comparison, not the magnitude.

Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

static int compare_ofs_delta_entry(const void *a, const void *b)
{
        const struct ofs_delta_entry *delta_a = a;
        const struct ofs_delta_entry *delta_b = b;

        return delta_a->offset < delta_b->offset ? -1 :
               delta_a->offset > delta_b->offset ?  1 :
               0;
}
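
To make the truncation concrete, here is a small standalone
demonstration with made-up offsets (hypothetical values, not taken from
any pack): with two offsets more than 2^31 bytes apart, the
subtraction-based comparator can even report the wrong sign.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t a = (int64_t)3 << 30;   /* ~3 GiB offset */
        int64_t b = 0;
        int bad = (int)(a - b);                 /* truncates to -1073741824 */
        int good = a < b ? -1 : a > b ? 1 : 0;  /* correct answer: 1 */
        printf("subtraction says %d, comparison says %d\n", bad, good);
        return 0;
}
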
static int compare_ref_delta_entry(const void *a, const void *b)
{
        const struct ref_delta_entry *delta_a = a;
        const struct ref_delta_entry *delta_b = b;

        return oidcmp(&delta_a->oid, &delta_b->oid);
}

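Both comparators dereference the split delta-entry types that replaced
the old union. Their real definitions live earlier in the file; as a
reminder, they have roughly the following shape (field names match the
code above; the size notes assume LP64 and 20-byte SHA-1 names, and are
approximate):

struct ofs_delta_entry_sketch {
        off_t offset;          /* base's position in the pack; 8 bytes */
        int obj_no;            /* delta's index in objects[]; tail padding
                                  brings the struct to 16 bytes on LP64 */
};

struct ref_delta_entry_sketch {
        struct object_id oid;  /* base's object name; 20 bytes for SHA-1 */
        int obj_no;
};
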
static void resolve_base(struct object_entry *obj)
{
        struct base_data *base_obj = alloc_base_data();
        base_obj->obj = obj;
        base_obj->data = NULL;
        find_unresolved_deltas(base_obj);
}

static void *threaded_second_pass(void *data)
{
        set_thread_data(data);
        for (;;) {
                int i;
                counter_lock();
                display_progress(progress, nr_resolved_deltas);
                counter_unlock();
                work_lock();
                while (nr_dispatched < nr_objects &&
                       is_delta_type(objects[nr_dispatched].type))
                        nr_dispatched++;
                if (nr_dispatched >= nr_objects) {
                        work_unlock();
                        break;
                }
                i = nr_dispatched++;
                work_unlock();

                resolve_base(&objects[i]);
        }
        return NULL;
}

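The counter_lock()/counter_unlock() and work_lock()/work_unlock() pairs
above guard the shared progress counter and the dispatch cursor. Their
definitions appear earlier in the file; the assumed shape is a thin
pthread_mutex wrapper that degrades to a no-op when threading is
inactive, roughly:

/* Sketch of the assumed locking-helper shape, not the verbatim definitions. */
static int threads_active;
static pthread_mutex_t counter_mutex;

static inline void counter_lock(void)
{
        if (threads_active)
                pthread_mutex_lock(&counter_mutex);
}

static inline void counter_unlock(void)
{
        if (threads_active)
                pthread_mutex_unlock(&counter_mutex);
}
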
/*
 * First pass:
 * - find locations of all objects;
 * - calculate SHA1 of all non-delta objects;
 * - remember base (SHA1 or offset) for all deltas.
 */
static void parse_pack_objects(unsigned char *hash)
{
        int i, nr_delays = 0;
        struct ofs_delta_entry *ofs_delta = ofs_deltas;
        struct object_id ref_delta_oid;
        struct stat st;

        if (verbose)
                progress = start_progress(
                                from_stdin ? _("Receiving objects") : _("Indexing objects"),
                                nr_objects);
        for (i = 0; i < nr_objects; i++) {
                struct object_entry *obj = &objects[i];
                void *data = unpack_raw_entry(obj, &ofs_delta->offset,
                                              &ref_delta_oid,
                                              &obj->idx.oid);
                obj->real_type = obj->type;
                if (obj->type == OBJ_OFS_DELTA) {
                        nr_ofs_deltas++;
                        ofs_delta->obj_no = i;
                        ofs_delta++;
                } else if (obj->type == OBJ_REF_DELTA) {
                        ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc);
                        oidcpy(&ref_deltas[nr_ref_deltas].oid, &ref_delta_oid);
                        ref_deltas[nr_ref_deltas].obj_no = i;
                        nr_ref_deltas++;
                } else if (!data) {
                        /* large blobs, check later */
                        obj->real_type = OBJ_BAD;
                        nr_delays++;
                } else
                        sha1_object(data, NULL, obj->size, obj->type,
                                    &obj->idx.oid);
                free(data);
                display_progress(progress, i+1);
        }
        objects[i].idx.offset = consumed_bytes;
        stop_progress(&progress);

        /* Check pack integrity */
        flush();
        the_hash_algo->final_fn(hash, &input_ctx);
        if (!hasheq(fill(the_hash_algo->rawsz), hash))
                die(_("pack is corrupted (SHA1 mismatch)"));
        use(the_hash_algo->rawsz);

        /* If input_fd is a file, we should have reached its end now. */
        if (fstat(input_fd, &st))
                die_errno(_("cannot fstat packfile"));

git-bundle: assorted fixes

This patch fixes issues mentioned by Junio, Nico and Simon:

 - I forgot to convert the usage string when removing the "--" from
   the subcommands,
 - a style fix in the bundle_header,
 - use xread() instead of read(),
 - use write_or_die() instead of write(),
 - make the bundle header extensible,
 - fail if the whitespace after a sha1 of a reference is missing,
 - close() the fds passed to a subprocess,
 - in verify_bundle(), do not use "rev-list --stdin", but rather
   pass the revs directly (avoiding a fork()),
 - fix a corrupted comment in show_object(), and
 - fix the size check in index_pack.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Junio C Hamano <junkio@cox.net>

        if (S_ISREG(st.st_mode) &&
            lseek(input_fd, 0, SEEK_CUR) - input_len != st.st_size)
                die(_("pack has junk at the end"));

        for (i = 0; i < nr_objects; i++) {
                struct object_entry *obj = &objects[i];
                if (obj->real_type != OBJ_BAD)
                        continue;
                obj->real_type = obj->type;
                sha1_object(NULL, obj, obj->size, obj->type,
                            &obj->idx.oid);
                nr_delays--;
        }
        if (nr_delays)
                die(_("confusion beyond insanity in parse_pack_objects()"));
}

/*
 * Second pass:
 * - for all non-delta objects, check whether each is used as a base
 *   for deltas;
 * - if used as a base, uncompress the object and apply all deltas,
 *   recursively checking if the resulting object is used as a base
 *   for some more deltas.
 */
static void resolve_deltas(void)
{
        int i;

        if (!nr_ofs_deltas && !nr_ref_deltas)
                return;

        /* Sort deltas by base SHA1/offset for fast searching */
        QSORT(ofs_deltas, nr_ofs_deltas, compare_ofs_delta_entry);
        QSORT(ref_deltas, nr_ref_deltas, compare_ref_delta_entry);

index-pack: add flag for showing delta-resolution progress

The index-pack command has two progress meters: one for
"receiving objects", and one for "resolving deltas". You get
neither by default, or both with "-v".

But for a push through receive-pack, we would want only the
"resolving deltas" phase, _not_ the "receiving objects"
progress. There are two reasons for this.

One is simply that existing clients are already printing
"writing objects" progress at the same time. Arguably
"receiving" from the far end is more useful, because it
tells you what has actually gotten there, as opposed to what
might be stuck in a buffer somewhere between the client and
server. But that would require a protocol extension to tell
clients not to print their progress. Possible, but
complexity for little gain.

The second reason is much more important. In a full-duplex
connection like git-over-ssh, we can print progress while
the pack is incoming, and it will immediately get to the
client. But for a half-duplex connection like git-over-http,
we should not say anything until we have received the full
request. Anything we write is subject to being stuck in a
buffer by the webserver. Worse, we can end up in a deadlock
if that buffer fills up.

So our best bet is to avoid writing anything that isn't a
small fixed size until we've received the full pack.

Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

|
|
|
if (verbose || show_resolving_progress)
|
index-pack: kill union delta_base to save memory
2015-04-18 12:47:05 +02:00
|
|
|
progress = start_progress(_("Resolving deltas"),
|
|
|
|
nr_ref_deltas + nr_ofs_deltas);
|
2012-05-06 14:31:55 +02:00
|
|
|
|
|
|
|
nr_dispatched = 0;
|
|
|
|
if (nr_threads > 1 || getenv("GIT_FORCE_THREADS")) {
|
|
|
|
init_thread();
|
|
|
|
for (i = 0; i < nr_threads; i++) {
|
|
|
|
int ret = pthread_create(&thread_data[i].thread, NULL,
|
|
|
|
threaded_second_pass, thread_data + i);
|
|
|
|
if (ret)
|
2012-08-31 14:13:04 +02:00
|
|
|
die(_("unable to create thread: %s"),
|
|
|
|
strerror(ret));
|
2012-05-06 14:31:55 +02:00
|
|
|
}
|
|
|
|
for (i = 0; i < nr_threads; i++)
|
|
|
|
pthread_join(thread_data[i].thread, NULL);
|
|
|
|
cleanup_thread();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2005-10-12 21:01:31 +02:00
|
|
|
for (i = 0; i < nr_objects; i++) {
|
|
|
|
struct object_entry *obj = &objects[i];
|
|
|
|
|
2011-06-04 00:32:14 +02:00
|
|
|
if (is_delta_type(obj->type))
|
2005-10-12 21:01:31 +02:00
|
|
|
continue;
|
2012-05-06 14:31:54 +02:00
|
|
|
resolve_base(obj);
|
2007-10-30 19:57:33 +01:00
|
|
|
display_progress(progress, nr_resolved_deltas);
|
2005-10-12 21:01:31 +02:00
|
|
|
}
|
2006-10-26 05:28:17 +02:00
|
|
|
}
|
|
|
|
|
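For readers tracing the delta_base split described above, here is a
minimal sketch of the two bookkeeping structs it introduces. The field
names match their uses elsewhere in this file (obj_no, offset, d->oid),
but treat the exact layout as an assumption, not a verbatim quote:

/* Sketch of the post-split delta bookkeeping (assumed layout). */
struct ofs_delta_entry {
	off_t offset;         /* base named by pack offset: 8 bytes */
	int obj_no;           /* index of the delta in objects[] */
};

struct ref_delta_entry {
	struct object_id oid; /* base named by object id: 20+ bytes */
	int obj_no;
};

ofs_deltas[] can be sized up front, since in the common case most
deltas are ofs-deltas, while ref_deltas[] starts empty and grows only
as ref-deltas are actually seen; that asymmetry is what produces the
clone-case saving described above.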
2012-05-06 14:31:54 +02:00
|
|
|
/*
|
|
|
|
* Third pass:
|
|
|
|
* - append objects to convert thin pack to full pack if required
|
2018-02-01 03:18:39 +01:00
|
|
|
* - write the final pack hash
|
2012-05-06 14:31:54 +02:00
|
|
|
*/
|
2018-02-01 03:18:46 +01:00
|
|
|
static void fix_unresolved_deltas(struct hashfile *f);
|
2018-02-01 03:18:39 +01:00
|
|
|
static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash)
|
2012-05-06 14:31:54 +02:00
|
|
|
{
|
index-pack: kill union delta_base to save memory
2015-04-18 12:47:05 +02:00
|
|
|
if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) {
|
2012-05-06 14:31:54 +02:00
|
|
|
stop_progress(&progress);
|
2018-02-01 03:18:39 +01:00
|
|
|
/* Flush remaining pack final hash. */
|
2012-05-06 14:31:54 +02:00
|
|
|
flush();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fix_thin_pack) {
|
2018-02-01 03:18:46 +01:00
|
|
|
struct hashfile *f;
|
2018-02-01 03:18:39 +01:00
|
|
|
unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ];
|
2013-03-16 02:25:18 +01:00
|
|
|
struct strbuf msg = STRBUF_INIT;
|
index-pack: kill union delta_base to save memory
2015-04-18 12:47:05 +02:00
|
|
|
int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas;
|
2012-05-06 14:31:54 +02:00
|
|
|
int nr_objects_initial = nr_objects;
|
|
|
|
if (nr_unresolved <= 0)
|
2012-05-14 20:50:40 +02:00
|
|
|
die(_("confusion beyond insanity"));
|
2014-09-16 20:56:57 +02:00
|
|
|
REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1);
|
2013-03-19 17:17:22 +01:00
|
|
|
memset(objects + nr_objects + 1, 0,
|
|
|
|
nr_unresolved * sizeof(*objects));
|
2018-02-01 03:18:46 +01:00
|
|
|
f = hashfd(output_fd, curr_pack);
|
index-pack: fix allocation of sorted_by_pos array
When c6458e60 (index-pack: kill union delta_base to save memory,
2015-04-18) attempted to reduce the memory footprint of index-pack,
one of the key things it did was to keep track of ref-deltas and
ofs-deltas separately.
In fix_unresolved_deltas(), however, it forgot that it now wants to
look only at ref deltas in one place. The code allocated an array
for nr_unresolved, which is the sum of the ref- and ofs-delta counts
minus nr_resolved, and which may be larger or smaller than the
number of ref-deltas. Depending on nr_resolved, this was either
under- or over-allocating.
Also, the old code before this change had to use 'i' and 'n' because
some of the entries in the (old) deltas[] array scanned with 'i'
would not make it into the sorted_by_pos[] array in the old world
order; but now, because only ref deltas live in the separate
ref_deltas[] array, the two indices increment in lockstep. We no
longer need separate variables. And most importantly, we shouldn't pass the
nr_unresolved parameter, as this number does not play a role in the
working of this helper function.
Helped-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-07-03 18:51:57 +02:00
|
|
|
fix_unresolved_deltas(f);
|
2016-04-08 22:02:39 +02:00
|
|
|
strbuf_addf(&msg, Q_("completed with %d local object",
|
|
|
|
"completed with %d local objects",
|
|
|
|
nr_objects - nr_objects_initial),
|
2013-03-16 02:25:18 +01:00
|
|
|
nr_objects - nr_objects_initial);
|
|
|
|
stop_progress_msg(&progress, msg.buf);
|
|
|
|
strbuf_release(&msg);
|
2018-04-02 22:34:14 +02:00
|
|
|
finalize_hashfile(f, tail_hash, 0);
|
2018-02-01 03:18:39 +01:00
|
|
|
hashcpy(read_hash, pack_hash);
|
|
|
|
fixup_pack_header_footer(output_fd, pack_hash,
|
2012-05-06 14:31:54 +02:00
|
|
|
curr_pack, nr_objects,
|
2018-02-01 03:18:39 +01:00
|
|
|
read_hash, consumed_bytes-the_hash_algo->rawsz);
|
2018-08-28 23:22:52 +02:00
|
|
|
if (!hasheq(read_hash, tail_hash))
|
2012-08-31 14:13:04 +02:00
|
|
|
die(_("Unexpected tail checksum for %s "
|
|
|
|
"(disk corruption?)"), curr_pack);
|
2012-05-06 14:31:54 +02:00
|
|
|
}
|
index-pack: kill union delta_base to save memory
2015-04-18 12:47:05 +02:00
|
|
|
if (nr_ofs_deltas + nr_ref_deltas != nr_resolved_deltas)
|
2012-05-14 20:50:40 +02:00
|
|
|
die(Q_("pack has %d unresolved delta",
|
|
|
|
"pack has %d unresolved deltas",
|
index-pack: kill union delta_base to save memory
2015-04-18 12:47:05 +02:00
|
|
|
nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas),
|
|
|
|
nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas);
|
2012-05-06 14:31:54 +02:00
|
|
|
}
|
|
|
|
|
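To make conclude_pack()'s thin-pack fixup concrete: after the missing
bases are appended, the 12-byte pack header still advertises the
original object count and the trailing checksum covers the old
contents. A rough sketch of the header that fixup_pack_header_footer()
has to patch, assuming the conventional pack.h layout with all fields
in network byte order; this is illustrative, not git's actual code:

#include <stdint.h>

/* Assumed on-disk pack header; only nr_entries changes here. */
struct pack_header_sketch {
	uint32_t signature;  /* "PACK" */
	uint32_t version;    /* 2 */
	uint32_t nr_entries; /* patched to include the appended bases */
};

After the count is patched the file is re-hashed and a new trailing
checksum written; comparing read_hash against tail_hash afterwards
guards against the pack having been corrupted on disk in the meantime.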
2018-02-01 03:18:46 +01:00
|
|
|
static int write_compressed(struct hashfile *f, void *in, unsigned int size)
|
2006-10-26 05:28:17 +02:00
|
|
|
{
|
2011-06-10 20:52:15 +02:00
|
|
|
git_zstream stream;
|
2010-04-12 22:50:35 +02:00
|
|
|
int status;
|
|
|
|
unsigned char outbuf[4096];
|
2006-10-26 05:28:17 +02:00
|
|
|
|
2011-06-10 19:55:10 +02:00
|
|
|
git_deflate_init(&stream, zlib_compression_level);
|
2006-10-26 05:28:17 +02:00
|
|
|
stream.next_in = in;
|
|
|
|
stream.avail_in = size;
|
|
|
|
|
2010-04-12 22:50:35 +02:00
|
|
|
do {
|
|
|
|
stream.next_out = outbuf;
|
|
|
|
stream.avail_out = sizeof(outbuf);
|
2011-06-10 19:55:10 +02:00
|
|
|
status = git_deflate(&stream, Z_FINISH);
|
2018-02-01 03:18:46 +01:00
|
|
|
hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out);
|
2010-04-12 22:50:35 +02:00
|
|
|
} while (status == Z_OK);
|
|
|
|
|
|
|
|
if (status != Z_STREAM_END)
|
2012-04-23 14:30:29 +02:00
|
|
|
die(_("unable to deflate appended object (%d)"), status);
|
2006-10-26 05:28:17 +02:00
|
|
|
size = stream.total_out;
|
2011-06-10 19:55:10 +02:00
|
|
|
git_deflate_end(&stream);
|
2006-10-26 05:28:17 +02:00
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
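The Z_OK/Z_STREAM_END loop in write_compressed() above is the standard
zlib drain pattern. The same shape with plain zlib calls, as a
self-contained sketch (names invented for illustration):

#include <zlib.h>

/* Drain a fully buffered input with Z_FINISH; returns Z_STREAM_END
 * on success, mirroring the loop in write_compressed(). */
static int deflate_drain(z_stream *s, unsigned char *out, unsigned outlen)
{
	int status;
	do {
		s->next_out = out;
		s->avail_out = outlen;
		status = deflate(s, Z_FINISH); /* Z_OK while output pending */
		/* hand (outlen - s->avail_out) produced bytes downstream */
	} while (status == Z_OK);
	return status;
}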
2018-02-01 03:18:46 +01:00
|
|
|
static struct object_entry *append_obj_to_pack(struct hashfile *f,
|
2008-07-14 04:07:46 +02:00
|
|
|
const unsigned char *sha1, void *buf,
|
2006-10-26 05:28:17 +02:00
|
|
|
unsigned long size, enum object_type type)
|
|
|
|
{
|
|
|
|
struct object_entry *obj = &objects[nr_objects++];
|
|
|
|
unsigned char header[10];
|
|
|
|
unsigned long s = size;
|
|
|
|
int n = 0;
|
|
|
|
unsigned char c = (type << 4) | (s & 15);
|
|
|
|
s >>= 4;
|
|
|
|
while (s) {
|
|
|
|
header[n++] = c | 0x80;
|
|
|
|
c = s & 0x7f;
|
|
|
|
s >>= 7;
|
|
|
|
}
|
|
|
|
header[n++] = c;
|
2008-08-29 22:08:01 +02:00
|
|
|
crc32_begin(f);
|
2018-02-01 03:18:46 +01:00
|
|
|
hashwrite(f, header, n);
|
2008-07-24 19:32:00 +02:00
|
|
|
obj[0].size = size;
|
|
|
|
obj[0].hdr_size = n;
|
|
|
|
obj[0].type = type;
|
|
|
|
obj[0].real_type = type;
|
2007-06-01 21:18:05 +02:00
|
|
|
obj[1].idx.offset = obj[0].idx.offset + n;
|
2008-08-29 22:08:01 +02:00
|
|
|
obj[1].idx.offset += write_compressed(f, buf, size);
|
|
|
|
obj[0].idx.crc32 = crc32_end(f);
|
2018-02-01 03:18:46 +01:00
|
|
|
hashflush(f);
|
2017-05-07 00:10:11 +02:00
|
|
|
hashcpy(obj->idx.oid.hash, sha1);
|
2008-07-14 04:07:46 +02:00
|
|
|
return obj;
|
2006-10-26 05:28:17 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
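A worked example of the object-header encoding in append_obj_to_pack()
above, with invented values:

/*
 * type = OBJ_BLOB (3), size = 1000 = 0x3e8:
 *
 *   c = (3 << 4) | (1000 & 15) = 0x38;  s = 1000 >> 4 = 62
 *   s != 0: emit 0x38 | 0x80 = 0xb8;    c = 62 & 0x7f = 0x3e;  s = 0
 *   emit 0x3e
 *
 * Header bytes: 0xb8 0x3e. The MSB of each byte means "more size bits
 * follow"; bits 6-4 of the first byte carry the type, its low nibble
 * carries size bits 0-3, and each later byte carries seven more size
 * bits: 8 + (62 << 4) = 1000.
 */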
static int delta_pos_compare(const void *_a, const void *_b)
|
|
|
|
{
|
index-pack: kill union delta_base to save memory
2015-04-18 12:47:05 +02:00
|
|
|
struct ref_delta_entry *a = *(struct ref_delta_entry **)_a;
|
|
|
|
struct ref_delta_entry *b = *(struct ref_delta_entry **)_b;
|
2006-10-26 05:28:17 +02:00
|
|
|
return a->obj_no - b->obj_no;
|
|
|
|
}
|
2005-10-12 21:01:31 +02:00
|
|
|
|
2018-02-01 03:18:46 +01:00
|
|
|
static void fix_unresolved_deltas(struct hashfile *f)
|
2006-10-26 05:28:17 +02:00
|
|
|
{
|
index-pack: kill union delta_base to save memory
2015-04-18 12:47:05 +02:00
|
|
|
struct ref_delta_entry **sorted_by_pos;
|
index-pack: fix allocation of sorted_by_pos array
2015-07-03 18:51:57 +02:00
|
|
|
int i;
|
2006-10-26 05:28:17 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Since many unresolved deltas may well be themselves base objects
|
|
|
|
* for more unresolved deltas, we really want to include the
|
|
|
|
* smallest number of base objects that would cover as much delta
|
|
|
|
* as possible by picking the
|
|
|
|
* trunk deltas first, allowing other deltas to resolve without
|
|
|
|
* additional base objects. Since most base objects are to be found
|
|
|
|
* before deltas depending on them, a good heuristic is to start
|
|
|
|
* resolving deltas in the same order as their position in the pack.
|
|
|
|
*/
|
2016-02-22 23:44:25 +01:00
|
|
|
ALLOC_ARRAY(sorted_by_pos, nr_ref_deltas);
|
index-pack: kill union delta_base to save memory
2015-04-18 12:47:05 +02:00
|
|
|
for (i = 0; i < nr_ref_deltas; i++)
|
index-pack: fix allocation of sorted_by_pos array
2015-07-03 18:51:57 +02:00
|
|
|
sorted_by_pos[i] = &ref_deltas[i];
|
2016-09-29 17:27:31 +02:00
|
|
|
QSORT(sorted_by_pos, nr_ref_deltas, delta_pos_compare);
|
2006-10-26 05:28:17 +02:00
|
|
|
|
2019-06-25 15:40:31 +02:00
|
|
|
if (has_promisor_remote()) {
|
index-pack: prefetch missing REF_DELTA bases
When fetching, the client sends "have" commit IDs indicating that the
server does not need to send any object referenced by those commits,
reducing network I/O. When the client is a partial clone, the client
still sends "have"s in this way, even if it does not have every object
referenced by a commit it sent as "have".
If a server omits such an object, it is fine: the client could lazily
fetch that object before this fetch, and it can still do so after.
The issue is when the server sends a thin pack containing an object that
is a REF_DELTA against such a missing object: index-pack fails to fix
the thin pack. When support for lazily fetching missing objects was
added in 8b4c0103a9 ("sha1_file: support lazily fetching missing
objects", 2017-12-08), support in index-pack was turned off in the
belief that it accesses the repo only to do hash collision checks.
However, this is not true: it also needs to access the repo to resolve
REF_DELTA bases.
Support for lazy fetching should still generally be turned off in
index-pack because it is used as part of the lazy fetching process
itself (if not, infinite loops may occur), but we do need to fetch the
REF_DELTA bases. (When fetching REF_DELTA bases, it is unlikely that
those are REF_DELTA themselves, because we do not send "have" when
making such fetches.)
To resolve this, prefetch all missing REF_DELTA bases before attempting
to resolve them. This both ensures that we attempt to fetch all the
bases, and ensures that we make only one request per index-pack
invocation rather than one request per missing object.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-05-14 23:10:55 +02:00
|
|
|
/*
|
|
|
|
* Prefetch the delta bases.
|
|
|
|
*/
|
|
|
|
struct oid_array to_fetch = OID_ARRAY_INIT;
|
|
|
|
for (i = 0; i < nr_ref_deltas; i++) {
|
|
|
|
struct ref_delta_entry *d = sorted_by_pos[i];
|
|
|
|
if (!oid_object_info_extended(the_repository, &d->oid,
|
|
|
|
NULL,
|
|
|
|
OBJECT_INFO_FOR_PREFETCH))
|
|
|
|
continue;
|
|
|
|
oid_array_append(&to_fetch, &d->oid);
|
|
|
|
}
|
2020-04-02 21:19:16 +02:00
|
|
|
promisor_remote_get_direct(the_repository,
|
|
|
|
to_fetch.oid, to_fetch.nr);
|
index-pack: prefetch missing REF_DELTA bases
2019-05-14 23:10:55 +02:00
|
|
|
oid_array_clear(&to_fetch);
|
|
|
|
}
|
|
|
|
|
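One note on the probe above: a nonzero return from
oid_object_info_extended() means the object is not available locally.
The OBJECT_INFO_FOR_PREFETCH flag combination is what keeps the probe
itself from triggering a lazy fetch; to my reading of the commit above
it expands as below, but treat this as an assumption to be verified
against object-store.h:

/* Assumed definition; the probe must not itself lazy-fetch. */
#define OBJECT_INFO_FOR_PREFETCH \
	(OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_QUICK)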
index-pack: fix allocation of sorted_by_pos array
2015-07-03 18:51:57 +02:00
|
|
|
for (i = 0; i < nr_ref_deltas; i++) {
|
index-pack: kill union delta_base to save memory
2015-04-18 12:47:05 +02:00
|
|
|
struct ref_delta_entry *d = sorted_by_pos[i];
|
2007-02-26 20:55:59 +01:00
|
|
|
enum object_type type;
|
2012-01-14 13:19:54 +01:00
|
|
|
struct base_data *base_obj = alloc_base_data();
|
2006-10-26 05:28:17 +02:00
|
|
|
|
|
|
|
if (objects[d->obj_no].real_type != OBJ_REF_DELTA)
|
|
|
|
continue;
|
sha1_file: convert read_sha1_file to struct object_id
Convert read_sha1_file to take a pointer to struct object_id and rename
it read_object_file. Do the same for read_sha1_file_extended.
Convert one use in grep.c to use the new function without any other code
change, since the pointer being passed is a void pointer that is already
initialized with a pointer to struct object_id. Update the declaration
and definitions of the modified functions, and apply the following
semantic patch to convert the remaining callers:
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1.hash, E2, E3)
+ read_object_file(&E1, E2, E3)
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1->hash, E2, E3)
+ read_object_file(E1, E2, E3)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1.hash, E2, E3, E4)
+ read_object_file_extended(&E1, E2, E3, E4)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1->hash, E2, E3, E4)
+ read_object_file_extended(E1, E2, E3, E4)
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-12 03:27:53 +01:00
|
|
|
base_obj->data = read_object_file(&d->oid, &type,
|
|
|
|
&base_obj->size);
|
2012-01-14 13:19:54 +01:00
|
|
|
if (!base_obj->data)
|
2006-10-26 05:28:17 +02:00
|
|
|
continue;
|
|
|
|
|
2020-01-30 21:32:23 +01:00
|
|
|
if (check_object_signature(the_repository, &d->oid,
|
|
|
|
base_obj->data, base_obj->size,
|
|
|
|
type_name(type)))
|
2018-03-12 03:27:37 +01:00
|
|
|
die(_("local object %s is corrupt"), oid_to_hex(&d->oid));
|
|
|
|
base_obj->obj = append_obj_to_pack(f, d->oid.hash,
|
2012-01-14 13:19:54 +01:00
|
|
|
base_obj->data, base_obj->size, type);
|
|
|
|
find_unresolved_deltas(base_obj);
|
2007-10-30 19:57:33 +01:00
|
|
|
display_progress(progress, nr_resolved_deltas);
|
2006-10-26 05:28:17 +02:00
|
|
|
}
|
|
|
|
free(sorted_by_pos);
|
|
|
|
}
|
|
|
|
|
2017-12-05 17:58:48 +01:00
|
|
|
static const char *derive_filename(const char *pack_name, const char *suffix,
|
|
|
|
struct strbuf *buf)
|
|
|
|
{
|
|
|
|
size_t len;
|
|
|
|
if (!strip_suffix(pack_name, ".pack", &len))
|
|
|
|
die(_("packfile name '%s' does not end with '.pack'"),
|
|
|
|
pack_name);
|
|
|
|
strbuf_add(buf, pack_name, len);
|
|
|
|
strbuf_addch(buf, '.');
|
|
|
|
strbuf_addstr(buf, suffix);
|
|
|
|
return buf->buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
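A hypothetical use of derive_filename() above; the pack name is
invented for illustration:

struct strbuf buf = STRBUF_INIT;
const char *keep_path =
	derive_filename("pack-1234abcd.pack", "keep", &buf);
/* keep_path == buf.buf == "pack-1234abcd.keep" */
strbuf_release(&buf);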
static void write_special_file(const char *suffix, const char *msg,
|
2018-02-15 23:55:47 +01:00
|
|
|
const char *pack_name, const unsigned char *hash,
|
2017-12-05 17:58:48 +01:00
|
|
|
const char **report)
|
|
|
|
{
|
|
|
|
struct strbuf name_buf = STRBUF_INIT;
|
|
|
|
const char *filename;
|
|
|
|
int fd;
|
|
|
|
int msg_len = strlen(msg);
|
|
|
|
|
|
|
|
if (pack_name)
|
|
|
|
filename = derive_filename(pack_name, suffix, &name_buf);
|
|
|
|
else
|
2018-02-15 23:55:47 +01:00
|
|
|
filename = odb_pack_name(&name_buf, hash, suffix);
|
2017-12-05 17:58:48 +01:00
|
|
|
|
|
|
|
fd = odb_pack_keep(filename);
|
|
|
|
if (fd < 0) {
|
|
|
|
if (errno != EEXIST)
|
|
|
|
die_errno(_("cannot write %s file '%s'"),
|
|
|
|
suffix, filename);
|
|
|
|
} else {
|
|
|
|
if (msg_len > 0) {
|
|
|
|
write_or_die(fd, msg, msg_len);
|
|
|
|
write_or_die(fd, "\n", 1);
|
|
|
|
}
|
|
|
|
if (close(fd) != 0)
|
|
|
|
die_errno(_("cannot close written %s file '%s'"),
|
|
|
|
suffix, filename);
|
introduce fetch-object: fetch one promisor object
Introduce fetch-object, providing the ability to fetch one object from a
promisor remote.
This uses fetch-pack. To do this, the transport mechanism has been
updated with two flags: "from-promisor" to indicate that the resulting
pack comes from a promisor remote (and thus should be annotated as such
by index-pack), and "no-dependents" to indicate that only the objects
themselves need to be fetched (but fetching additional objects is
nevertheless safe).
Whenever "no-dependents" is used, fetch-pack will refrain from using any
object flags, because it is most likely invoked as part of a dynamic
object fetch by another Git command (which may itself use object flags).
An alternative to this is to leave fetch-pack alone, and instead update
the allocation of flags so that fetch-pack's flags never overlap with
any others, but this will end up shrinking the number of flags available
to nearly every other Git command (that is, every Git command that
accesses objects), so the approach in this commit was used instead.
This will be tested in a subsequent commit.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-12-05 17:58:49 +01:00
|
|
|
if (report)
|
|
|
|
*report = suffix;
|
2017-12-05 17:58:48 +01:00
|
|
|
}
|
|
|
|
strbuf_release(&name_buf);
|
|
|
|
}
|
|
|
|
|
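The practical effect of the "keep" case handled above: while a
pack-<hash>.keep file exists, git repack and git gc leave the
corresponding pack alone, so a pack received during a push cannot be
pruned before its refs are updated. The msg argument becomes the
file's contents; receive-pack, for example, passes a message
identifying the receiving process, along the lines of
"receive-pack 12345 on example.host" (illustrative wording).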
2006-10-23 20:50:18 +02:00
|
|
|
static void final(const char *final_pack_name, const char *curr_pack_name,
|
|
|
|
const char *final_index_name, const char *curr_index_name,
|
introduce fetch-object: fetch one promisor object
2017-12-05 17:58:49 +01:00
|
|
|
const char *keep_msg, const char *promisor_msg,
|
2018-02-01 03:18:39 +01:00
|
|
|
unsigned char *hash)
|
2006-10-23 20:50:18 +02:00
|
|
|
{
|
2007-03-07 02:44:17 +01:00
|
|
|
const char *report = "pack";
|
index-pack: make pointer-alias fallbacks safer
The final() function accepts a NULL value for certain
parameters, and falls back to writing into a reusable "name"
buffer, and then either:
1. For "keep_name", requiring all uses to do "keep_name ?
keep_name : name.buf". This is awkward, and it's easy
to accidentally look at the maybe-NULL keep_name.
2. For "final_index_name" and "final_pack_name", aliasing
those pointers to the "name" buffer. This is easier to
use, but the aliased pointers become invalid after the
buffer is reused (this isn't a bug now, but it's a
potential pitfall).
One way to make this safer would be to introduce an extra
pointer to do the aliasing, and have its lifetime match the
validity of the "name" buffer. But it's still easy to
accidentally use the wrong name (i.e., to use
"final_pack_name" instead of the aliased pointer).
Instead, let's use three separate buffers that will remain
valid through the function. That makes it safe to alias the
pointers and use them consistently. The extra allocations
shouldn't matter, as this function is not performance
sensitive.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-03-16 15:27:20 +01:00
|
|
|
struct strbuf pack_name = STRBUF_INIT;
|
|
|
|
struct strbuf index_name = STRBUF_INIT;
|
2006-10-23 20:50:18 +02:00
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!from_stdin) {
|
|
|
|
close(input_fd);
|
|
|
|
} else {
|
2008-05-30 17:42:16 +02:00
|
|
|
fsync_or_die(output_fd, curr_pack_name);
|
2006-10-23 20:50:18 +02:00
|
|
|
err = close(output_fd);
|
|
|
|
if (err)
|
2012-04-23 14:30:29 +02:00
|
|
|
die_errno(_("error while closing pack file"));
|
2006-10-23 20:50:18 +02:00
|
|
|
}
|
|
|
|
|
2017-12-05 17:58:48 +01:00
|
|
|
if (keep_msg)
|
2018-02-15 23:55:47 +01:00
|
|
|
write_special_file("keep", keep_msg, final_pack_name, hash,
|
2017-12-05 17:58:48 +01:00
|
|
|
&report);
|
introduce fetch-object: fetch one promisor object
2017-12-05 17:58:49 +01:00
|
|
|
if (promisor_msg)
|
|
|
|
write_special_file("promisor", promisor_msg, final_pack_name,
|
2018-02-15 23:55:47 +01:00
|
|
|
hash, NULL);
|
2006-10-29 10:41:59 +01:00
|
|
|
|
2006-10-23 20:50:18 +02:00
|
|
|
if (final_pack_name != curr_pack_name) {
|
2017-03-16 15:27:15 +01:00
|
|
|
if (!final_pack_name)
|
2018-02-01 03:18:39 +01:00
|
|
|
final_pack_name = odb_pack_name(&pack_name, hash, "pack");
|
2015-08-07 23:40:24 +02:00
|
|
|
if (finalize_object_file(curr_pack_name, final_pack_name))
|
2012-04-23 14:30:29 +02:00
|
|
|
die(_("cannot store pack file"));
|
2009-03-26 16:16:47 +01:00
|
|
|
} else if (from_stdin)
|
2008-10-03 12:20:43 +02:00
|
|
|
chmod(final_pack_name, 0444);
|
2006-10-23 20:50:18 +02:00
|
|
|
|
|
|
|
if (final_index_name != curr_index_name) {
|
2017-03-16 15:27:15 +01:00
|
|
|
if (!final_index_name)
|
2018-02-01 03:18:39 +01:00
|
|
|
final_index_name = odb_pack_name(&index_name, hash, "idx");
|
2015-08-07 23:40:24 +02:00
|
|
|
if (finalize_object_file(curr_index_name, final_index_name))
|
2012-04-23 14:30:29 +02:00
|
|
|
die(_("cannot store index file"));
|
2009-03-26 16:16:47 +01:00
|
|
|
} else
|
|
|
|
chmod(final_index_name, 0444);
|
2006-11-01 23:06:25 +01:00
|
|
|
|
index-pack: handle --strict checks of non-repo packs
Commit 73c3f0f704 (index-pack: check .gitmodules files with
--strict, 2018-05-04) added a call to add_packed_git(), with
the intent that the newly-indexed objects would be available
to the process when we run fsck_finish(). But that's not
what add_packed_git() does. It only allocates the struct,
and you must install_packed_git() on the result. So that
call was effectively doing nothing (except leaking a
struct).
But wait, we passed all of the tests! Does that mean we
don't need the call at all?
For normal cases, no. When we run "index-pack --stdin"
inside a repository, we write the new pack into the object
directory. If fsck_finish() needs to access one of the new
objects, then our initial lookup will fail to find it, but
we'll follow up by running reprepare_packed_git() and
looking again. That logic was meant to handle somebody else
repacking simultaneously, but it ends up working for us
here.
But there is a case that does need this, one that we were not
testing. You can run "git index-pack foo.pack" on any file,
even when it is not inside the object directory. Or you may
not even be in a repository at all! This case fails without
doing the proper install_packed_git() call.
We can make this work by adding the install call.
Note that we should be prepared to handle add_packed_git()
failing. We can just silently ignore this case, though. If
fsck_finish() later needs the objects and they're not
available, it will complain itself. And if it doesn't
(because we were able to resolve the whole fsck in the first
pass), then it actually isn't an interesting error at all.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-06-01 00:45:31 +02:00
|
|
|
if (do_fsck_object) {
|
|
|
|
struct packed_git *p;
|
|
|
|
p = add_packed_git(final_index_name, strlen(final_index_name), 0);
|
|
|
|
if (p)
|
2018-06-13 21:50:46 +02:00
|
|
|
install_packed_git(the_repository, p);
|
index-pack: handle --strict checks of non-repo packs
2018-06-01 00:45:31 +02:00
|
|
|
}
|
index-pack: check .gitmodules files with --strict
Now that the internal fsck code has all of the plumbing we
need, we can start checking incoming .gitmodules files.
Naively, it seems like we would just need to add a call to
fsck_finish() after we've processed all of the objects. And
that would be enough to cover the initial test included
here. But there are two extra bits:
1. We currently don't bother calling fsck_object() at all
for blobs, since it has traditionally been a noop. We'd
actually catch these blobs in fsck_finish() at the end,
but it's more efficient to check them when we already
have the object loaded in memory.
2. The second pass done by fsck_finish() needs to access
the objects, but we're actually indexing the pack in
this process. In theory we could give the fsck code a
special callback for accessing the in-pack data, but
it's actually quite tricky:
a. We don't have an internal efficient index mapping
oids to packfile offsets. We only generate it on
the fly as part of writing out the .idx file.
b. We'd still have to reconstruct deltas, which means
we'd basically have to replicate all of the
reading logic in packfile.c.
Instead, let's avoid running fsck_finish() until after
we've written out the .idx file, and then just add it
to our internal packed_git list.
This does mean that the objects are "in the repository"
before we finish our fsck checks. But unpack-objects
already exhibits this same behavior, and it's an
acceptable tradeoff here for the same reason: the
quarantine mechanism means that pushes will be
fully protected.
In addition to a basic push test in t7415, we add a sneaky
pack that reverses the usual object order in the pack,
requiring that index-pack access the tree and blob during
the "finish" step.
This already works for unpack-objects (since it will have
written out loose objects), but we'll check it with this
sneaky pack for good measure.
Signed-off-by: Jeff King <peff@peff.net>
2018-05-05 01:45:01 +02:00
|
|
|
|
2006-11-01 23:06:25 +01:00
|
|
|
if (!from_stdin) {
|
2019-08-18 22:04:23 +02:00
|
|
|
printf("%s\n", hash_to_hex(hash));
|
2006-11-01 23:06:25 +01:00
|
|
|
} else {
|
2017-03-28 21:46:50 +02:00
|
|
|
struct strbuf buf = STRBUF_INIT;
|
|
|
|
|
2019-08-18 22:04:23 +02:00
|
|
|
strbuf_addf(&buf, "%s\t%s\n", report, hash_to_hex(hash));
|
2017-03-28 21:46:50 +02:00
|
|
|
write_or_die(1, buf.buf, buf.len);
|
|
|
|
strbuf_release(&buf);
|
2006-11-01 23:06:25 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Let's just mimic git-unpack-objects here and write
|
|
|
|
* the last part of the input buffer to stdout.
|
|
|
|
*/
|
|
|
|
while (input_len) {
|
|
|
|
err = xwrite(1, input_buffer + input_offset, input_len);
|
|
|
|
if (err <= 0)
|
|
|
|
break;
|
|
|
|
input_len -= err;
|
|
|
|
input_offset += err;
|
|
|
|
}
|
|
|
|
}
|
2017-03-16 15:27:15 +01:00
|
|
|
|
index-pack: make pointer-alias fallbacks safer
2017-03-16 15:27:20 +01:00
|
|
|
strbuf_release(&index_name);
|
|
|
|
strbuf_release(&pack_name);
|
2005-10-12 21:01:31 +02:00
|
|
|
}
|
|
|
|
|
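Spelling out the move-into-place steps in final() above, with
illustrative paths: the temporary files written during indexing are
renamed by finalize_object_file() to their final, hash-derived names,

	objects/pack/tmp_pack_XXXXXX  ->  objects/pack/pack-<hash>.pack
	objects/pack/tmp_idx_XXXXXX   ->  objects/pack/pack-<hash>.idx

while in the --stdin case, where the data already sits at its final
path, the chmod(..., 0444) calls instead mark the files read-only in
place.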
2008-05-14 19:46:53 +02:00
|
|
|
static int git_index_pack_config(const char *k, const char *v, void *cb)
|
2007-11-02 04:26:04 +01:00
|
|
|
{
|
2011-02-26 00:43:25 +01:00
|
|
|
struct pack_idx_option *opts = cb;
|
|
|
|
|
2007-11-02 04:26:04 +01:00
|
|
|
if (!strcmp(k, "pack.indexversion")) {
|
2011-02-26 00:43:25 +01:00
|
|
|
opts->version = git_config_int(k, v);
|
|
|
|
if (opts->version > 2)
|
2012-08-31 14:13:04 +02:00
|
|
|
die(_("bad pack.indexversion=%"PRIu32), opts->version);
|
2007-11-02 04:26:04 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2012-05-06 14:31:55 +02:00
|
|
|
if (!strcmp(k, "pack.threads")) {
|
|
|
|
nr_threads = git_config_int(k, v);
|
|
|
|
if (nr_threads < 0)
|
2012-08-31 14:13:04 +02:00
|
|
|
die(_("invalid number of threads specified (%d)"),
|
2012-05-06 14:31:55 +02:00
|
|
|
nr_threads);
|
2018-11-03 09:48:40 +01:00
|
|
|
if (!HAVE_THREADS && nr_threads != 1) {
|
2012-08-31 14:13:04 +02:00
|
|
|
warning(_("no threads support, ignoring %s"), k);
|
2018-11-03 09:48:40 +01:00
|
|
|
nr_threads = 1;
|
|
|
|
}
|
2012-05-06 14:31:55 +02:00
|
|
|
return 0;
|
|
|
|
}
|
2008-05-14 19:46:53 +02:00
|
|
|
return git_default_config(k, v, cb);
|
2007-11-02 04:26:04 +01:00
|
|
|
}
|
|
|
|
|
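The two keys handled by the callback above map to ordinary
configuration entries; for example (values illustrative):

[pack]
	indexVersion = 2
	threads = 4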
2011-02-26 01:55:26 +01:00
|
|
|
static int cmp_uint32(const void *a_, const void *b_)
|
|
|
|
{
|
|
|
|
uint32_t a = *((uint32_t *)a_);
|
|
|
|
uint32_t b = *((uint32_t *)b_);
|
|
|
|
|
|
|
|
return (a < b) ? -1 : (a != b);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void read_v2_anomalous_offsets(struct packed_git *p,
|
|
|
|
struct pack_idx_option *opts)
|
|
|
|
{
|
|
|
|
const uint32_t *idx1, *idx2;
|
|
|
|
uint32_t i;
|
|
|
|
|
|
|
|
/* The address of the 4-byte offset table */
|
2020-05-25 21:59:10 +02:00
|
|
|
idx1 = (((const uint32_t *)((const uint8_t *)p->index_data + p->crc_offset))
|
2011-02-26 01:55:26 +01:00
|
|
|
+ p->num_objects /* CRC32 table */
|
|
|
|
);
|
|
|
|
|
|
|
|
/* The address of the 8-byte offset table */
|
|
|
|
idx2 = idx1 + p->num_objects;
|
|
|
|
|
|
|
|
for (i = 0; i < p->num_objects; i++) {
|
|
|
|
uint32_t off = ntohl(idx1[i]);
|
|
|
|
if (!(off & 0x80000000))
|
|
|
|
continue;
|
|
|
|
off = off & 0x7fffffff;
|
2016-02-25 15:22:52 +01:00
|
|
|
check_pack_index_ptr(p, &idx2[off * 2]);
|
2011-02-26 01:55:26 +01:00
|
|
|
if (idx2[off * 2])
|
|
|
|
continue;
|
|
|
|
/*
|
|
|
|
* The real offset is ntohl(idx2[off * 2]) in high 4
|
|
|
|
* octets, and ntohl(idx2[off * 2 + 1]) in low 4
|
|
|
|
* octets. But idx2[off * 2] is Zero!!!
|
|
|
|
*/
|
|
|
|
ALLOC_GROW(opts->anomaly, opts->anomaly_nr + 1, opts->anomaly_alloc);
|
|
|
|
opts->anomaly[opts->anomaly_nr++] = ntohl(idx2[off * 2 + 1]);
|
|
|
|
}
|
|
|
|
|
2016-09-30 01:40:14 +02:00
|
|
|
QSORT(opts->anomaly, opts->anomaly_nr, cmp_uint32);
|
2011-02-26 01:55:26 +01:00
|
|
|
}
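To restate the layout this function assumes: in a version-2 .idx file the CRC32 table is followed by num_objects 4-byte offsets, and an entry whose most significant bit is set is instead an index into a trailing table of 8-byte offsets. A minimal sketch of decoding one entry under that layout (both tables still in network byte order, as above):

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl */

/* idx1: 4-byte offset table; idx2: 8-byte large-offset table. */
static uint64_t decode_v2_offset(const uint32_t *idx1,
				 const uint32_t *idx2, uint32_t i)
{
	uint32_t off = ntohl(idx1[i]);

	if (!(off & 0x80000000))
		return off;		/* fits in 31 bits, stored inline */
	off &= 0x7fffffff;		/* index into the 64-bit table */
	return ((uint64_t)ntohl(idx2[off * 2]) << 32) |
	       ntohl(idx2[off * 2 + 1]);
}

An "anomalous" offset, then, is one routed through the 64-bit table even though its high 4 octets are zero; those are exactly the values the loop above collects and sorts.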
|
|
|
|
|
2011-02-03 02:29:01 +01:00
|
|
|
static void read_idx_option(struct pack_idx_option *opts, const char *pack_name)
|
|
|
|
{
|
|
|
|
struct packed_git *p = add_packed_git(pack_name, strlen(pack_name), 1);
|
|
|
|
|
|
|
|
if (!p)
|
2012-04-23 14:30:29 +02:00
|
|
|
die(_("Cannot open existing pack file '%s'"), pack_name);
|
2011-02-03 02:29:01 +01:00
|
|
|
if (open_pack_index(p))
|
2012-04-23 14:30:29 +02:00
|
|
|
die(_("Cannot open existing pack idx file for '%s'"), pack_name);
|
2011-02-03 02:29:01 +01:00
|
|
|
|
|
|
|
/* Read the attributes from the existing idx file */
|
|
|
|
opts->version = p->index_version;
|
|
|
|
|
2011-02-26 01:55:26 +01:00
|
|
|
if (opts->version == 2)
|
|
|
|
read_v2_anomalous_offsets(p, opts);
|
|
|
|
|
2011-02-03 02:29:01 +01:00
|
|
|
/*
|
|
|
|
* Get rid of the idx file as we do not need it anymore.
|
|
|
|
* NEEDSWORK: extract this bit from free_pack_by_name() in
|
2018-04-10 23:26:20 +02:00
|
|
|
* sha1-file.c, perhaps? It shouldn't matter very much as we
|
2011-02-03 02:29:01 +01:00
|
|
|
* know we haven't installed this pack (hence we never have
|
|
|
|
* read anything from it).
|
|
|
|
*/
|
|
|
|
close_pack_index(p);
|
|
|
|
free(p);
|
|
|
|
}
|
|
|
|
|
2011-06-04 00:32:15 +02:00
|
|
|
static void show_pack_info(int stat_only)
|
|
|
|
{
|
2015-04-18 12:47:05 +02:00
|
|
|
int i, baseobjects = nr_objects - nr_ref_deltas - nr_ofs_deltas;
|
2011-06-04 00:32:16 +02:00
|
|
|
unsigned long *chain_histogram = NULL;
|
|
|
|
|
|
|
|
if (deepest_delta)
|
|
|
|
chain_histogram = xcalloc(deepest_delta, sizeof(unsigned long));
|
|
|
|
|
2011-06-04 00:32:15 +02:00
|
|
|
for (i = 0; i < nr_objects; i++) {
|
|
|
|
struct object_entry *obj = &objects[i];
|
|
|
|
|
2011-06-04 00:32:16 +02:00
|
|
|
if (is_delta_type(obj->type))
|
2015-02-26 11:52:07 +01:00
|
|
|
chain_histogram[obj_stat[i].delta_depth - 1]++;
|
2011-06-04 00:32:15 +02:00
|
|
|
if (stat_only)
|
|
|
|
continue;
|
2018-11-11 08:05:04 +01:00
|
|
|
printf("%s %-6s %"PRIuMAX" %"PRIuMAX" %"PRIuMAX,
|
2017-05-07 00:10:11 +02:00
|
|
|
oid_to_hex(&obj->idx.oid),
|
2018-11-11 08:05:04 +01:00
|
|
|
type_name(obj->real_type), (uintmax_t)obj->size,
|
|
|
|
(uintmax_t)(obj[1].idx.offset - obj->idx.offset),
|
2011-06-04 00:32:15 +02:00
|
|
|
(uintmax_t)obj->idx.offset);
|
|
|
|
if (is_delta_type(obj->type)) {
|
2015-02-26 11:52:07 +01:00
|
|
|
struct object_entry *bobj = &objects[obj_stat[i].base_object_no];
|
2017-05-07 00:10:11 +02:00
|
|
|
printf(" %u %s", obj_stat[i].delta_depth,
|
|
|
|
oid_to_hex(&bobj->idx.oid));
|
2011-06-04 00:32:15 +02:00
|
|
|
}
|
|
|
|
putchar('\n');
|
|
|
|
}
|
2011-06-04 00:32:16 +02:00
|
|
|
|
|
|
|
if (baseobjects)
|
2012-04-23 14:30:29 +02:00
|
|
|
printf_ln(Q_("non delta: %d object",
|
|
|
|
"non delta: %d objects",
|
|
|
|
baseobjects),
|
|
|
|
baseobjects);
|
2011-06-04 00:32:16 +02:00
|
|
|
for (i = 0; i < deepest_delta; i++) {
|
|
|
|
if (!chain_histogram[i])
|
|
|
|
continue;
|
2012-04-23 14:30:29 +02:00
|
|
|
printf_ln(Q_("chain length = %d: %lu object",
|
|
|
|
"chain length = %d: %lu objects",
|
|
|
|
chain_histogram[i]),
|
|
|
|
i + 1,
|
|
|
|
chain_histogram[i]);
|
2011-06-04 00:32:16 +02:00
|
|
|
}
|
2011-06-04 00:32:15 +02:00
|
|
|
}
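Putting the printf formats above together, each object line is "<oid> <type> <size> <stored-size> <offset>", and delta objects additionally get "<depth> <base-oid>". Illustrative output for a three-object pack (made-up, abbreviated object names):

89ab12... commit 245 180 12
77cd34... blob   4096 1400 192
51ef56... blob   60 75 1592 1 77cd34...
non delta: 2 objects
chain length = 1: 1 object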
|
|
|
|
|
2010-01-22 16:55:19 +01:00
|
|
|
int cmd_index_pack(int argc, const char **argv, const char *prefix)
|
2005-10-12 21:01:31 +02:00
|
|
|
{
|
2013-03-19 14:01:15 +01:00
|
|
|
int i, fix_thin_pack = 0, verify = 0, stat_only = 0;
|
2014-03-25 14:41:41 +01:00
|
|
|
const char *curr_index;
|
2010-01-22 16:55:19 +01:00
|
|
|
const char *index_name = NULL, *pack_name = NULL;
|
2017-12-05 17:58:48 +01:00
|
|
|
const char *keep_msg = NULL;
|
introduce fetch-object: fetch one promisor object
Introduce fetch-object, providing the ability to fetch one object from a
promisor remote.
This uses fetch-pack. To do this, the transport mechanism has been
updated with two flags, "from-promisor" to indicate that the resulting
pack comes from a promisor remote (and thus should be annotated as such
by index-pack), and "no-dependents" to indicate that only the objects
themselves need to be fetched (but fetching additional objects is
nevertheless safe).
Whenever "no-dependents" is used, fetch-pack will refrain from using any
object flags, because it is most likely invoked as part of a dynamic
object fetch by another Git command (which may itself use object flags).
An alternative to this is to leave fetch-pack alone, and instead update
the allocation of flags so that fetch-pack's flags never overlap with
any others, but this will end up shrinking the number of flags available
to nearly every other Git command (that is, every Git command that
accesses objects), so the approach in this commit was used instead.
This will be tested in a subsequent commit.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-12-05 17:58:49 +01:00
|
|
|
const char *promisor_msg = NULL;
|
2017-12-05 17:58:48 +01:00
|
|
|
struct strbuf index_name_buf = STRBUF_INIT;
|
2007-06-01 21:18:05 +02:00
|
|
|
struct pack_idx_entry **idx_objects;
|
2011-02-26 00:43:25 +01:00
|
|
|
struct pack_idx_option opts;
|
2018-02-01 03:18:39 +01:00
|
|
|
unsigned char pack_hash[GIT_MAX_RAWSZ];
|
2013-05-26 03:16:17 +02:00
|
|
|
unsigned foreign_nr = 1; /* zero is a "good" value, assume bad */
|
receive-pack: send keepalives during quiet periods
After a client has sent us the complete pack, we may spend
some time processing the data and running hooks. If the
client asked us to be quiet, receive-pack won't send any
progress data during the index-pack or connectivity-check
steps. And hooks may or may not produce their own progress
output. In these cases, the network connection is totally
silent from both ends.
Git itself doesn't care about this (it will wait forever),
but other parts of the system (e.g., firewalls,
load-balancers) might hang up the connection. So we'd
like to send some sort of keepalive to let the network and
the client side know that we're still alive and processing.
We can use the same trick we did in 05e9515 (upload-pack:
send keepalive packets during pack computation, 2013-09-08).
Namely, we will send an empty sideband data packet every `N`
seconds during which we do not relay any stderr data over the
sideband channel. As with 05e9515, this means that we won't
bother sending keepalives when there's actual progress data,
but they will kick in when progress has been disabled (or if
there is a lull in the progress data).
The concept is simple, but the details are subtle enough
that they need discussing here.
Before the client sends us the pack, we don't want to do any
keepalives. We'll have sent our ref advertisement, and we're
waiting for them to send us the pack (and tell us that they
support sidebands at all).
While we're receiving the pack from the client (or waiting
for it to start), there's no need for keepalives; it's up to
them to keep the connection active by sending data.
Moreover, it would be wrong for us to do so. When we are the
server in the smart-http protocol, we must treat our
connection as half-duplex. So any keepalives we send while
receiving the pack would potentially be buffered by the
webserver. Not only does this make them useless (since they
would not be delivered in a timely manner), but it could
actually cause a deadlock if we fill up the buffer with
keepalives. (It wouldn't be wrong to send keepalives in this
phase for a full-duplex connection like ssh; it's simply
pointless, as it is the client's responsibility to speak).
As soon as we've gotten all of the pack data, then the
client is waiting for us to speak, and we should start
keepalives immediately. From here until the end of the
connection, we send one any time we are not otherwise
sending data.
But there's a catch. Receive-pack doesn't know the moment
we've gotten all the data. It passes the descriptor to
index-pack, which reads all of the data and then starts
resolving the deltas. We have to communicate that back.
To make this work, we instruct the sideband muxer to enable
keepalives in three phases:
1. In the beginning, not at all.
2. While reading from index-pack, wait for a signal
indicating end-of-input, and then start them.
3. Afterwards, always.
The signal from index-pack in phase 2 has to come over the
stderr channel which the muxer is reading. We can't use an
extra pipe because the portable run-command interface only
gives us stderr and stdout.
Stdout is already used to pass the .keep filename back to
receive-pack. We could also send a signal there, but then we
would find out about it in the main thread. And the
keepalive needs to be done by the async muxer thread (since
it's the one writing sideband data back to the client). And
we can't reliably signal the async thread from the main
thread, because the async code sometimes uses threads and
sometimes uses forked processes.
Therefore the signal must come over the stderr channel,
where it may be interspersed with other random
human-readable messages from index-pack. This patch makes
the signal a single NUL byte. This is easy to parse, should
not appear in any normal stderr output, and we don't have to
worry about any timing issues (like seeing half the signal
bytes in one read(), and half in a subsequent one).
This is a bit ugly, but it's simple to code and should work
reliably.
Another option would be to stop using an async thread for
muxing entirely, and just poll() both stderr and stdout of
index-pack from the main thread. This would work for
index-pack (because we aren't doing anything useful in the
main thread while it runs anyway). But it would make the
connectivity check and the hook muxers much more
complicated, as they need to simultaneously feed the
sub-programs while reading their stderr.
The index-pack phase is the only one that needs this
signaling, so it could simply behave differently than the
other two. That would mean having two separate
implementations of copy_to_sideband (and the keepalive
code), though. And it still doesn't get rid of the
signaling; it just means we can write a nicer message like
"END_OF_INPUT" or something on stdout, since we don't have
to worry about separating it from the stderr cruft.
One final note: this signaling trick is only done with
index-pack, not with unpack-objects. There's no point in
doing it for the latter, because by definition it only kicks
in for a small number of objects, where keepalives are not
as useful (and this conveniently lets us avoid duplicating
the implementation).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-07-15 12:43:47 +02:00
|
|
|
int report_end_of_input = 0;
|
2005-10-12 21:01:31 +02:00
|
|
|
|
2017-12-08 16:27:14 +01:00
|
|
|
/*
|
index-pack: prefetch missing REF_DELTA bases
When fetching, the client sends "have" commit IDs indicating that the
server does not need to send any object referenced by those commits,
reducing network I/O. When the client is a partial clone, the client
still sends "have"s in this way, even if it does not have every object
referenced by a commit it sent as "have".
If a server omits such an object, it is fine: the client could lazily
fetch that object before this fetch, and it can still do so after.
The issue is when the server sends a thin pack containing an object that
is a REF_DELTA against such a missing object: index-pack fails to fix
the thin pack. When support for lazily fetching missing objects was
added in 8b4c0103a9 ("sha1_file: support lazily fetching missing
objects", 2017-12-08), support in index-pack was turned off in the
belief that it accesses the repo only to do hash collision checks.
However, this is not true: it also needs to access the repo to resolve
REF_DELTA bases.
Support for lazy fetching should still generally be turned off in
index-pack because it is used as part of the lazy fetching process
itself (if not, infinite loops may occur), but we do need to fetch the
REF_DELTA bases. (When fetching REF_DELTA bases, it is unlikely that
those are REF_DELTA themselves, because we do not send "have" when
making such fetches.)
To resolve this, prefetch all missing REF_DELTA bases before attempting
to resolve them. This ensures both that we attempt to fetch all
bases and that we make only one request per index-pack
invocation, rather than one request per missing object.
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-05-14 23:10:55 +02:00
|
|
|
* index-pack never needs to fetch missing objects except when
|
|
|
|
* REF_DELTA bases are missing (which are explicitly handled). It only
|
|
|
|
* accesses the repo to do hash collision checks and to check which
|
|
|
|
* REF_DELTA bases need to be fetched.
|
2017-12-08 16:27:14 +01:00
|
|
|
*/
|
|
|
|
fetch_if_missing = 0;
|
|
|
|
|
2009-11-09 16:05:01 +01:00
|
|
|
if (argc == 2 && !strcmp(argv[1], "-h"))
|
|
|
|
usage(index_pack_usage);
|
|
|
|
|
2018-07-18 22:45:20 +02:00
|
|
|
read_replace_refs = 0;
|
2015-06-22 17:25:00 +02:00
|
|
|
fsck_options.walk = mark_link;
|
2010-08-12 16:18:12 +02:00
|
|
|
|
2011-02-26 00:43:25 +01:00
|
|
|
reset_pack_idx_option(&opts);
|
|
|
|
git_config(git_index_pack_config, &opts);
|
2010-07-24 13:30:49 +02:00
|
|
|
if (prefix && chdir(prefix))
|
2012-04-23 14:30:29 +02:00
|
|
|
die(_("Cannot come back to cwd"));
|
2007-11-02 04:26:04 +01:00
|
|
|
|
2005-10-12 21:01:31 +02:00
|
|
|
for (i = 1; i < argc; i++) {
|
2010-01-22 16:55:19 +01:00
|
|
|
const char *arg = argv[i];
|
2005-10-12 21:01:31 +02:00
|
|
|
|
|
|
|
if (*arg == '-') {
|
2006-10-23 20:50:18 +02:00
|
|
|
if (!strcmp(arg, "--stdin")) {
|
|
|
|
from_stdin = 1;
|
2006-10-26 05:28:17 +02:00
|
|
|
} else if (!strcmp(arg, "--fix-thin")) {
|
|
|
|
fix_thin_pack = 1;
|
2017-12-09 21:40:08 +01:00
|
|
|
} else if (skip_to_optional_arg(arg, "--strict", &arg)) {
|
2015-06-22 17:25:31 +02:00
|
|
|
strict = 1;
|
|
|
|
do_fsck_object = 1;
|
|
|
|
fsck_set_msg_types(&fsck_options, arg);
|
2013-05-26 03:16:17 +02:00
|
|
|
} else if (!strcmp(arg, "--check-self-contained-and-connected")) {
|
|
|
|
strict = 1;
|
|
|
|
check_self_contained_and_connected = 1;
|
2018-03-14 19:42:40 +01:00
|
|
|
} else if (!strcmp(arg, "--fsck-objects")) {
|
|
|
|
do_fsck_object = 1;
|
2011-02-03 02:29:01 +01:00
|
|
|
} else if (!strcmp(arg, "--verify")) {
|
|
|
|
verify = 1;
|
2011-06-04 00:32:15 +02:00
|
|
|
} else if (!strcmp(arg, "--verify-stat")) {
|
|
|
|
verify = 1;
|
2013-03-19 14:01:15 +01:00
|
|
|
show_stat = 1;
|
2011-06-04 00:32:15 +02:00
|
|
|
} else if (!strcmp(arg, "--verify-stat-only")) {
|
|
|
|
verify = 1;
|
2013-03-19 14:01:15 +01:00
|
|
|
show_stat = 1;
|
2011-06-04 00:32:15 +02:00
|
|
|
stat_only = 1;
|
2017-12-09 21:40:08 +01:00
|
|
|
} else if (skip_to_optional_arg(arg, "--keep", &keep_msg)) {
|
|
|
|
; /* nothing to do */
|
2018-02-13 22:39:03 +01:00
|
|
|
} else if (skip_to_optional_arg(arg, "--promisor", &promisor_msg)) {
|
|
|
|
; /* already parsed */
|
2013-11-30 21:55:40 +01:00
|
|
|
} else if (starts_with(arg, "--threads=")) {
|
2012-05-06 14:31:55 +02:00
|
|
|
char *end;
|
|
|
|
nr_threads = strtoul(arg+10, &end, 0);
|
|
|
|
if (!arg[10] || *end || nr_threads < 0)
|
|
|
|
usage(index_pack_usage);
|
2018-11-03 09:48:40 +01:00
|
|
|
if (!HAVE_THREADS && nr_threads != 1) {
|
|
|
|
warning(_("no threads support, ignoring %s"), arg);
|
|
|
|
nr_threads = 1;
|
|
|
|
}
|
2013-11-30 21:55:40 +01:00
|
|
|
} else if (starts_with(arg, "--pack_header=")) {
|
2006-11-01 23:06:20 +01:00
|
|
|
struct pack_header *hdr;
|
|
|
|
char *c;
|
|
|
|
|
|
|
|
hdr = (struct pack_header *)input_buffer;
|
|
|
|
hdr->hdr_signature = htonl(PACK_SIGNATURE);
|
|
|
|
hdr->hdr_version = htonl(strtoul(arg + 14, &c, 10));
|
|
|
|
if (*c != ',')
|
2012-04-23 14:30:29 +02:00
|
|
|
die(_("bad %s"), arg);
|
2006-11-01 23:06:20 +01:00
|
|
|
hdr->hdr_entries = htonl(strtoul(c + 1, &c, 10));
|
|
|
|
if (*c)
|
2012-04-23 14:30:29 +02:00
|
|
|
die(_("bad %s"), arg);
|
2006-11-01 23:06:20 +01:00
|
|
|
input_len = sizeof(*hdr);
|
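For context, struct pack_header (declared in Git's pack.h) is three network-order 32-bit words, which is why the option only needs to supply the version and entry count:

struct pack_header {
	uint32_t hdr_signature;	/* PACK_SIGNATURE, i.e. "PACK" */
	uint32_t hdr_version;
	uint32_t hdr_entries;
};

A caller that has already consumed the 12 header bytes from the stream, as receive-pack does, might then run, for example, "git index-pack --pack_header=2,137 --stdin".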
2006-10-26 05:32:59 +02:00
|
|
|
} else if (!strcmp(arg, "-v")) {
|
|
|
|
verbose = 1;
|
index-pack: add flag for showing delta-resolution progress
The index-pack command has two progress meters: one for
"receiving objects", and one for "resolving deltas". You get
neither by default, and both with "-v".
But for a push through receive-pack, we would want only the
"resolving deltas" phase, _not_ the "receiving objects"
progress. There are two reasons for this.
One is simply that existing clients are already printing
"writing objects" progress at the same time. Arguably
"receiving" from the far end is more useful, because it
tells you what has actually gotten there, as opposed to what
might be stuck in a buffer somewhere between the client and
server. But that would require a protocol extension to tell
clients not to print their progress. Possible, but it adds
complexity for little gain.
The second reason is much more important. In a full-duplex
connection like git-over-ssh, we can print progress while
the pack is incoming, and it will immediately get to the
client. But for a half-duplex connection like git-over-http,
we should not say anything until we have received the full
request. Anything we write is subject to being stuck in a
buffer by the webserver. Worse, we can end up in a deadlock
if that buffer fills up.
So our best bet is to avoid writing anything that isn't a
small fixed size until we've received the full pack.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-07-15 12:34:22 +02:00
|
|
|
} else if (!strcmp(arg, "--show-resolving-progress")) {
|
|
|
|
show_resolving_progress = 1;
|
2016-07-15 12:43:47 +02:00
|
|
|
} else if (!strcmp(arg, "--report-end-of-input")) {
|
|
|
|
report_end_of_input = 1;
|
2006-10-23 20:50:18 +02:00
|
|
|
} else if (!strcmp(arg, "-o")) {
|
2005-10-12 21:01:31 +02:00
|
|
|
if (index_name || (i+1) >= argc)
|
|
|
|
usage(index_pack_usage);
|
|
|
|
index_name = argv[++i];
|
2013-11-30 21:55:40 +01:00
|
|
|
} else if (starts_with(arg, "--index-version=")) {
|
2007-04-09 23:32:03 +02:00
|
|
|
char *c;
|
2011-02-26 00:43:25 +01:00
|
|
|
opts.version = strtoul(arg + 16, &c, 10);
|
|
|
|
if (opts.version > 2)
|
2012-04-23 14:30:29 +02:00
|
|
|
die(_("bad %s"), arg);
|
2007-04-09 23:32:03 +02:00
|
|
|
if (*c == ',')
|
2011-02-26 00:43:25 +01:00
|
|
|
opts.off32_limit = strtoul(c+1, &c, 0);
|
|
|
|
if (*c || opts.off32_limit & 0x80000000)
|
2012-04-23 14:30:29 +02:00
|
|
|
die(_("bad %s"), arg);
|
2016-08-24 20:41:55 +02:00
|
|
|
} else if (skip_prefix(arg, "--max-input-size=", &arg)) {
|
|
|
|
max_input_size = strtoumax(arg, NULL, 10);
|
2005-10-12 21:01:31 +02:00
|
|
|
} else
|
|
|
|
usage(index_pack_usage);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (pack_name)
|
|
|
|
usage(index_pack_usage);
|
|
|
|
pack_name = arg;
|
|
|
|
}
|
|
|
|
|
2006-10-23 20:50:18 +02:00
|
|
|
if (!pack_name && !from_stdin)
|
2005-10-12 21:01:31 +02:00
|
|
|
usage(index_pack_usage);
|
2006-10-26 05:28:17 +02:00
|
|
|
if (fix_thin_pack && !from_stdin)
|
2012-04-23 14:30:29 +02:00
|
|
|
die(_("--fix-thin cannot be used without --stdin"));
|
2016-12-16 03:30:59 +01:00
|
|
|
if (from_stdin && !startup_info->have_repository)
|
|
|
|
die(_("--stdin requires a git repository"));
|
2016-03-03 20:29:09 +01:00
|
|
|
if (!index_name && pack_name)
|
2017-12-05 17:58:48 +01:00
|
|
|
index_name = derive_filename(pack_name, "idx", &index_name_buf);
|
2016-03-03 20:29:09 +01:00
|
|
|
|
2011-02-03 02:29:01 +01:00
|
|
|
if (verify) {
|
|
|
|
if (!index_name)
|
2012-04-23 14:30:29 +02:00
|
|
|
die(_("--verify with no packfile name given"));
|
2011-02-03 02:29:01 +01:00
|
|
|
read_idx_option(&opts, index_name);
|
2011-11-17 07:04:13 +01:00
|
|
|
opts.flags |= WRITE_IDX_VERIFY | WRITE_IDX_STRICT;
|
2011-02-03 02:29:01 +01:00
|
|
|
}
|
2011-11-17 07:04:13 +01:00
|
|
|
if (strict)
|
|
|
|
opts.flags |= WRITE_IDX_STRICT;
|
2005-10-12 21:01:31 +02:00
|
|
|
|
2018-11-03 09:48:40 +01:00
|
|
|
if (HAVE_THREADS && !nr_threads) {
|
2012-05-06 14:31:55 +02:00
|
|
|
nr_threads = online_cpus();
|
|
|
|
/* An experiment showed that using more threads does not make it faster */
|
|
|
|
if (nr_threads > 3)
|
|
|
|
nr_threads = 3;
|
|
|
|
}
|
|
|
|
|
2006-10-23 20:50:18 +02:00
|
|
|
curr_pack = open_pack_file(pack_name);
|
2005-10-12 21:01:31 +02:00
|
|
|
parse_pack_header();
|
2016-02-22 23:44:35 +01:00
|
|
|
objects = xcalloc(st_add(nr_objects, 1), sizeof(struct object_entry));
|
2015-02-26 11:52:07 +01:00
|
|
|
if (show_stat)
|
2016-02-22 23:44:35 +01:00
|
|
|
obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat));
|
2015-04-18 12:47:05 +02:00
|
|
|
ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry));
|
2018-02-01 03:18:39 +01:00
|
|
|
parse_pack_objects(pack_hash);
|
2016-07-15 12:43:47 +02:00
|
|
|
if (report_end_of_input)
|
|
|
|
write_in_full(2, "\0", 1);
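On the receiving side (per the keepalive commit message above), the sideband muxer watches index-pack's stderr for this NUL byte before enabling keepalives. A hedged sketch of that detection, with a hypothetical helper name:

#include <string.h>

/* Strip a NUL end-of-input marker from a stderr chunk, closing the
 * gap so the human-readable output stays intact; returns 1 if seen. */
static int strip_end_of_input_marker(char *buf, size_t *len)
{
	char *nul = memchr(buf, '\0', *len);

	if (!nul)
		return 0;
	memmove(nul, nul + 1, *len - (nul - buf) - 1);
	(*len)--;
	return 1;	/* time to start sending keepalives */
}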
|
2012-05-06 14:31:54 +02:00
|
|
|
resolve_deltas();
|
2018-02-01 03:18:39 +01:00
|
|
|
conclude_pack(fix_thin_pack, curr_pack, pack_hash);
|
2015-04-18 12:47:05 +02:00
|
|
|
free(ofs_deltas);
|
|
|
|
free(ref_deltas);
|
2008-02-25 22:46:12 +01:00
|
|
|
if (strict)
|
2013-05-26 03:16:17 +02:00
|
|
|
foreign_nr = check_objects();
|
2007-06-01 21:18:05 +02:00
|
|
|
|
2013-03-19 14:01:15 +01:00
|
|
|
if (show_stat)
|
2011-06-04 00:32:15 +02:00
|
|
|
show_pack_info(stat_only);
|
|
|
|
|
2016-02-22 23:44:25 +01:00
|
|
|
ALLOC_ARRAY(idx_objects, nr_objects);
|
2007-06-01 21:18:05 +02:00
|
|
|
for (i = 0; i < nr_objects; i++)
|
|
|
|
idx_objects[i] = &objects[i].idx;
|
2018-02-01 03:18:39 +01:00
|
|
|
curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash);
|
2007-06-01 21:18:05 +02:00
|
|
|
free(idx_objects);
|
|
|
|
|
2011-02-03 02:29:01 +01:00
|
|
|
if (!verify)
|
|
|
|
final(pack_name, curr_pack,
|
|
|
|
index_name, curr_index,
|
2017-12-05 17:58:49 +01:00
|
|
|
keep_msg, promisor_msg,
|
2018-02-01 03:18:39 +01:00
|
|
|
pack_hash);
|
2011-02-03 02:29:01 +01:00
|
|
|
else
|
|
|
|
close(input_fd);
|
index-pack: check .gitmodules files with --strict
Now that the internal fsck code has all of the plumbing we
need, we can start checking incoming .gitmodules files.
Naively, it seems like we would just need to add a call to
fsck_finish() after we've processed all of the objects. And
that would be enough to cover the initial test included
here. But there are two extra bits:
1. We currently don't bother calling fsck_object() at all
for blobs, since it has traditionally been a noop. We'd
actually catch these blobs in fsck_finish() at the end,
but it's more efficient to check them when we already
have the object loaded in memory.
2. The second pass done by fsck_finish() needs to access
the objects, but we're actually indexing the pack in
this process. In theory we could give the fsck code a
special callback for accessing the in-pack data, but
it's actually quite tricky:
a. We don't have an internal efficient index mapping
oids to packfile offsets. We only generate it on
the fly as part of writing out the .idx file.
b. We'd still have to reconstruct deltas, which means
we'd basically have to replicate all of the
reading logic in packfile.c.
Instead, let's avoid running fsck_finish() until after
we've written out the .idx file, and then just add it
to our internal packed_git list.
This does mean that the objects are "in the repository"
before we finish our fsck checks. But unpack-objects
already exhibits this same behavior, and it's an
acceptable tradeoff here for the same reason: the
quarantine mechanism means that pushes will be
fully protected.
In addition to a basic push test in t7415, we add a sneaky
pack that reverses the usual object order in the pack,
requiring that index-pack access the tree and blob during
the "finish" step.
This already works for unpack-objects (since it will have
written out loose objects), but we'll check it with this
sneaky pack for good measure.
Signed-off-by: Jeff King <peff@peff.net>
2018-05-05 01:45:01 +02:00
|
|
|
|
|
|
|
if (do_fsck_object && fsck_finish(&fsck_options))
|
|
|
|
die(_("fsck error in pack objects"));
|
|
|
|
|
2005-10-12 21:01:31 +02:00
|
|
|
free(objects);
|
2014-06-30 18:59:10 +02:00
|
|
|
strbuf_release(&index_name_buf);
|
2007-10-17 03:55:50 +02:00
|
|
|
if (pack_name == NULL)
|
2010-01-22 16:55:19 +01:00
|
|
|
free((void *) curr_pack);
|
2007-10-17 03:55:50 +02:00
|
|
|
if (index_name == NULL)
|
2010-01-22 16:55:19 +01:00
|
|
|
free((void *) curr_index);
|
2005-10-12 21:01:31 +02:00
|
|
|
|
2013-05-26 03:16:17 +02:00
|
|
|
/*
|
|
|
|
* Let the caller know this pack is not self contained
|
|
|
|
*/
|
|
|
|
if (check_self_contained_and_connected && foreign_nr)
|
|
|
|
return 1;
|
|
|
|
|
2005-10-12 21:01:31 +02:00
|
|
|
return 0;
|
|
|
|
}
|