#include "cache.h"
#include "repository.h"
#include "pack.h"
#include "pack-revindex.h"
#include "progress.h"
#include "packfile.h"
#include "object-store.h"

struct idx_entry {
	off_t offset;
	unsigned int nr;
};

static int compare_entries(const void *e1, const void *e2)
{
	const struct idx_entry *entry1 = e1;
	const struct idx_entry *entry2 = e2;
	if (entry1->offset < entry2->offset)
		return -1;
	if (entry1->offset > entry2->offset)
		return 1;
	return 0;
}

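/*
 * Verify the CRC32 of the packed object data found at "offset" (of length
 * "len") against the CRC recorded for index position "nr" in the v2 pack
 * index.  Returns 0 on match, non-zero on mismatch.
 */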
int check_pack_crc(struct packed_git *p, struct pack_window **w_curs,
		   off_t offset, off_t len, unsigned int nr)
{
	const uint32_t *index_crc;
	uint32_t data_crc = crc32(0, NULL, 0);

	do {
		unsigned long avail;
		void *data = use_pack(p, w_curs, offset, &avail);
		if (avail > len)
			avail = len;
		data_crc = crc32(data_crc, data, avail);
		offset += avail;
		len -= avail;
	} while (len);

	index_crc = p->index_data;
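	/*
	 * The CRC table sits after the 8-byte header, the 256-entry fan-out
	 * table, and the object name table.  Cast num_objects to size_t so
	 * the offset arithmetic is done in 64 bits; as a plain uint32_t
	 * product it can overflow for packs with very many objects.
	 */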
	index_crc += 2 + 256 + (size_t)p->num_objects * (the_hash_algo->rawsz/4) + nr;

	return data_crc != ntohl(*index_crc);
}

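/*
 * Walk a single packfile: recompute its trailing checksum, make sure the
 * index and the pack agree on that checksum, and then process every object
 * in pack order, checking the per-object CRC (for v2 indexes) and the
 * object content hash.  An optional callback "fn" is invoked per object.
 */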
static int verify_packfile(struct repository *r,
			   struct packed_git *p,
			   struct pack_window **w_curs,
			   verify_fn fn,
			   struct progress *progress, uint32_t base_count)
{
	off_t index_size = p->index_size;
	const unsigned char *index_base = p->index_data;
	git_hash_ctx ctx;
	unsigned char hash[GIT_MAX_RAWSZ], *pack_sig;
	off_t offset = 0, pack_sig_ofs = 0;
	uint32_t nr_objects, i;
	int err = 0;
	struct idx_entry *entries;

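	/*
	 * Open and validate the pack up front; otherwise the first
	 * use_pack() call below would die() instead of letting us report
	 * an error and move on to other packs.
	 */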
	if (!is_pack_valid(p))
		return error("packfile %s cannot be accessed", p->pack_name);

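	/*
	 * Hash the pack data up to (but not including) the trailing
	 * checksum, then check that it matches the pack trailer and that
	 * the trailer matches the checksum copy recorded in the .idx file.
	 */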
	r->hash_algo->init_fn(&ctx);
	do {
		unsigned long remaining;
		unsigned char *in = use_pack(p, w_curs, offset, &remaining);
		offset += remaining;
		if (!pack_sig_ofs)
			pack_sig_ofs = p->pack_size - r->hash_algo->rawsz;
		if (offset > pack_sig_ofs)
			remaining -= (unsigned int)(offset - pack_sig_ofs);
		r->hash_algo->update_fn(&ctx, in, remaining);
	} while (offset < pack_sig_ofs);
	r->hash_algo->final_fn(hash, &ctx);
	pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
	if (!hasheq(hash, pack_sig))
		err = error("%s pack checksum mismatch",
			    p->pack_name);
	if (!hasheq(index_base + index_size - r->hash_algo->hexsz, pack_sig))
		err = error("%s pack checksum does not match its index",
			    p->pack_name);
	unuse_pack(w_curs);

	/* Make sure everything reachable from idx is valid. Since we
	 * have verified that nr_objects matches between idx and pack,
	 * we do not do scan-streaming check on the pack file.
	 */
	nr_objects = p->num_objects;
	ALLOC_ARRAY(entries, nr_objects + 1);
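	/*
	 * The extra sentinel entry marks where the last object's data ends
	 * (the start of the pack trailer), so entries[i+1].offset - offset
	 * below gives the on-disk size of object i.
	 */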
	entries[nr_objects].offset = pack_sig_ofs;
	/* first sort entries by pack offset, since unpacking them is more efficient that way */
	for (i = 0; i < nr_objects; i++) {
		entries[i].offset = nth_packed_object_offset(p, i);
		entries[i].nr = i;
	}
	QSORT(entries, nr_objects, compare_entries);

	for (i = 0; i < nr_objects; i++) {
		void *data;
		struct object_id oid;
		enum object_type type;
		unsigned long size;
		off_t curpos;
		int data_valid;

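		/*
		 * Look up the object name for this entry on the fly; the
		 * sorted idx_entry array stores only offsets and index
		 * positions, which keeps it small.
		 */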
		if (nth_packed_object_id(&oid, p, entries[i].nr) < 0)
			BUG("unable to get oid of object %lu from %s",
			    (unsigned long)entries[i].nr, p->pack_name);

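		/*
		 * Version 2 indexes record a CRC32 of each object's
		 * compressed representation, so the on-disk bytes can be
		 * checked without inflating them.
		 */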
		if (p->index_version > 1) {
			off_t offset = entries[i].offset;
			off_t len = entries[i+1].offset - offset;
			unsigned int nr = entries[i].nr;
			if (check_pack_crc(p, w_curs, offset, len, nr))
				err = error("index CRC mismatch for object %s "
					    "from %s at offset %"PRIuMAX"",
					    oid_to_hex(&oid),
					    p->pack_name, (uintmax_t)offset);
		}

		curpos = entries[i].offset;
		type = unpack_object_header(p, w_curs, &curpos, &size);
		unuse_pack(w_curs);

		if (type == OBJ_BLOB && big_file_threshold <= size) {
			/*
			 * Let check_object_signature() check it with
			 * the streaming interface; no point slurping
			 * the data in-core only to discard.
			 */
			data = NULL;
			data_valid = 0;
		} else {
			data = unpack_entry(r, p, entries[i].offset, &type, &size);
			data_valid = 1;
		}

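		/*
		 * data_valid && !data means unpack_entry() failed;
		 * !data_valid means we deliberately skipped slurping the
		 * blob and let check_object_signature() stream it instead.
		 */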
		if (data_valid && !data)
			err = error("cannot unpack %s from %s at offset %"PRIuMAX"",
				    oid_to_hex(&oid), p->pack_name,
				    (uintmax_t)entries[i].offset);
		else if (check_object_signature(r, &oid, data, size, type_name(type)))
			err = error("packed %s from %s is corrupt",
				    oid_to_hex(&oid), p->pack_name);
		else if (fn) {
			int eaten = 0;
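			/*
			 * A callback that sets "eaten" has taken over the
			 * buffer, so it must not be freed below.
			 */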
			err |= fn(&oid, type, size, data, &eaten);
			if (eaten)
				data = NULL;
		}
		if (((base_count + i) & 1023) == 0)
			display_progress(progress, base_count + i);
		free(data);

	}
	display_progress(progress, base_count + i);
	free(entries);

	return err;
}

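/*
 * Verify the .idx file on its own: open it if necessary and check its
 * trailing checksum over the rest of its contents.
 */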
int verify_pack_index(struct packed_git *p)
{
	size_t len;
	const unsigned char *index_base;
	git_hash_ctx ctx;
	unsigned char hash[GIT_MAX_RAWSZ];
	int err = 0;

	if (open_pack_index(p))
		return error("packfile %s index not opened", p->pack_name);
	index_base = p->index_data;
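	/* size of the index data proper, excluding its trailing checksum */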
	len = p->index_size - the_hash_algo->rawsz;

	/* Verify SHA1 sum of the index file */
	the_hash_algo->init_fn(&ctx);
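	/* update_fn() takes a size_t, so even a >4GB index is hashed in one call */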
	the_hash_algo->update_fn(&ctx, index_base, len);
	the_hash_algo->final_fn(hash, &ctx);
	if (!hasheq(hash, index_base + len))
		err = error("Packfile index for %s hash mismatch",
			    p->pack_name);
	return err;
}

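/*
 * Public entry point: verify the index first, then the pack itself,
 * accumulating errors from both.  Returns non-zero if anything failed.
 */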
int verify_pack(struct repository *r, struct packed_git *p, verify_fn fn,
		struct progress *progress, uint32_t base_count)
{
	int err = 0;
	struct pack_window *w_curs = NULL;

	err |= verify_pack_index(p);
	if (!p->index_data)
		return -1;

	err |= verify_packfile(r, p, &w_curs, fn, progress, base_count);
	unuse_pack(&w_curs);

	return err;
}