2005-06-28 23:21:02 +02:00
|
|
|
#ifndef PACK_H
|
|
|
|
#define PACK_H
|
|
|
|
|
2006-07-12 05:45:31 +02:00
|
|
|
#include "object.h"
|
2011-10-28 20:40:48 +02:00
|
|
|
#include "csum-file.h"
|
2005-06-28 23:21:02 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Packed object header
|
|
|
|
*/
|
|
|
|
#define PACK_SIGNATURE 0x5041434b /* "PACK" */
|
2006-10-15 08:37:41 +02:00
|
|
|
#define PACK_VERSION 2
|
2006-02-09 23:50:04 +01:00
|
|
|
#define pack_version_ok(v) ((v) == htonl(2) || (v) == htonl(3)) /* accepted pack versions, compared in network byte order */
|
2005-06-28 23:21:02 +02:00
|
|
|
/*
 * On-disk header found at the start of every packfile.  The version
 * field is stored in network byte order (see pack_version_ok(), which
 * compares it against htonl() values).
 */
struct pack_header {
	uint32_t hdr_signature;	/* PACK_SIGNATURE ("PACK") */
	uint32_t hdr_version;	/* pack format version; must satisfy pack_version_ok() */
	uint32_t hdr_entries;	/* number of objects contained in the pack */
};
|
|
|
|
|
2007-01-18 02:43:57 +01:00
|
|
|
/*
|
2007-03-16 21:42:50 +01:00
|
|
|
* The first four bytes of index formats later than version 1 should
|
|
|
|
* start with this signature, as all older git binaries would find this
|
|
|
|
* value illegal and abort reading the file.
|
2007-01-18 02:43:57 +01:00
|
|
|
*
|
|
|
|
* This is the case because the number of objects in a packfile
|
|
|
|
* cannot exceed 1,431,660,000 as every object would need at least
|
2007-03-16 21:42:50 +01:00
|
|
|
* 3 bytes of data and the overall packfile cannot exceed 4 GiB with
|
|
|
|
* version 1 of the index file due to the offsets limited to 32 bits.
|
|
|
|
* Clearly the signature exceeds this maximum.
|
2007-01-18 02:43:57 +01:00
|
|
|
*
|
|
|
|
* Very old git binaries will also compare the first 4 bytes to the
|
|
|
|
* next 4 bytes in the index and abort with a "non-monotonic index"
|
|
|
|
* error if the second 4 byte word is smaller than the first 4
|
|
|
|
* byte word. This would be true in the proposed future index
|
|
|
|
* format as idx_signature would be greater than idx_version.
|
|
|
|
*/
|
|
|
|
#define PACK_IDX_SIGNATURE 0xff744f63 /* "\377tOc" */
|
|
|
|
|
2011-02-26 00:43:25 +01:00
|
|
|
/*
 * Options controlling how a pack index (.idx) file is written or
 * verified (see write_idx_file() and reset_pack_idx_option()).
 */
struct pack_idx_option {
	unsigned flags;
	/* flag bits */
#define WRITE_IDX_VERIFY 01 /* verify only, do not write the idx file */
#define WRITE_IDX_STRICT 02 /* NOTE(review): presumably enables strict validation while writing — confirm at call sites */

	uint32_t version;	/* index format version to produce */
	uint32_t off32_limit;	/* offsets at or above this need a 64-bit entry
				 * (implied by the anomaly list below) */

	/*
	 * List of offsets that would fit within off32_limit but
	 * need to be written out as 64-bit entity for byte-for-byte
	 * verification.
	 */
	int anomaly_alloc, anomaly_nr;	/* allocated size / used count of anomaly[] */
	uint32_t *anomaly;		/* the anomalous offsets themselves */
};
|
|
|
|
|
|
|
|
extern void reset_pack_idx_option(struct pack_idx_option *);
|
2007-06-01 21:18:05 +02:00
|
|
|
|
2007-03-16 21:42:50 +01:00
|
|
|
/*
 * Packed object index header (index format versions later than 1;
 * see the PACK_IDX_SIGNATURE discussion above).
 */
struct pack_idx_header {
	uint32_t idx_signature;	/* PACK_IDX_SIGNATURE ("\377tOc") */
	uint32_t idx_version;	/* index format version */
};
|
|
|
|
|
2007-06-01 21:18:05 +02:00
|
|
|
/*
 * Common part of object structure used for write_idx_file():
 * one entry per object to be recorded in the index.
 */
struct pack_idx_entry {
	unsigned char sha1[20];	/* object name (binary SHA-1) */
	uint32_t crc32;		/* CRC-32 of the object's packed representation —
				 * presumably what check_pack_crc() validates */
	off_t offset;		/* offset of the object within the packfile */
};
|
|
|
|
|
2011-11-07 03:59:25 +01:00
|
|
|
|
2011-11-07 03:59:26 +01:00
|
|
|
struct progress;
|
fsck: use streaming interface for large blobs in pack
For blobs, we want to make sure the on-disk data is not corrupted
(i.e. can be inflated and produce the expected SHA-1). Blob content is
opaque, there's nothing else inside to check for.
For really large blobs, we may want to avoid unpacking the entire blob
in memory, just to check whether it produces the same SHA-1. On 32-bit
systems, we may not have enough virtual address space for such memory
allocation. And even on 64-bit where it's not a problem, allocating a
lot more memory could result in kicking other parts of systems to swap
file, generating lots of I/O and slowing everything down.
For this particular operation, not unpacking the blob and letting
check_sha1_signature, which supports streaming interface, do the job
is sufficient. check_sha1_signature() is not shown in the diff,
unfortunately. But it will be called when "data_valid && !data" is
false.
We will call the callback function "fn" with NULL as "data". The only
callback of this function is fsck_obj_buffer(), which does not touch
"data" at all if it's a blob.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-07-13 17:44:04 +02:00
|
|
|
/* Note, the data argument could be NULL if object type is blob */
|
2011-11-07 03:59:25 +01:00
|
|
|
typedef int (*verify_fn)(const unsigned char*, enum object_type, unsigned long, void*, int*);
|
|
|
|
|
pack-objects: name pack files after trailer hash
Our current scheme for naming packfiles is to calculate the
sha1 hash of the sorted list of objects contained in the
packfile. This gives us a unique name, so we are reasonably
sure that two packs with the same name will contain the same
objects.
It does not, however, tell us that two such packs have the
exact same bytes. This makes things awkward if we repack the
same set of objects. Due to run-to-run variations, the bytes
may not be identical (e.g., changed zlib or git versions,
different source object reuse due to new packs in the
repository, or even different deltas due to races during a
multi-threaded delta search).
In theory, this could be helpful to a program that cares
that the packfile contains a certain set of objects, but
does not care about the particular representation. In
practice, no part of git makes use of that, and in many
cases it is potentially harmful. For example, if a dumb http
client fetches the .idx file, it must be sure to get the
exact .pack that matches it. Similarly, a partial transfer
of a .pack file cannot be safely resumed, as the actual
bytes may have changed. This could also affect a local
client which opened the .idx and .pack files, closes the
.pack file (due to memory or file descriptor limits), and
then re-opens a changed packfile.
In all of these cases, git can detect the problem, as we
have the sha1 of the bytes themselves in the pack trailer
(which we verify on transfer), and the .idx file references
the trailer from the matching packfile. But it would be
simpler and more efficient to actually get the correct
bytes, rather than noticing the problem and having to
restart the operation.
This patch simply uses the pack trailer sha1 as the pack
name. It should be similarly unique, but covers the exact
representation of the objects. Other parts of git should not
care, as the pack name is returned by pack-objects and is
essentially opaque.
One test needs to be updated, because it actually corrupts a
pack and expects that re-packing the corrupted bytes will
use the same name. It won't anymore, but we can easily just
use the name that pack-objects hands back.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-12-05 21:28:07 +01:00
|
|
|
extern const char *write_idx_file(const char *index_name, struct pack_idx_entry **objects, int nr_objects, const struct pack_idx_option *, const unsigned char *sha1);
|
2008-06-25 05:19:02 +02:00
|
|
|
extern int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr);
|
2010-04-19 16:23:07 +02:00
|
|
|
extern int verify_pack_index(struct packed_git *);
|
2011-11-07 03:59:26 +01:00
|
|
|
extern int verify_pack(struct packed_git *, verify_fn fn, struct progress *, uint32_t);
|
2011-10-28 20:40:48 +02:00
|
|
|
extern off_t write_pack_header(struct sha1file *f, uint32_t);
|
2008-08-29 22:07:59 +02:00
|
|
|
extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t);
|
2007-09-14 09:31:16 +02:00
|
|
|
extern char *index_pack_lockfile(int fd);
|
2017-03-24 18:26:50 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The "hdr" output buffer should be at least this big, which will handle sizes
|
|
|
|
* up to 2^67.
|
|
|
|
*/
|
|
|
|
#define MAX_PACK_OBJECT_HEADER 10
|
encode_in_pack_object_header: respect output buffer length
The encode_in_pack_object_header() writes a variable-length
header to an output buffer, but it doesn't actually know how
long the buffer is. At first glance, this looks like it
might be possible to overflow.
In practice, this is probably impossible. The smallest
buffer we use is 10 bytes, which would hold the header for
an object up to 2^67 bytes. Obviously we're not likely to
see such an object, but we might worry that an object could
lie about its size (causing us to overflow before we realize
it does not actually have that many bytes). But the argument
is passed as a uintmax_t. Even on systems that have __int128
available, uintmax_t is typically restricted to 64-bit by
the ABI.
So it's unlikely that a system exists where this could be
exploited. Still, it's easy enough to use a normal out/len
pair and make sure we don't write too far. That protects the
hypothetical 128-bit system, makes it harder for callers to
accidentally specify a too-small buffer, and makes the
resulting code easier to audit.
Note that the one caller in fast-import tried to catch such
a case, but did so _after_ the call (at which point we'd
have already overflowed!). This check can now go away.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-03-24 18:26:40 +01:00
|
|
|
extern int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
|
|
|
|
enum object_type, uintmax_t);
|
2007-01-23 06:55:18 +01:00
|
|
|
|
|
|
|
#define PH_ERROR_EOF (-1)
|
|
|
|
#define PH_ERROR_PACK_SIGNATURE (-2)
|
|
|
|
#define PH_ERROR_PROTOCOL (-3)
|
|
|
|
extern int read_pack_header(int fd, struct pack_header *);
|
2011-10-28 20:52:14 +02:00
|
|
|
|
|
|
|
extern struct sha1file *create_tmp_packfile(char **pack_tmp_name);
|
2014-03-03 10:24:29 +01:00
|
|
|
extern void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]);
|
2011-10-28 20:52:14 +02:00
|
|
|
|
2005-06-28 23:21:02 +02:00
|
|
|
#endif
|