2006-08-05 08:04:21 +02:00
|
|
|
#include "builtin.h"
|
|
|
|
#include "cache.h"
|
2018-03-23 18:20:59 +01:00
|
|
|
#include "repository.h"
|
2017-06-14 20:07:36 +02:00
|
|
|
#include "config.h"
|
2014-10-01 12:28:42 +02:00
|
|
|
#include "lockfile.h"
|
2006-08-05 08:04:21 +02:00
|
|
|
#include "object.h"
|
|
|
|
#include "blob.h"
|
2006-08-14 06:58:19 +02:00
|
|
|
#include "tree.h"
|
2007-02-06 22:08:06 +01:00
|
|
|
#include "commit.h"
|
2006-08-05 08:04:21 +02:00
|
|
|
#include "delta.h"
|
|
|
|
#include "pack.h"
|
2006-08-14 06:58:19 +02:00
|
|
|
#include "refs.h"
|
2006-08-05 08:04:21 +02:00
|
|
|
#include "csum-file.h"
|
2006-08-15 02:16:28 +02:00
|
|
|
#include "quote.h"
|
2010-10-03 11:56:46 +02:00
|
|
|
#include "dir.h"
|
2016-04-25 23:17:28 +02:00
|
|
|
#include "run-command.h"
|
2017-08-19 00:20:16 +02:00
|
|
|
#include "packfile.h"
|
2018-03-23 18:20:59 +01:00
|
|
|
#include "object-store.h"
|
2018-04-11 20:37:55 +02:00
|
|
|
#include "mem-pool.h"
|
2018-07-20 18:33:04 +02:00
|
|
|
#include "commit-reach.h"
|
fast-import: add options for rewriting submodules
When converting a repository using submodules from one hash algorithm to
another, it is necessary to rewrite the submodules from the old
algorithm to the new algorithm, since only references to submodules, not
their contents, are written to the fast-export stream. Without rewriting
the submodules, fast-import fails with an "Invalid dataref" error when
encountering a submodule in another algorithm.
Add a pair of options, --rewrite-submodules-from and
--rewrite-submodules-to, that take a list of marks produced by
fast-export and fast-import, respectively, when processing the
submodule. Use these marks to map the submodule commits from the old
algorithm to the new algorithm.
We read marks into two corresponding struct mark_set objects and then
perform a mapping from the old to the new using a hash table. This lets
us reuse the same mark parsing code that is used elsewhere and allows us
to efficiently read and match marks based on their ID, since mark files
need not be sorted.
Note that because we're using a khash table for the object IDs, and this
table copies values of struct object_id instead of taking references to
them, it's necessary to zero the struct object_id values that we use to
insert and look up in the table. Otherwise, we would end up with SHA-1
values that don't match because of whatever stack garbage might be left
in the unused area.
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-02-22 21:17:49 +01:00
|
|
|
#include "khash.h"
|
2006-08-05 08:04:21 +02:00
|
|
|
|
2007-01-17 08:42:43 +01:00
|
|
|
/*
 * Bit widths for the bitfields in struct object_entry below: a pack id
 * and a delta-chain depth must each fit in these many bits.
 */
#define PACK_ID_BITS 16
#define MAX_PACK_ID ((1<<PACK_ID_BITS)-1)
#define DEPTH_BITS 13
#define MAX_DEPTH ((1<<DEPTH_BITS)-1)

/*
 * We abuse the setuid bit on directories to mean "do not delta".
 */
#define NO_DELTA S_ISUID

/*
 * The amount of additional space required in order to write an object into the
 * current pack. This is the hash lengths at the end of the pack, plus the
 * length of one object ID.
 */
#define PACK_SIZE_THRESHOLD (the_hash_algo->rawsz * 3)
|
|
|
|
|
2011-03-16 08:08:34 +01:00
|
|
|
/*
 * One object we have written (or located) in a generated pack.
 * Entries are kept in the file-scope "object_table" hashmap, keyed by
 * idx.oid (see object_entry_hashcmp).
 */
struct object_entry {
	struct pack_idx_entry idx;	/* oid + offset bookkeeping for the pack index */
	struct hashmap_entry ent;	/* embedded linkage into object_table */
	uint32_t type : TYPE_BITS,	/* object type */
		pack_id : PACK_ID_BITS,	/* which generated pack contains it */
		depth : DEPTH_BITS;	/* delta-chain depth of the stored object */
};
|
|
|
|
|
fast-import: replace custom hash with hashmap.c
We use a custom hash in fast-import to store the set of objects we've
imported so far. It has a fixed set of 2^16 buckets and chains any
collisions with a linked list. As the number of objects grows larger
than that, the load factor increases and we degrade to O(n) lookups and
O(n^2) insertions.
We can scale better by using our hashmap.c implementation, which will
resize the bucket count as we grow. This does incur an extra memory cost
of 8 bytes per object, as hashmap stores the integer hash value for each
entry in its hashmap_entry struct (which we really don't care about
here, because we're just reusing the embedded object hash). But I think
the numbers below justify this (and our per-object memory cost is
already much higher).
I also looked at using khash, but it seemed to perform slightly worse
than hashmap at all sizes, and worse even than the existing code for
small sizes. It's also awkward to use here, because we want to look up a
"struct object_entry" from a "struct object_id", and it doesn't handle
mismatched keys as well. Making a mapping of object_id to object_entry
would be more natural, but that would require pulling the embedded oid
out of the object_entry or incurring an extra 32 bytes per object.
In a synthetic test creating as many cheap, tiny objects as possible
perl -e '
my $bits = shift;
my $nr = 2**$bits;
for (my $i = 0; $i < $nr; $i++) {
print "blob\n";
print "data 4\n";
print pack("N", $i);
}
' $bits | git fast-import
I got these results:
nr_objects master khash hashmap
2^20 0m4.317s 0m5.109s 0m3.890s
2^21 0m10.204s 0m9.702s 0m7.933s
2^22 0m27.159s 0m17.911s 0m16.751s
2^23 1m19.038s 0m35.080s 0m31.963s
2^24 4m18.766s 1m10.233s 1m6.793s
which points to hashmap as the winner. We didn't have any perf tests for
fast-export or fast-import, so I added one as a more real-world case.
It uses an export without blobs since that's significantly cheaper than
a full one, but still is an interesting case people might use (e.g., for
rewriting history). It will emphasize this change in some ways (as a
percentage we spend more time making objects and less shuffling blob
bytes around) and less in others (the total object count is lower).
Here are the results for linux.git:
Test HEAD^ HEAD
----------------------------------------------------------------------------
9300.1: export (no-blobs) 67.64(66.96+0.67) 67.81(67.06+0.75) +0.3%
9300.2: import (no-blobs) 284.04(283.34+0.69) 198.09(196.01+0.92) -30.3%
It only has ~5.2M commits and trees, so this is a larger effect than I
expected (the 2^23 case above only improved by 50s or so, but here we
gained almost 90s). This is probably due to actually performing more
object lookups in a real import with trees and commits, as opposed to
just dumping a bunch of blobs into a pack.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-06 21:49:40 +02:00
|
|
|
static int object_entry_hashcmp(const void *map_data,
|
|
|
|
const struct hashmap_entry *eptr,
|
|
|
|
const struct hashmap_entry *entry_or_key,
|
|
|
|
const void *keydata)
|
|
|
|
{
|
|
|
|
const struct object_id *oid = keydata;
|
|
|
|
const struct object_entry *e1, *e2;
|
|
|
|
|
|
|
|
e1 = container_of(eptr, const struct object_entry, ent);
|
|
|
|
if (oid)
|
|
|
|
return oidcmp(&e1->idx.oid, oid);
|
|
|
|
|
|
|
|
e2 = container_of(entry_or_key, const struct object_entry, ent);
|
|
|
|
return oidcmp(&e1->idx.oid, &e2->idx.oid);
|
|
|
|
}
|
|
|
|
|
2011-03-16 08:08:34 +01:00
|
|
|
/*
 * Slab allocator for object_entry structs: entries are handed out
 * sequentially from "entries" until "end" is reached, then a new pool
 * is chained via next_pool.
 */
struct object_entry_pool {
	struct object_entry_pool *next_pool;	/* previously filled pool, or NULL */
	struct object_entry *next_free;		/* next unhanded-out slot */
	struct object_entry *end;		/* one past the last slot in this pool */
	struct object_entry entries[FLEX_ARRAY]; /* more */
};
|
|
|
|
|
2011-03-16 08:08:34 +01:00
|
|
|
/*
 * Sparse radix-tree node mapping mark numbers to objects (or, for
 * submodule rewriting, to raw object ids). Each node indexes 10 bits of
 * the mark; "shift" is the bit position of this node's index. Interior
 * nodes (shift != 0) use data.sets; leaves use data.marked or data.oids
 * (see for_each_mark for the traversal).
 */
struct mark_set {
	union {
		struct object_id *oids[1024];	/* leaf: marks mapped to bare oids */
		struct object_entry *marked[1024]; /* leaf: marks mapped to pack entries */
		struct mark_set *sets[1024];	/* interior: child nodes */
	} data;
	unsigned int shift;	/* bit offset of this node's 10-bit index; 0 == leaf */
};
|
|
|
|
|
2011-03-16 08:08:34 +01:00
|
|
|
/*
 * The most recently written object of a given kind, kept around so the
 * next object can be delta-compressed against it.
 */
struct last_object {
	struct strbuf data;	/* raw content of the previous object */
	off_t offset;		/* its offset in the current pack */
	unsigned int depth;	/* its delta-chain depth */
	unsigned no_swap : 1;	/* if set, "data" must not be recycled */
};
|
|
|
|
|
2011-03-16 08:08:34 +01:00
|
|
|
/*
 * Interned string (e.g. a tree entry name), stored once in atom_table
 * and shared by reference; chained per hash bucket via next_atom.
 */
struct atom_str {
	struct atom_str *next_atom;	/* next atom in the same hash bucket */
	unsigned short str_len;		/* length of str_dat, excluding NUL */
	char str_dat[FLEX_ARRAY];	/* more */
};
|
|
|
|
|
|
|
|
struct tree_content;
/*
 * One entry within a loaded tree. Two versions are tracked: versions[0]
 * is the base (pre-modification) state and versions[1] the current one
 * (cf. the "old tree"/"cur tree" lines in write_branch_report).
 */
struct tree_entry {
	struct tree_content *tree;	/* loaded subtree, or NULL if not loaded */
	struct atom_str *name;		/* interned entry name */
	struct tree_entry_ms {
		uint16_t mode;		/* file mode bits */
		struct object_id oid;	/* object id of this version */
	} versions[2];
};
|
|
|
|
|
2011-03-16 08:08:34 +01:00
|
|
|
/*
 * In-memory contents of a tree: a growable array of entry pointers.
 */
struct tree_content {
	unsigned int entry_capacity; /* must match avail_tree_content */
	unsigned int entry_count;	/* entries currently in use */
	unsigned int delta_depth;	/* delta depth of the tree this was loaded from */
	struct tree_entry *entries[FLEX_ARRAY]; /* more */
};
|
|
|
|
|
2011-03-16 08:08:34 +01:00
|
|
|
/*
 * A released tree_content, kept on a per-capacity free list
 * (avail_tree_table) for reuse. Layout overlays tree_content, hence the
 * matching first member.
 */
struct avail_tree_content {
	unsigned int entry_capacity; /* must match tree_content */
	struct avail_tree_content *next_avail;	/* next free node of this capacity */
};
|
|
|
|
|
2011-03-16 08:08:34 +01:00
|
|
|
/*
 * State of one branch being imported. Branches live in branch_table
 * (hash-chained via table_next_branch); those with a loaded tree are
 * additionally chained on the active list.
 */
struct branch {
	struct branch *table_next_branch;	/* next in branch_table hash bucket */
	struct branch *active_next_branch;	/* next on the active-branch list */
	const char *name;			/* full ref name */
	struct tree_entry branch_tree;		/* root tree of this branch */
	uintmax_t last_commit;			/* "commit clock" of the latest commit */
	uintmax_t num_notes;			/* note count -- assumed notes-tree fanout input; TODO confirm */
	unsigned active : 1;			/* tree currently loaded in memory */
	unsigned delete : 1;			/* branch marked for deletion */
	unsigned pack_id : PACK_ID_BITS;	/* pack holding its tip commit */
	struct object_id oid;			/* tip commit object id */
};
|
|
|
|
|
2011-03-16 08:08:34 +01:00
|
|
|
/*
 * An annotated tag created during the import; kept on a singly linked
 * list (first_tag/last_tag) in creation order.
 */
struct tag {
	struct tag *next_tag;	/* next tag created, or NULL */
	const char *name;	/* tag name */
	unsigned int pack_id;	/* pack holding the tag object */
	struct object_id oid;	/* tag object id */
};
|
|
|
|
|
2011-03-16 08:08:34 +01:00
|
|
|
/* Singly linked list of object ids (e.g. a commit's parents/merges). */
struct hash_list {
	struct hash_list *next;
	struct object_id oid;
};
|
2006-08-14 06:58:19 +02:00
|
|
|
|
2007-02-06 20:58:30 +01:00
|
|
|
/*
 * How date strings in the input stream are interpreted (the file-scope
 * "whenspec" below defaults to WHENSPEC_RAW).
 */
typedef enum {
	WHENSPEC_RAW = 1,	/* native "<seconds> <tz>" format */
	WHENSPEC_RAW_PERMISSIVE, /* raw, presumably with relaxed validation -- TODO confirm */
	WHENSPEC_RFC2822,	/* RFC 2822 formatted dates */
	WHENSPEC_NOW		/* ignore input; use the current time */
} whenspec_type;
|
|
|
|
|
2011-03-16 08:08:34 +01:00
|
|
|
/*
 * Node in the circular doubly-linked ring of recently executed stream
 * commands (cmd_hist below), kept for crash reporting.
 */
struct recent_command {
	struct recent_command *prev;
	struct recent_command *next;
	char *buf;	/* copy of the command line */
};
|
|
|
|
|
2020-02-22 21:17:46 +01:00
|
|
|
/* Inserts one (mark, oid) pair into a mark_set radix tree. */
typedef void (*mark_set_inserter_t)(struct mark_set *s, struct object_id *oid, uintmax_t mark);
/* Per-mark callback for for_each_mark; cbp is caller-supplied context. */
typedef void (*each_mark_fn_t)(uintmax_t mark, void *obj, void *cbp);
|
2020-02-22 21:17:46 +01:00
|
|
|
|
2007-01-16 06:33:19 +01:00
|
|
|
/* Configured limits on output */
static unsigned long max_depth = 50;	/* maximum allowed delta-chain depth */
static off_t max_packsize;		/* 0 == unlimited pack size */
static int unpack_limit = 100;		/* threshold below which objects are loosened -- TODO confirm against pack handling code */
static int force_update;

/* Stats and misc. counters */
static uintmax_t alloc_count;
static uintmax_t marks_set_count;
static uintmax_t object_count_by_type[1 << TYPE_BITS];
static uintmax_t duplicate_count_by_type[1 << TYPE_BITS];
static uintmax_t delta_count_by_type[1 << TYPE_BITS];
static uintmax_t delta_count_attempts_by_type[1 << TYPE_BITS];
static unsigned long object_count;
static unsigned long branch_count;
static unsigned long branch_load_count;
static int failure;
static FILE *pack_edges;
static unsigned int show_stats = 1;
static int global_argc;
static const char **global_argv;

/* Memory pools */
static struct mem_pool fi_mem_pool = {NULL, 2*1024*1024 -
				       sizeof(struct mp_block), 0 };

/* Atom management */
static unsigned int atom_table_sz = 4451;	/* prime bucket count */
static unsigned int atom_cnt;
static struct atom_str **atom_table;

/* The .pack file being generated */
static struct pack_idx_option pack_idx_opts;
static unsigned int pack_id;		/* id of the pack currently being written */
static struct hashfile *pack_file;
static struct packed_git *pack_data;
static struct packed_git **all_packs;
static off_t pack_size;

/* Table of objects we've written. */
static unsigned int object_entry_alloc = 5000;	/* object_entry slab size */
static struct object_entry_pool *blocks;
static struct hashmap object_table;	/* object_entry, keyed by oid (object_entry_hashcmp) */
static struct mark_set *marks;		/* root of the mark radix tree */
static const char *export_marks_file;
static const char *import_marks_file;
static int import_marks_file_from_stream;
static int import_marks_file_ignore_missing;
static int import_marks_file_done;
static int relative_marks_paths;

/* Our last blob */
static struct last_object last_blob = { STRBUF_INIT, 0, 0, 0 };

/* Tree management */
static unsigned int tree_entry_alloc = 1000;
static void *avail_tree_entry;		/* free list of released tree_entry structs */
static unsigned int avail_tree_table_sz = 100;
static struct avail_tree_content **avail_tree_table;	/* free lists by capacity */
static size_t tree_entry_allocd;
static struct strbuf old_tree = STRBUF_INIT;
static struct strbuf new_tree = STRBUF_INIT;

/* Branch data */
static unsigned long max_active_branches = 5;	/* LRU cap on loaded branch trees */
static unsigned long cur_active_branches;
static unsigned long branch_table_sz = 1039;	/* prime bucket count */
static struct branch **branch_table;
static struct branch *active_branches;

/* Tag data */
static struct tag *first_tag;
static struct tag *last_tag;

/* Input stream parsing */
static whenspec_type whenspec = WHENSPEC_RAW;
static struct strbuf command_buf = STRBUF_INIT;
static int unread_command_buf;		/* nonzero if last command was pushed back */
/* Circular ring of recent commands for crash reports; cmd_hist is the head. */
static struct recent_command cmd_hist = {&cmd_hist, &cmd_hist, NULL};
static struct recent_command *cmd_tail = &cmd_hist;
static struct recent_command *rc_free;
static unsigned int cmd_save = 100;	/* how many recent commands to retain */
static uintmax_t next_mark;
static struct strbuf new_data = STRBUF_INIT;
static int seen_data_command;
static int require_explicit_termination;
static int allow_unsafe_features;	/* gate for in-stream export-marks etc. */

/* Signal handling */
static volatile sig_atomic_t checkpoint_requested;

/* Submodule marks */
static struct string_list sub_marks_from = STRING_LIST_INIT_DUP;
static struct string_list sub_marks_to = STRING_LIST_INIT_DUP;
static kh_oid_map_t *sub_oid_map;	/* old-algorithm oid -> new-algorithm oid */

/* Where to write output of cat-blob commands */
static int cat_blob_fd = STDOUT_FILENO;

static void parse_argv(void);
static void parse_get_mark(const char *p);
static void parse_cat_blob(const char *p);
static void parse_ls(const char *p, struct branch *b);
|
2006-08-15 02:16:28 +02:00
|
|
|
|
2020-02-22 21:17:48 +01:00
|
|
|
/*
 * Walk the sparse mark radix tree rooted at "m", invoking "callback"
 * once per stored mark with its absolute mark number (base plus the
 * position within this subtree) and the stored object pointer.
 * "p" is passed through to the callback untouched.
 */
static void for_each_mark(struct mark_set *m, uintmax_t base, each_mark_fn_t callback, void *p)
{
	uintmax_t slot;

	if (!m->shift) {
		/* Leaf level: each non-NULL entry is one stored mark. */
		for (slot = 0; slot < 1024; slot++) {
			if (m->data.marked[slot])
				callback(base + slot, m->data.marked[slot], p);
		}
		return;
	}

	/* Interior level: recurse into each populated child subtree. */
	for (slot = 0; slot < 1024; slot++) {
		if (m->data.sets[slot])
			for_each_mark(m->data.sets[slot],
				      base + (slot << m->shift), callback, p);
	}
}
|
|
|
|
|
|
|
|
static void dump_marks_fn(uintmax_t mark, void *object, void *cbp) {
|
|
|
|
struct object_entry *e = object;
|
|
|
|
FILE *f = cbp;
|
|
|
|
|
|
|
|
fprintf(f, ":%" PRIuMAX " %s\n", mark, oid_to_hex(&e->idx.oid));
|
|
|
|
}
|
|
|
|
|
2007-08-03 08:00:37 +02:00
|
|
|
/*
 * Append a human-readable summary of one branch to the crash report
 * stream "rpt".  Called from write_crash_report() for every branch in
 * the branch hash table; the output format is informal diagnostics.
 */
static void write_branch_report(FILE *rpt, struct branch *b)
{
	fprintf(rpt, "%s:\n", b->name);

	fprintf(rpt, " status :");
	if (b->active)
		fputs(" active", rpt);
	if (b->branch_tree.tree)
		fputs(" loaded", rpt);
	/* a null version-1 tree oid means the tree has uncommitted edits */
	if (is_null_oid(&b->branch_tree.versions[1].oid))
		fputs(" dirty", rpt);
	fputc('\n', rpt);

	fprintf(rpt, " tip commit : %s\n", oid_to_hex(&b->oid));
	fprintf(rpt, " old tree : %s\n",
		oid_to_hex(&b->branch_tree.versions[0].oid));
	fprintf(rpt, " cur tree : %s\n",
		oid_to_hex(&b->branch_tree.versions[1].oid));
	fprintf(rpt, " commit clock: %" PRIuMAX "\n", b->last_commit);

	fputs(" last pack : ", rpt);
	/* MAX_PACK_ID marks a branch not associated with any open pack */
	if (b->pack_id < MAX_PACK_ID)
		fprintf(rpt, "%u", b->pack_id);
	fputc('\n', rpt);

	fputc('\n', rpt);
}
|
|
|
|
|
2007-08-21 05:38:14 +02:00
|
|
|
/*
 * Dump a diagnostic crash report into the .git directory when
 * fast-import dies.  "err" is the already-formatted fatal message.
 * Best effort: on fopen failure we just report and return.  Reads a
 * number of file-scope globals (cmd_hist, active_branches,
 * branch_table, first_tag, marks, export_marks_file).
 */
static void write_crash_report(const char *err)
{
	char *loc = git_pathdup("fast_import_crash_%"PRIuMAX, (uintmax_t) getpid());
	FILE *rpt = fopen(loc, "w");
	struct branch *b;
	unsigned long lu;
	struct recent_command *rc;

	if (!rpt) {
		error_errno("can't write crash report %s", loc);
		free(loc);
		return;
	}

	fprintf(stderr, "fast-import: dumping crash report to %s\n", loc);

	fprintf(rpt, "fast-import crash report:\n");
	fprintf(rpt, " fast-import process: %"PRIuMAX"\n", (uintmax_t) getpid());
	fprintf(rpt, " parent process : %"PRIuMAX"\n", (uintmax_t) getppid());
	fprintf(rpt, " at %s\n", show_date(time(NULL), 0, DATE_MODE(ISO8601)));
	fputc('\n', rpt);

	fputs("fatal: ", rpt);
	fputs(err, rpt);
	fputc('\n', rpt);

	/* cmd_hist is a circular list; the entry just before the head is
	 * the most recent command and gets a "*" marker. */
	fputc('\n', rpt);
	fputs("Most Recent Commands Before Crash\n", rpt);
	fputs("---------------------------------\n", rpt);
	for (rc = cmd_hist.next; rc != &cmd_hist; rc = rc->next) {
		if (rc->next == &cmd_hist)
			fputs("* ", rpt);
		else
			fputs(" ", rpt);
		fputs(rc->buf, rpt);
		fputc('\n', rpt);
	}

	fputc('\n', rpt);
	fputs("Active Branch LRU\n", rpt);
	fputs("-----------------\n", rpt);
	fprintf(rpt, " active_branches = %lu cur, %lu max\n",
		cur_active_branches,
		max_active_branches);
	fputc('\n', rpt);
	fputs(" pos clock name\n", rpt);
	fputs(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", rpt);
	for (b = active_branches, lu = 0; b; b = b->active_next_branch)
		fprintf(rpt, " %2lu) %6" PRIuMAX" %s\n",
			++lu, b->last_commit, b->name);

	fputc('\n', rpt);
	fputs("Inactive Branches\n", rpt);
	fputs("-----------------\n", rpt);
	for (lu = 0; lu < branch_table_sz; lu++) {
		for (b = branch_table[lu]; b; b = b->table_next_branch)
			write_branch_report(rpt, b);
	}

	if (first_tag) {
		struct tag *tg;
		fputc('\n', rpt);
		fputs("Annotated Tags\n", rpt);
		fputs("--------------\n", rpt);
		for (tg = first_tag; tg; tg = tg->next_tag) {
			fputs(oid_to_hex(&tg->oid), rpt);
			fputc(' ', rpt);
			fputs(tg->name, rpt);
			fputc('\n', rpt);
		}
	}

	fputc('\n', rpt);
	fputs("Marks\n", rpt);
	fputs("-----\n", rpt);
	/* If marks were exported to a file, just point at it; otherwise
	 * dump every mark inline via for_each_mark(). */
	if (export_marks_file)
		fprintf(rpt, " exported to %s\n", export_marks_file);
	else
		for_each_mark(marks, 0, dump_marks_fn, rpt);

	fputc('\n', rpt);
	fputs("-------------------\n", rpt);
	fputs("END OF CRASH REPORT\n", rpt);
	fclose(rpt);
	free(loc);
}
|
|
|
|
|
2008-02-14 07:34:43 +01:00
|
|
|
/* Forward declarations: cleanup helpers invoked by die_nicely() below. */
static void end_packfile(void);
static void unkeep_all_packs(void);
static void dump_marks(void);
|
|
|
|
|
2007-08-03 08:00:37 +02:00
|
|
|
/*
 * Fatal-error handler installed in place of the default die() routine.
 * Prints the message, writes a crash report, and flushes what state it
 * can (pack, marks) before exiting.  The "zombie" latch prevents
 * recursion if the cleanup path itself calls die().
 */
static NORETURN void die_nicely(const char *err, va_list params)
{
	static int zombie;
	char message[2 * PATH_MAX];

	vsnprintf(message, sizeof(message), err, params);
	fputs("fatal: ", stderr);
	fputs(message, stderr);
	fputc('\n', stderr);

	if (!zombie) {
		zombie = 1;
		/* Report first: the cleanup calls below may die() again. */
		write_crash_report(message);
		end_packfile();
		unkeep_all_packs();
		dump_marks();
	}
	exit(128);
}
|
2006-08-08 09:36:45 +02:00
|
|
|
|
2010-11-22 09:16:02 +01:00
|
|
|
#ifndef SIGUSR1 /* Windows, for example */

/* No SIGUSR1 on this platform: checkpoint-on-signal is a no-op. */
static void set_checkpoint_signal(void)
{
}

#else

/*
 * SIGUSR1 handler: only sets a flag; the main loop is expected to poll
 * checkpoint_requested and perform the actual checkpoint.
 */
static void checkpoint_signal(int signo)
{
	checkpoint_requested = 1;
}

/* Install the SIGUSR1 handler with SA_RESTART so interrupted reads resume. */
static void set_checkpoint_signal(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = checkpoint_signal;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	sigaction(SIGUSR1, &sa, NULL);
}

#endif
|
|
|
|
|
2007-01-15 06:16:23 +01:00
|
|
|
static void alloc_objects(unsigned int cnt)
|
2006-08-06 19:51:39 +02:00
|
|
|
{
|
2006-08-14 06:58:19 +02:00
|
|
|
struct object_entry_pool *b;
|
2006-08-08 06:03:59 +02:00
|
|
|
|
2006-08-14 06:58:19 +02:00
|
|
|
b = xmalloc(sizeof(struct object_entry_pool)
|
2006-08-08 06:03:59 +02:00
|
|
|
+ cnt * sizeof(struct object_entry));
|
2006-08-14 06:58:19 +02:00
|
|
|
b->next_pool = blocks;
|
2006-08-08 06:03:59 +02:00
|
|
|
b->next_free = b->entries;
|
|
|
|
b->end = b->entries + cnt;
|
|
|
|
blocks = b;
|
|
|
|
alloc_count += cnt;
|
|
|
|
}
|
2006-08-06 19:51:39 +02:00
|
|
|
|
2017-05-07 00:09:56 +02:00
|
|
|
static struct object_entry *new_object(struct object_id *oid)
|
2006-08-06 19:51:39 +02:00
|
|
|
{
|
2006-08-08 06:03:59 +02:00
|
|
|
struct object_entry *e;
|
2006-08-06 19:51:39 +02:00
|
|
|
|
2006-08-08 06:03:59 +02:00
|
|
|
if (blocks->next_free == blocks->end)
|
2006-08-14 06:58:19 +02:00
|
|
|
alloc_objects(object_entry_alloc);
|
2006-08-06 19:51:39 +02:00
|
|
|
|
2006-08-08 06:03:59 +02:00
|
|
|
e = blocks->next_free++;
|
2017-05-07 00:10:11 +02:00
|
|
|
oidcpy(&e->idx.oid, oid);
|
2006-08-08 06:03:59 +02:00
|
|
|
return e;
|
2006-08-06 19:51:39 +02:00
|
|
|
}
|
|
|
|
|
2017-05-07 00:09:56 +02:00
|
|
|
static struct object_entry *find_object(struct object_id *oid)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
fast-import: replace custom hash with hashmap.c
We use a custom hash in fast-import to store the set of objects we've
imported so far. It has a fixed set of 2^16 buckets and chains any
collisions with a linked list. As the number of objects grows larger
than that, the load factor increases and we degrade to O(n) lookups and
O(n^2) insertions.
We can scale better by using our hashmap.c implementation, which will
resize the bucket count as we grow. This does incur an extra memory cost
of 8 bytes per object, as hashmap stores the integer hash value for each
entry in its hashmap_entry struct (which we really don't care about
here, because we're just reusing the embedded object hash). But I think
the numbers below justify this (and our per-object memory cost is
already much higher).
I also looked at using khash, but it seemed to perform slightly worse
than hashmap at all sizes, and worse even than the existing code for
small sizes. It's also awkward to use here, because we want to look up a
"struct object_entry" from a "struct object_id", and it doesn't handle
mismatched keys as well. Making a mapping of object_id to object_entry
would be more natural, but that would require pulling the embedded oid
out of the object_entry or incurring an extra 32 bytes per object.
In a synthetic test creating as many cheap, tiny objects as possible
perl -e '
my $bits = shift;
my $nr = 2**$bits;
for (my $i = 0; $i < $nr; $i++) {
print "blob\n";
print "data 4\n";
print pack("N", $i);
}
' $bits | git fast-import
I got these results:
nr_objects master khash hashmap
2^20 0m4.317s 0m5.109s 0m3.890s
2^21 0m10.204s 0m9.702s 0m7.933s
2^22 0m27.159s 0m17.911s 0m16.751s
2^23 1m19.038s 0m35.080s 0m31.963s
2^24 4m18.766s 1m10.233s 1m6.793s
which points to hashmap as the winner. We didn't have any perf tests for
fast-export or fast-import, so I added one as a more real-world case.
It uses an export without blobs since that's significantly cheaper than
a full one, but still is an interesting case people might use (e.g., for
rewriting history). It will emphasize this change in some ways (as a
percentage we spend more time making objects and less shuffling blob
bytes around) and less in others (the total object count is lower).
Here are the results for linux.git:
Test HEAD^ HEAD
----------------------------------------------------------------------------
9300.1: export (no-blobs) 67.64(66.96+0.67) 67.81(67.06+0.75) +0.3%
9300.2: import (no-blobs) 284.04(283.34+0.69) 198.09(196.01+0.92) -30.3%
It only has ~5.2M commits and trees, so this is a larger effect than I
expected (the 2^23 case above only improved by 50s or so, but here we
gained almost 90s). This is probably due to actually performing more
object lookups in a real import with trees and commits, as opposed to
just dumping a bunch of blobs into a pack.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-06 21:49:40 +02:00
|
|
|
return hashmap_get_entry_from_hash(&object_table, oidhash(oid), oid,
|
|
|
|
struct object_entry, ent);
|
2006-08-14 06:58:19 +02:00
|
|
|
}
|
|
|
|
|
2017-05-07 00:09:56 +02:00
|
|
|
static struct object_entry *insert_object(struct object_id *oid)
|
2006-08-06 19:51:39 +02:00
|
|
|
{
|
fast-import: replace custom hash with hashmap.c
We use a custom hash in fast-import to store the set of objects we've
imported so far. It has a fixed set of 2^16 buckets and chains any
collisions with a linked list. As the number of objects grows larger
than that, the load factor increases and we degrade to O(n) lookups and
O(n^2) insertions.
We can scale better by using our hashmap.c implementation, which will
resize the bucket count as we grow. This does incur an extra memory cost
of 8 bytes per object, as hashmap stores the integer hash value for each
entry in its hashmap_entry struct (which we really don't care about
here, because we're just reusing the embedded object hash). But I think
the numbers below justify this (and our per-object memory cost is
already much higher).
I also looked at using khash, but it seemed to perform slightly worse
than hashmap at all sizes, and worse even than the existing code for
small sizes. It's also awkward to use here, because we want to look up a
"struct object_entry" from a "struct object_id", and it doesn't handle
mismatched keys as well. Making a mapping of object_id to object_entry
would be more natural, but that would require pulling the embedded oid
out of the object_entry or incurring an extra 32 bytes per object.
In a synthetic test creating as many cheap, tiny objects as possible
perl -e '
my $bits = shift;
my $nr = 2**$bits;
for (my $i = 0; $i < $nr; $i++) {
print "blob\n";
print "data 4\n";
print pack("N", $i);
}
' $bits | git fast-import
I got these results:
nr_objects master khash hashmap
2^20 0m4.317s 0m5.109s 0m3.890s
2^21 0m10.204s 0m9.702s 0m7.933s
2^22 0m27.159s 0m17.911s 0m16.751s
2^23 1m19.038s 0m35.080s 0m31.963s
2^24 4m18.766s 1m10.233s 1m6.793s
which points to hashmap as the winner. We didn't have any perf tests for
fast-export or fast-import, so I added one as a more real-world case.
It uses an export without blobs since that's significantly cheaper than
a full one, but still is an interesting case people might use (e.g., for
rewriting history). It will emphasize this change in some ways (as a
percentage we spend more time making objects and less shuffling blob
bytes around) and less in others (the total object count is lower).
Here are the results for linux.git:
Test HEAD^ HEAD
----------------------------------------------------------------------------
9300.1: export (no-blobs) 67.64(66.96+0.67) 67.81(67.06+0.75) +0.3%
9300.2: import (no-blobs) 284.04(283.34+0.69) 198.09(196.01+0.92) -30.3%
It only has ~5.2M commits and trees, so this is a larger effect than I
expected (the 2^23 case above only improved by 50s or so, but here we
gained almost 90s). This is probably due to actually performing more
object lookups in a real import with trees and commits, as opposed to
just dumping a bunch of blobs into a pack.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-06 21:49:40 +02:00
|
|
|
struct object_entry *e;
|
|
|
|
unsigned int hash = oidhash(oid);
|
2006-08-06 19:51:39 +02:00
|
|
|
|
fast-import: replace custom hash with hashmap.c
We use a custom hash in fast-import to store the set of objects we've
imported so far. It has a fixed set of 2^16 buckets and chains any
collisions with a linked list. As the number of objects grows larger
than that, the load factor increases and we degrade to O(n) lookups and
O(n^2) insertions.
We can scale better by using our hashmap.c implementation, which will
resize the bucket count as we grow. This does incur an extra memory cost
of 8 bytes per object, as hashmap stores the integer hash value for each
entry in its hashmap_entry struct (which we really don't care about
here, because we're just reusing the embedded object hash). But I think
the numbers below justify this (and our per-object memory cost is
already much higher).
I also looked at using khash, but it seemed to perform slightly worse
than hashmap at all sizes, and worse even than the existing code for
small sizes. It's also awkward to use here, because we want to look up a
"struct object_entry" from a "struct object_id", and it doesn't handle
mismatched keys as well. Making a mapping of object_id to object_entry
would be more natural, but that would require pulling the embedded oid
out of the object_entry or incurring an extra 32 bytes per object.
In a synthetic test creating as many cheap, tiny objects as possible
perl -e '
my $bits = shift;
my $nr = 2**$bits;
for (my $i = 0; $i < $nr; $i++) {
print "blob\n";
print "data 4\n";
print pack("N", $i);
}
' $bits | git fast-import
I got these results:
nr_objects master khash hashmap
2^20 0m4.317s 0m5.109s 0m3.890s
2^21 0m10.204s 0m9.702s 0m7.933s
2^22 0m27.159s 0m17.911s 0m16.751s
2^23 1m19.038s 0m35.080s 0m31.963s
2^24 4m18.766s 1m10.233s 1m6.793s
which points to hashmap as the winner. We didn't have any perf tests for
fast-export or fast-import, so I added one as a more real-world case.
It uses an export without blobs since that's significantly cheaper than
a full one, but still is an interesting case people might use (e.g., for
rewriting history). It will emphasize this change in some ways (as a
percentage we spend more time making objects and less shuffling blob
bytes around) and less in others (the total object count is lower).
Here are the results for linux.git:
Test HEAD^ HEAD
----------------------------------------------------------------------------
9300.1: export (no-blobs) 67.64(66.96+0.67) 67.81(67.06+0.75) +0.3%
9300.2: import (no-blobs) 284.04(283.34+0.69) 198.09(196.01+0.92) -30.3%
It only has ~5.2M commits and trees, so this is a larger effect than I
expected (the 2^23 case above only improved by 50s or so, but here we
gained almost 90s). This is probably due to actually performing more
object lookups in a real import with trees and commits, as opposed to
just dumping a bunch of blobs into a pack.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-06 21:49:40 +02:00
|
|
|
e = hashmap_get_entry_from_hash(&object_table, hash, oid,
|
|
|
|
struct object_entry, ent);
|
|
|
|
if (!e) {
|
|
|
|
e = new_object(oid);
|
|
|
|
e->idx.offset = 0;
|
|
|
|
hashmap_entry_init(&e->ent, hash);
|
|
|
|
hashmap_add(&object_table, &e->ent);
|
2006-08-06 19:51:39 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return e;
|
|
|
|
}
|
2006-08-05 08:04:21 +02:00
|
|
|
|
2016-05-26 00:54:02 +02:00
|
|
|
static void invalidate_pack_id(unsigned int id)
|
|
|
|
{
|
|
|
|
unsigned long lu;
|
|
|
|
struct tag *t;
|
fast-import: replace custom hash with hashmap.c
We use a custom hash in fast-import to store the set of objects we've
imported so far. It has a fixed set of 2^16 buckets and chains any
collisions with a linked list. As the number of objects grows larger
than that, the load factor increases and we degrade to O(n) lookups and
O(n^2) insertions.
We can scale better by using our hashmap.c implementation, which will
resize the bucket count as we grow. This does incur an extra memory cost
of 8 bytes per object, as hashmap stores the integer hash value for each
entry in its hashmap_entry struct (which we really don't care about
here, because we're just reusing the embedded object hash). But I think
the numbers below justify this (and our per-object memory cost is
already much higher).
I also looked at using khash, but it seemed to perform slightly worse
than hashmap at all sizes, and worse even than the existing code for
small sizes. It's also awkward to use here, because we want to look up a
"struct object_entry" from a "struct object_id", and it doesn't handle
mismatched keys as well. Making a mapping of object_id to object_entry
would be more natural, but that would require pulling the embedded oid
out of the object_entry or incurring an extra 32 bytes per object.
In a synthetic test creating as many cheap, tiny objects as possible
perl -e '
my $bits = shift;
my $nr = 2**$bits;
for (my $i = 0; $i < $nr; $i++) {
print "blob\n";
print "data 4\n";
print pack("N", $i);
}
' $bits | git fast-import
I got these results:
nr_objects master khash hashmap
2^20 0m4.317s 0m5.109s 0m3.890s
2^21 0m10.204s 0m9.702s 0m7.933s
2^22 0m27.159s 0m17.911s 0m16.751s
2^23 1m19.038s 0m35.080s 0m31.963s
2^24 4m18.766s 1m10.233s 1m6.793s
which points to hashmap as the winner. We didn't have any perf tests for
fast-export or fast-import, so I added one as a more real-world case.
It uses an export without blobs since that's significantly cheaper than
a full one, but still is an interesting case people might use (e.g., for
rewriting history). It will emphasize this change in some ways (as a
percentage we spend more time making objects and less shuffling blob
bytes around) and less in others (the total object count is lower).
Here are the results for linux.git:
Test HEAD^ HEAD
----------------------------------------------------------------------------
9300.1: export (no-blobs) 67.64(66.96+0.67) 67.81(67.06+0.75) +0.3%
9300.2: import (no-blobs) 284.04(283.34+0.69) 198.09(196.01+0.92) -30.3%
It only has ~5.2M commits and trees, so this is a larger effect than I
expected (the 2^23 case above only improved by 50s or so, but here we
gained almost 90s). This is probably due to actually performing more
object lookups in a real import with trees and commits, as opposed to
just dumping a bunch of blobs into a pack.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-06 21:49:40 +02:00
|
|
|
struct hashmap_iter iter;
|
|
|
|
struct object_entry *e;
|
2016-05-26 00:54:02 +02:00
|
|
|
|
fast-import: replace custom hash with hashmap.c
We use a custom hash in fast-import to store the set of objects we've
imported so far. It has a fixed set of 2^16 buckets and chains any
collisions with a linked list. As the number of objects grows larger
than that, the load factor increases and we degrade to O(n) lookups and
O(n^2) insertions.
We can scale better by using our hashmap.c implementation, which will
resize the bucket count as we grow. This does incur an extra memory cost
of 8 bytes per object, as hashmap stores the integer hash value for each
entry in its hashmap_entry struct (which we really don't care about
here, because we're just reusing the embedded object hash). But I think
the numbers below justify this (and our per-object memory cost is
already much higher).
I also looked at using khash, but it seemed to perform slightly worse
than hashmap at all sizes, and worse even than the existing code for
small sizes. It's also awkward to use here, because we want to look up a
"struct object_entry" from a "struct object_id", and it doesn't handle
mismatched keys as well. Making a mapping of object_id to object_entry
would be more natural, but that would require pulling the embedded oid
out of the object_entry or incurring an extra 32 bytes per object.
In a synthetic test creating as many cheap, tiny objects as possible
perl -e '
my $bits = shift;
my $nr = 2**$bits;
for (my $i = 0; $i < $nr; $i++) {
print "blob\n";
print "data 4\n";
print pack("N", $i);
}
' $bits | git fast-import
I got these results:
nr_objects master khash hashmap
2^20 0m4.317s 0m5.109s 0m3.890s
2^21 0m10.204s 0m9.702s 0m7.933s
2^22 0m27.159s 0m17.911s 0m16.751s
2^23 1m19.038s 0m35.080s 0m31.963s
2^24 4m18.766s 1m10.233s 1m6.793s
which points to hashmap as the winner. We didn't have any perf tests for
fast-export or fast-import, so I added one as a more real-world case.
It uses an export without blobs since that's significantly cheaper than
a full one, but still is an interesting case people might use (e.g., for
rewriting history). It will emphasize this change in some ways (as a
percentage we spend more time making objects and less shuffling blob
bytes around) and less in others (the total object count is lower).
Here are the results for linux.git:
Test HEAD^ HEAD
----------------------------------------------------------------------------
9300.1: export (no-blobs) 67.64(66.96+0.67) 67.81(67.06+0.75) +0.3%
9300.2: import (no-blobs) 284.04(283.34+0.69) 198.09(196.01+0.92) -30.3%
It only has ~5.2M commits and trees, so this is a larger effect than I
expected (the 2^23 case above only improved by 50s or so, but here we
gained almost 90s). This is probably due to actually performing more
object lookups in a real import with trees and commits, as opposed to
just dumping a bunch of blobs into a pack.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-06 21:49:40 +02:00
|
|
|
hashmap_for_each_entry(&object_table, &iter, e, ent) {
|
|
|
|
if (e->pack_id == id)
|
|
|
|
e->pack_id = MAX_PACK_ID;
|
2016-05-26 00:54:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
for (lu = 0; lu < branch_table_sz; lu++) {
|
|
|
|
struct branch *b;
|
|
|
|
|
|
|
|
for (b = branch_table[lu]; b; b = b->table_next_branch)
|
|
|
|
if (b->pack_id == id)
|
|
|
|
b->pack_id = MAX_PACK_ID;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (t = first_tag; t; t = t->next_tag)
|
|
|
|
if (t->pack_id == id)
|
|
|
|
t->pack_id = MAX_PACK_ID;
|
|
|
|
}
|
|
|
|
|
2006-08-14 06:58:19 +02:00
|
|
|
/* Simple 31-multiplier hash over exactly "len" bytes of "s". */
static unsigned int hc_str(const char *s, size_t len)
{
	unsigned int h = 0;
	size_t i;

	for (i = 0; i < len; i++)
		h = h * 31 + s[i];
	return h;
}
|
|
|
|
|
2007-02-06 06:43:59 +01:00
|
|
|
static char *pool_strdup(const char *s)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
2015-09-24 23:08:19 +02:00
|
|
|
size_t len = strlen(s) + 1;
|
2018-04-11 20:37:54 +02:00
|
|
|
char *r = mem_pool_alloc(&fi_mem_pool, len);
|
2015-09-24 23:08:19 +02:00
|
|
|
memcpy(r, s, len);
|
2006-08-14 06:58:19 +02:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2020-02-22 21:17:45 +01:00
|
|
|
/*
 * Store object entry "oe" under mark number "idnum" in the radix tree
 * rooted at "s", growing the tree as needed.  Each level covers 10
 * bits of the mark number; leaves (shift == 0) hold the entries.
 *
 * NOTE(review): the growth loop below reads and reassigns the GLOBAL
 * "marks" root rather than the caller-supplied "s".  That is only
 * correct when s == marks; if a submodule mark set is passed in, new
 * levels get spliced onto the wrong tree.  A proper fix requires a
 * "struct mark_set **" parameter so the caller's root pointer can be
 * updated — flagging here rather than changing the signature.
 */
static void insert_mark(struct mark_set *s, uintmax_t idnum, struct object_entry *oe)
{
	/* Add levels on top until idnum fits under the root's span. */
	while ((idnum >> s->shift) >= 1024) {
		s = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
		s->shift = marks->shift + 10;
		s->data.sets[0] = marks;
		marks = s;
	}
	/* Descend, materializing interior nodes along the path. */
	while (s->shift) {
		uintmax_t i = idnum >> s->shift;
		idnum -= i << s->shift;
		if (!s->data.sets[i]) {
			s->data.sets[i] = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
			s->data.sets[i]->shift = s->shift - 10;
		}
		s = s->data.sets[i];
	}
	/* Count only first-time assignments; re-setting a mark is allowed. */
	if (!s->data.marked[idnum])
		marks_set_count++;
	s->data.marked[idnum] = oe;
}
|
|
|
|
|
2020-02-22 21:17:47 +01:00
|
|
|
static void *find_mark(struct mark_set *s, uintmax_t idnum)
|
2006-08-23 10:17:45 +02:00
|
|
|
{
|
2007-01-16 06:33:19 +01:00
|
|
|
uintmax_t orig_idnum = idnum;
|
2006-08-23 10:17:45 +02:00
|
|
|
struct object_entry *oe = NULL;
|
|
|
|
if ((idnum >> s->shift) < 1024) {
|
|
|
|
while (s && s->shift) {
|
2007-01-16 06:33:19 +01:00
|
|
|
uintmax_t i = idnum >> s->shift;
|
2006-08-23 10:17:45 +02:00
|
|
|
idnum -= i << s->shift;
|
|
|
|
s = s->data.sets[i];
|
|
|
|
}
|
|
|
|
if (s)
|
|
|
|
oe = s->data.marked[idnum];
|
|
|
|
}
|
|
|
|
if (!oe)
|
2007-02-21 02:34:56 +01:00
|
|
|
die("mark :%" PRIuMAX " not declared", orig_idnum);
|
2006-08-23 10:17:45 +02:00
|
|
|
return oe;
|
|
|
|
}
|
|
|
|
|
2007-02-06 06:43:59 +01:00
|
|
|
static struct atom_str *to_atom(const char *s, unsigned short len)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
|
|
|
unsigned int hc = hc_str(s, len) % atom_table_sz;
|
|
|
|
struct atom_str *c;
|
|
|
|
|
|
|
|
for (c = atom_table[hc]; c; c = c->next_atom)
|
|
|
|
if (c->str_len == len && !strncmp(s, c->str_dat, len))
|
|
|
|
return c;
|
|
|
|
|
2018-04-11 20:37:54 +02:00
|
|
|
c = mem_pool_alloc(&fi_mem_pool, sizeof(struct atom_str) + len + 1);
|
2006-08-14 06:58:19 +02:00
|
|
|
c->str_len = len;
|
2015-09-24 23:08:26 +02:00
|
|
|
memcpy(c->str_dat, s, len);
|
2006-08-14 06:58:19 +02:00
|
|
|
c->str_dat[len] = 0;
|
|
|
|
c->next_atom = atom_table[hc];
|
|
|
|
atom_table[hc] = c;
|
|
|
|
atom_cnt++;
|
|
|
|
return c;
|
|
|
|
}
|
|
|
|
|
2007-02-06 06:43:59 +01:00
|
|
|
static struct branch *lookup_branch(const char *name)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
|
|
|
unsigned int hc = hc_str(name, strlen(name)) % branch_table_sz;
|
|
|
|
struct branch *b;
|
|
|
|
|
|
|
|
for (b = branch_table[hc]; b; b = b->table_next_branch)
|
|
|
|
if (!strcmp(name, b->name))
|
|
|
|
return b;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2007-02-06 06:43:59 +01:00
|
|
|
static struct branch *new_branch(const char *name)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
|
|
|
unsigned int hc = hc_str(name, strlen(name)) % branch_table_sz;
|
2009-05-01 11:06:36 +02:00
|
|
|
struct branch *b = lookup_branch(name);
|
2006-08-14 06:58:19 +02:00
|
|
|
|
|
|
|
if (b)
|
|
|
|
die("Invalid attempt to create duplicate branch: %s", name);
|
2011-09-15 23:10:25 +02:00
|
|
|
if (check_refname_format(name, REFNAME_ALLOW_ONELEVEL))
|
2006-08-15 02:16:28 +02:00
|
|
|
die("Branch name doesn't conform to GIT standards: %s", name);
|
2006-08-14 06:58:19 +02:00
|
|
|
|
2018-04-11 20:37:54 +02:00
|
|
|
b = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct branch));
|
2006-08-14 06:58:19 +02:00
|
|
|
b->name = pool_strdup(name);
|
|
|
|
b->table_next_branch = branch_table[hc];
|
2006-08-29 04:06:13 +02:00
|
|
|
b->branch_tree.versions[0].mode = S_IFDIR;
|
|
|
|
b->branch_tree.versions[1].mode = S_IFDIR;
|
2009-12-07 12:27:24 +01:00
|
|
|
b->num_notes = 0;
|
2007-03-05 18:31:09 +01:00
|
|
|
b->active = 0;
|
2007-01-17 08:42:43 +01:00
|
|
|
b->pack_id = MAX_PACK_ID;
|
2006-08-14 06:58:19 +02:00
|
|
|
branch_table[hc] = b;
|
|
|
|
branch_count++;
|
|
|
|
return b;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int hc_entries(unsigned int cnt)
|
|
|
|
{
|
|
|
|
cnt = cnt & 7 ? (cnt / 8) + 1 : cnt / 8;
|
|
|
|
return cnt < avail_tree_table_sz ? cnt : avail_tree_table_sz - 1;
|
|
|
|
}
|
|
|
|
|
2007-02-06 06:43:59 +01:00
|
|
|
static struct tree_content *new_tree_content(unsigned int cnt)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
|
|
|
struct avail_tree_content *f, *l = NULL;
|
|
|
|
struct tree_content *t;
|
|
|
|
unsigned int hc = hc_entries(cnt);
|
|
|
|
|
|
|
|
for (f = avail_tree_table[hc]; f; l = f, f = f->next_avail)
|
|
|
|
if (f->entry_capacity >= cnt)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (f) {
|
|
|
|
if (l)
|
|
|
|
l->next_avail = f->next_avail;
|
|
|
|
else
|
|
|
|
avail_tree_table[hc] = f->next_avail;
|
|
|
|
} else {
|
|
|
|
cnt = cnt & 7 ? ((cnt / 8) + 1) * 8 : cnt;
|
2018-04-11 20:37:54 +02:00
|
|
|
f = mem_pool_alloc(&fi_mem_pool, sizeof(*t) + sizeof(t->entries[0]) * cnt);
|
2006-08-14 06:58:19 +02:00
|
|
|
f->entry_capacity = cnt;
|
|
|
|
}
|
|
|
|
|
|
|
|
t = (struct tree_content*)f;
|
|
|
|
t->entry_count = 0;
|
2006-08-28 18:22:50 +02:00
|
|
|
t->delta_depth = 0;
|
2006-08-14 06:58:19 +02:00
|
|
|
return t;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void release_tree_entry(struct tree_entry *e);
|
|
|
|
static void release_tree_content(struct tree_content *t)
|
|
|
|
{
|
|
|
|
struct avail_tree_content *f = (struct avail_tree_content*)t;
|
|
|
|
unsigned int hc = hc_entries(f->entry_capacity);
|
2006-08-23 07:33:47 +02:00
|
|
|
f->next_avail = avail_tree_table[hc];
|
|
|
|
avail_tree_table[hc] = f;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void release_tree_content_recursive(struct tree_content *t)
|
|
|
|
{
|
2006-08-14 06:58:19 +02:00
|
|
|
unsigned int i;
|
|
|
|
for (i = 0; i < t->entry_count; i++)
|
|
|
|
release_tree_entry(t->entries[i]);
|
2006-08-23 07:33:47 +02:00
|
|
|
release_tree_content(t);
|
2006-08-14 06:58:19 +02:00
|
|
|
}
|
|
|
|
|
2007-02-06 06:43:59 +01:00
|
|
|
static struct tree_content *grow_tree_content(
|
2006-08-14 06:58:19 +02:00
|
|
|
struct tree_content *t,
|
|
|
|
int amt)
|
|
|
|
{
|
|
|
|
struct tree_content *r = new_tree_content(t->entry_count + amt);
|
|
|
|
r->entry_count = t->entry_count;
|
2006-08-28 18:22:50 +02:00
|
|
|
r->delta_depth = t->delta_depth;
|
2019-06-15 20:36:35 +02:00
|
|
|
COPY_ARRAY(r->entries, t->entries, t->entry_count);
|
2006-08-14 06:58:19 +02:00
|
|
|
release_tree_content(t);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2007-02-06 06:43:59 +01:00
|
|
|
/*
 * Pop one tree_entry from the free list, refilling it with a batch of
 * tree_entry_alloc freshly malloc'd entries when empty.
 *
 * The free list is intrusive: the "next" pointer of a free entry is
 * stored in the entry's own first bytes via the *((void**)e) punning
 * below, so free entries carry no extra bookkeeping.
 */
static struct tree_entry *new_tree_entry(void)
{
	struct tree_entry *e;

	if (!avail_tree_entry) {
		unsigned int n = tree_entry_alloc;
		tree_entry_allocd += n * sizeof(struct tree_entry);
		ALLOC_ARRAY(e, n);
		avail_tree_entry = e;
		/* Chain each new entry to the next; the last gets NULL. */
		while (n-- > 1) {
			*((void**)e) = e + 1;
			e++;
		}
		*((void**)e) = NULL;
	}

	e = avail_tree_entry;
	avail_tree_entry = *((void**)e);
	return e;
}
|
|
|
|
|
|
|
|
/*
 * Release a tree_entry (and any subtree it still holds) back onto the
 * free list.  The free-list link is stored in the entry's first
 * pointer-sized word, so the subtree must be released before the entry
 * is overwritten.
 */
static void release_tree_entry(struct tree_entry *e)
{
	if (e->tree)
		release_tree_content_recursive(e->tree);
	*((void**)e) = avail_tree_entry;
	avail_tree_entry = e;
}
|
|
|
|
|
2007-07-15 07:40:37 +02:00
|
|
|
/*
 * Deep-copy a tree_content.  Each entry is duplicated; a loaded subtree
 * is recursively copied only while the entry's versions[1] oid is still
 * null (presumably meaning the subtree has not been written out yet —
 * otherwise it can be reloaded on demand, so b->tree is cleared).
 * Returns NULL when given NULL.
 */
static struct tree_content *dup_tree_content(struct tree_content *s)
{
	struct tree_content *d;
	struct tree_entry *a, *b;
	unsigned int i;

	if (!s)
		return NULL;
	d = new_tree_content(s->entry_count);
	for (i = 0; i < s->entry_count; i++) {
		a = s->entries[i];
		b = new_tree_entry();
		memcpy(b, a, sizeof(*a));
		if (a->tree && is_null_oid(&b->versions[1].oid))
			b->tree = dup_tree_content(a->tree);
		else
			b->tree = NULL;
		d->entries[i] = b;
	}
	d->entry_count = s->entry_count;
	d->delta_depth = s->delta_depth;

	return d;
}
|
|
|
|
|
2007-01-17 07:47:25 +01:00
|
|
|
/*
 * Open a new temporary packfile in the object database, write its pack
 * header, and register it as the current output pack (pack_data /
 * pack_file / all_packs[pack_id]).  Resets pack_size and object_count
 * for the new pack.
 */
static void start_packfile(void)
{
	struct strbuf tmp_file = STRBUF_INIT;
	struct packed_git *p;
	struct pack_header hdr;
	int pack_fd;

	pack_fd = odb_mkstemp(&tmp_file, "pack/tmp_pack_XXXXXX");
	FLEX_ALLOC_STR(p, pack_name, tmp_file.buf);
	strbuf_release(&tmp_file);

	p->pack_fd = pack_fd;
	/* We close the fd ourselves; keep the pack machinery from doing it. */
	p->do_not_close = 1;
	pack_file = hashfd(pack_fd, p->pack_name);

	/* hdr_entries stays 0 here; fixup_pack_header_footer patches it later. */
	hdr.hdr_signature = htonl(PACK_SIGNATURE);
	hdr.hdr_version = htonl(2);
	hdr.hdr_entries = 0;
	hashwrite(pack_file, &hdr, sizeof(hdr));

	pack_data = p;
	pack_size = sizeof(hdr);
	object_count = 0;

	REALLOC_ARRAY(all_packs, pack_id + 1);
	all_packs[pack_id] = p;
}
|
|
|
|
|
2010-02-17 20:05:53 +01:00
|
|
|
/*
 * Write a pack index (.idx) for the objects stored in the current pack.
 * Collects every object_entry carrying the current pack_id from the
 * allocation pools, hands them to write_idx_file(), and returns the
 * temporary index file name (caller takes ownership; see keep_pack()).
 */
static const char *create_index(void)
{
	const char *tmpfile;
	struct pack_idx_entry **idx, **c, **last;
	struct object_entry *e;
	struct object_entry_pool *o;

	/* Build the table of object IDs. */
	ALLOC_ARRAY(idx, object_count);
	c = idx;
	for (o = blocks; o; o = o->next_pool)
		for (e = o->next_free; e-- != o->entries;)
			if (pack_id == e->pack_id)
				*c++ = &e->idx;
	last = idx + object_count;
	/* Every object counted for this pack must have been found. */
	if (c != last)
		die("internal consistency error creating the index");

	tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts,
				 pack_data->hash);
	free(idx);
	return tmpfile;
}
|
|
|
|
|
2010-02-17 20:05:53 +01:00
|
|
|
/*
 * Install the finished pack into the object database under its final
 * hash-derived name.  A .keep file is created first so concurrent gc
 * cannot prune the pack while it is being moved into place; the .keep
 * files are removed later by unkeep_all_packs().
 *
 * Takes ownership of (and frees) curr_index_name.  Returns the final
 * .idx path as a newly allocated string the caller must free.
 */
static char *keep_pack(const char *curr_index_name)
{
	static const char *keep_msg = "fast-import";
	struct strbuf name = STRBUF_INIT;
	int keep_fd;

	odb_pack_name(&name, pack_data->hash, "keep");
	keep_fd = odb_pack_keep(name.buf);
	if (keep_fd < 0)
		die_errno("cannot create keep file");
	write_or_die(keep_fd, keep_msg, strlen(keep_msg));
	if (close(keep_fd))
		die_errno("failed to write keep file");

	odb_pack_name(&name, pack_data->hash, "pack");
	if (finalize_object_file(pack_data->pack_name, name.buf))
		die("cannot store pack file");

	odb_pack_name(&name, pack_data->hash, "idx");
	if (finalize_object_file(curr_index_name, name.buf))
		die("cannot store index file");
	free((void *)curr_index_name);

	return strbuf_detach(&name, NULL);
}
|
|
|
|
|
2007-01-17 07:47:25 +01:00
|
|
|
static void unkeep_all_packs(void)
|
2007-01-16 07:15:31 +01:00
|
|
|
{
|
2017-03-16 15:27:15 +01:00
|
|
|
struct strbuf name = STRBUF_INIT;
|
2007-01-16 07:15:31 +01:00
|
|
|
int k;
|
|
|
|
|
|
|
|
for (k = 0; k < pack_id; k++) {
|
|
|
|
struct packed_git *p = all_packs[k];
|
2019-02-19 01:05:03 +01:00
|
|
|
odb_pack_name(&name, p->hash, "keep");
|
2017-03-16 15:27:15 +01:00
|
|
|
unlink_or_warn(name.buf);
|
2007-01-16 07:15:31 +01:00
|
|
|
}
|
2017-03-16 15:27:15 +01:00
|
|
|
strbuf_release(&name);
|
2007-01-15 10:39:05 +01:00
|
|
|
}
|
|
|
|
|
2016-04-25 23:17:28 +02:00
|
|
|
/*
 * Explode a small pack into loose objects by feeding it to
 * "git unpack-objects".  Returns the child's exit status (0 on
 * success), so the caller can discard the pack file afterwards.
 */
static int loosen_small_pack(const struct packed_git *p)
{
	struct child_process unpack = CHILD_PROCESS_INIT;

	/* The fd has been written to; rewind so the child reads from byte 0. */
	if (lseek(p->pack_fd, 0, SEEK_SET) < 0)
		die_errno("Failed seeking to start of '%s'", p->pack_name);

	unpack.in = p->pack_fd;
	unpack.git_cmd = 1;
	unpack.stdout_to_stderr = 1;
	argv_array_push(&unpack.args, "unpack-objects");
	if (!show_stats)
		argv_array_push(&unpack.args, "-q");

	return run_command(&unpack);
}
|
|
|
|
|
2007-01-17 07:47:25 +01:00
|
|
|
/*
 * Finish the current output packfile: write the real header/footer,
 * either loosen it (when at or below unpack_limit) or index it and
 * install it into the object database, optionally report pack edges,
 * and reset the delta state.
 *
 * The static 'running' flag guards against recursive entry: a die()
 * during this function reaches die_nicely(), which calls end_packfile()
 * again while pack_data is in an inconsistent state (see the commit
 * "fast-import: avoid running end_packfile recursively").
 */
static void end_packfile(void)
{
	static int running;

	if (running || !pack_data)
		return;

	running = 1;
	clear_delta_base_cache();
	if (object_count) {
		struct packed_git *new_p;
		struct object_id cur_pack_oid;
		char *idx_name;
		int i;
		struct branch *b;
		struct tag *t;

		close_pack_windows(pack_data);
		finalize_hashfile(pack_file, cur_pack_oid.hash, 0);
		fixup_pack_header_footer(pack_data->pack_fd, pack_data->hash,
					 pack_data->pack_name, object_count,
					 cur_pack_oid.hash, pack_size);

		/* Small packs are exploded into loose objects instead. */
		if (object_count <= unpack_limit) {
			if (!loosen_small_pack(pack_data)) {
				invalidate_pack_id(pack_id);
				goto discard_pack;
			}
		}

		close(pack_data->pack_fd);
		idx_name = keep_pack(create_index());

		/* Register the packfile with core git's machinery. */
		new_p = add_packed_git(idx_name, strlen(idx_name), 1);
		if (!new_p)
			die("core git rejected index %s", idx_name);
		all_packs[pack_id] = new_p;
		install_packed_git(the_repository, new_p);
		free(idx_name);

		/* Print the boundary */
		if (pack_edges) {
			fprintf(pack_edges, "%s:", new_p->pack_name);
			for (i = 0; i < branch_table_sz; i++) {
				for (b = branch_table[i]; b; b = b->table_next_branch) {
					if (b->pack_id == pack_id)
						fprintf(pack_edges, " %s",
							oid_to_hex(&b->oid));
				}
			}
			for (t = first_tag; t; t = t->next_tag) {
				if (t->pack_id == pack_id)
					fprintf(pack_edges, " %s",
						oid_to_hex(&t->oid));
			}
			fputc('\n', pack_edges);
			fflush(pack_edges);
		}

		pack_id++;
	}
	else {
discard_pack:
		/* Empty (or loosened) pack: just remove the temporary file. */
		close(pack_data->pack_fd);
		unlink_or_warn(pack_data->pack_name);
	}
	FREE_AND_NULL(pack_data);
	running = 0;

	/* We can't carry a delta across packfiles. */
	strbuf_release(&last_blob.data);
	last_blob.offset = 0;
	last_blob.depth = 0;
}
|
|
|
|
|
2007-02-07 08:42:44 +01:00
|
|
|
/*
 * Close out the current packfile and immediately start a fresh one;
 * used for auto-checkpointing when a pack would grow past max_packsize.
 */
static void cycle_packfile(void)
{
	end_packfile();
	start_packfile();
}
|
|
|
|
|
2006-08-08 06:46:13 +02:00
|
|
|
/*
 * Store one object in the current packfile, optionally delta-compressed
 * against 'last' (the previously stored object in the same chain).
 *
 * type   - object type (OBJ_BLOB, OBJ_TREE, OBJ_COMMIT, OBJ_TAG).
 * dat    - full object content; may be swapped into 'last' on return.
 * last   - delta base candidate and chain state, or NULL.
 * oidout - if non-NULL, receives the computed object id.
 * mark   - if non-zero, record the object under this mark number.
 *
 * Returns 1 if the object already existed (here or in another pack) and
 * nothing was written, 0 if it was newly written to the pack.
 */
static int store_object(
	enum object_type type,
	struct strbuf *dat,
	struct last_object *last,
	struct object_id *oidout,
	uintmax_t mark)
{
	void *out, *delta;
	struct object_entry *e;
	unsigned char hdr[96];
	struct object_id oid;
	unsigned long hdrlen, deltalen;
	git_hash_ctx c;
	git_zstream s;

	/* Hash "<type> <len>\0<content>" to get the object id. */
	hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu",
			   type_name(type), (unsigned long)dat->len) + 1;
	the_hash_algo->init_fn(&c);
	the_hash_algo->update_fn(&c, hdr, hdrlen);
	the_hash_algo->update_fn(&c, dat->buf, dat->len);
	the_hash_algo->final_fn(oid.hash, &c);
	if (oidout)
		oidcpy(oidout, &oid);

	e = insert_object(&oid);
	if (mark)
		insert_mark(marks, mark, e);
	if (e->idx.offset) {
		/* Already written by this process. */
		duplicate_count_by_type[type]++;
		return 1;
	} else if (find_sha1_pack(oid.hash,
				  get_all_packs(the_repository))) {
		/* Already present in an existing pack in the repository. */
		e->type = type;
		e->pack_id = MAX_PACK_ID;
		e->idx.offset = 1; /* just not zero! */
		duplicate_count_by_type[type]++;
		return 1;
	}

	/* Try a delta against the previous object if the chain allows it. */
	if (last && last->data.len && last->data.buf && last->depth < max_depth
		&& dat->len > the_hash_algo->rawsz) {

		delta_count_attempts_by_type[type]++;
		delta = diff_delta(last->data.buf, last->data.len,
				   dat->buf, dat->len,
				   &deltalen, dat->len - the_hash_algo->rawsz);
	} else
		delta = NULL;

	/* Deflate the delta if one was produced, else the full content. */
	git_deflate_init(&s, pack_compression_level);
	if (delta) {
		s.next_in = delta;
		s.avail_in = deltalen;
	} else {
		s.next_in = (void *)dat->buf;
		s.avail_in = dat->len;
	}
	s.avail_out = git_deflate_bound(&s, s.avail_in);
	s.next_out = out = xmalloc(s.avail_out);
	while (git_deflate(&s, Z_FINISH) == Z_OK)
		; /* nothing */
	git_deflate_end(&s);

	/* Determine if we should auto-checkpoint. */
	if ((max_packsize
		&& (pack_size + PACK_SIZE_THRESHOLD + s.total_out) > max_packsize)
		|| (pack_size + PACK_SIZE_THRESHOLD + s.total_out) < pack_size) {

		/* This new object needs to *not* have the current pack_id. */
		e->pack_id = pack_id + 1;
		cycle_packfile();

		/* We cannot carry a delta into the new pack. */
		if (delta) {
			FREE_AND_NULL(delta);

			/* Re-deflate the full content for the new pack. */
			git_deflate_init(&s, pack_compression_level);
			s.next_in = (void *)dat->buf;
			s.avail_in = dat->len;
			s.avail_out = git_deflate_bound(&s, s.avail_in);
			s.next_out = out = xrealloc(out, s.avail_out);
			while (git_deflate(&s, Z_FINISH) == Z_OK)
				; /* nothing */
			git_deflate_end(&s);
		}
	}

	e->type = type;
	e->pack_id = pack_id;
	e->idx.offset = pack_size;
	object_count++;
	object_count_by_type[type]++;

	crc32_begin(pack_file);

	if (delta) {
		off_t ofs = e->idx.offset - last->offset;
		unsigned pos = sizeof(hdr) - 1;

		delta_count_by_type[type]++;
		e->depth = last->depth + 1;

		hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
						      OBJ_OFS_DELTA, deltalen);
		hashwrite(pack_file, hdr, hdrlen);
		pack_size += hdrlen;

		/* Emit the base offset as a variable-length big-endian value. */
		hdr[pos] = ofs & 127;
		while (ofs >>= 7)
			hdr[--pos] = 128 | (--ofs & 127);
		hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos);
		pack_size += sizeof(hdr) - pos;
	} else {
		e->depth = 0;
		hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
						      type, dat->len);
		hashwrite(pack_file, hdr, hdrlen);
		pack_size += hdrlen;
	}

	hashwrite(pack_file, out, s.total_out);
	pack_size += s.total_out;

	e->idx.crc32 = crc32_end(pack_file);

	free(out);
	free(delta);
	if (last) {
		/* Keep this object's content as the next delta base. */
		if (last->no_swap) {
			last->data = *dat;
		} else {
			strbuf_swap(&last->data, dat);
		}
		last->offset = e->idx.offset;
		last->depth = e->depth;
	}
	return 0;
}
|
|
|
|
|
2018-02-01 03:18:46 +01:00
|
|
|
/*
 * Roll the output pack back to a previously recorded checkpoint,
 * discarding bytes written since then (used when a streamed blob turns
 * out to be a duplicate).
 */
static void truncate_pack(struct hashfile_checkpoint *checkpoint)
{
	if (hashfile_truncate(pack_file, checkpoint))
		die_errno("cannot truncate pack to skip duplicate");
	pack_size = checkpoint->offset;
}
|
|
|
|
|
2017-05-07 00:09:56 +02:00
|
|
|
/*
 * Read 'len' bytes of blob data from stdin and deflate them straight
 * into the current packfile in fixed-size chunks, without buffering the
 * whole blob in memory.  The object id is computed on the fly; if the
 * blob turns out to be a duplicate the pack is truncated back to the
 * checkpoint taken before writing began.
 *
 * oidout - if non-NULL, receives the computed object id.
 * mark   - if non-zero, record the object under this mark number.
 */
static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
{
	size_t in_sz = 64 * 1024, out_sz = 64 * 1024;
	unsigned char *in_buf = xmalloc(in_sz);
	unsigned char *out_buf = xmalloc(out_sz);
	struct object_entry *e;
	struct object_id oid;
	unsigned long hdrlen;
	off_t offset;
	git_hash_ctx c;
	git_zstream s;
	struct hashfile_checkpoint checkpoint;
	int status = Z_OK;

	/* Determine if we should auto-checkpoint. */
	if ((max_packsize
		&& (pack_size + PACK_SIZE_THRESHOLD + len) > max_packsize)
		|| (pack_size + PACK_SIZE_THRESHOLD + len) < pack_size)
		cycle_packfile();

	/* Remember where we started, in case this blob is a duplicate. */
	hashfile_checkpoint(pack_file, &checkpoint);
	offset = checkpoint.offset;

	/* The loose-object style "blob <len>\0" header seeds the hash only. */
	hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;

	the_hash_algo->init_fn(&c);
	the_hash_algo->update_fn(&c, out_buf, hdrlen);

	crc32_begin(pack_file);

	git_deflate_init(&s, pack_compression_level);

	/* The pack entry itself starts with the in-pack object header. */
	hdrlen = encode_in_pack_object_header(out_buf, out_sz, OBJ_BLOB, len);

	s.next_out = out_buf + hdrlen;
	s.avail_out = out_sz - hdrlen;

	while (status != Z_STREAM_END) {
		if (0 < len && !s.avail_in) {
			size_t cnt = in_sz < len ? in_sz : (size_t)len;
			size_t n = fread(in_buf, 1, cnt, stdin);
			if (!n && feof(stdin))
				die("EOF in data (%" PRIuMAX " bytes remaining)", len);

			the_hash_algo->update_fn(&c, in_buf, n);
			s.next_in = in_buf;
			s.avail_in = n;
			len -= n;
		}

		/* Only finish the stream once all input has been consumed. */
		status = git_deflate(&s, len ? 0 : Z_FINISH);

		if (!s.avail_out || status == Z_STREAM_END) {
			size_t n = s.next_out - out_buf;
			hashwrite(pack_file, out_buf, n);
			pack_size += n;
			s.next_out = out_buf;
			s.avail_out = out_sz;
		}

		switch (status) {
		case Z_OK:
		case Z_BUF_ERROR:
		case Z_STREAM_END:
			continue;
		default:
			die("unexpected deflate failure: %d", status);
		}
	}
	git_deflate_end(&s);
	the_hash_algo->final_fn(oid.hash, &c);

	if (oidout)
		oidcpy(oidout, &oid);

	e = insert_object(&oid);

	if (mark)
		insert_mark(marks, mark, e);

	if (e->idx.offset) {
		/* Already written by this process; undo our bytes. */
		duplicate_count_by_type[OBJ_BLOB]++;
		truncate_pack(&checkpoint);

	} else if (find_sha1_pack(oid.hash,
				  get_all_packs(the_repository))) {
		/* Already present in an existing pack; undo our bytes. */
		e->type = OBJ_BLOB;
		e->pack_id = MAX_PACK_ID;
		e->idx.offset = 1; /* just not zero! */
		duplicate_count_by_type[OBJ_BLOB]++;
		truncate_pack(&checkpoint);

	} else {
		e->depth = 0;
		e->type = OBJ_BLOB;
		e->pack_id = pack_id;
		e->idx.offset = offset;
		e->idx.crc32 = crc32_end(pack_file);
		object_count++;
		object_count_by_type[OBJ_BLOB]++;
	}

	free(in_buf);
	free(out_buf);
}
|
|
|
|
|
2008-01-21 05:37:01 +01:00
|
|
|
/* All calls must be guarded by find_object() or find_mark() to
 * ensure the 'struct object_entry' passed was written by this
 * process instance.  We unpack the entry by the offset, avoiding
 * the need for the corresponding .idx file.  This unpacking rule
 * works because we only use OBJ_REF_DELTA within the packfiles
 * created by fast-import.
 *
 * oe must not be NULL.  Such an oe usually comes from giving
 * an unknown SHA-1 to find_object() or an undefined mark to
 * find_mark().  Callers must test for this condition and use
 * the standard read_sha1_file() when it happens.
 *
 * oe->pack_id must not be MAX_PACK_ID.  Such an oe is usually from
 * find_mark(), where the mark was reloaded from an existing marks
 * file and is referencing an object that this fast-import process
 * instance did not write out to a packfile.  Callers must test for
 * this condition and use read_sha1_file() instead.
 */
static void *gfi_unpack_entry(
	struct object_entry *oe,
	unsigned long *sizep)
{
	enum object_type type;
	struct packed_git *p = all_packs[oe->pack_id];
	if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) {
		/* The object is stored in the packfile we are writing to
		 * and we have modified it since the last time we scanned
		 * back to read a previously written object.
		 * If an old window covered [p->pack_size, p->pack_size + rawsz)
		 * its data is stale and is not valid.  Closing all windows
		 * and updating the packfile length ensures we can read
		 * the newly written data.
		 */
		close_pack_windows(p);
		hashflush(pack_file);

		/* We have to offer rawsz bytes additional on the end of
		 * the packfile as the core unpacker code assumes the
		 * footer is present at the file end and must promise
		 * at least rawsz bytes within any window it maps.  But
		 * we don't actually create the footer here.
		 */
		p->pack_size = pack_size + the_hash_algo->rawsz;
	}
	return unpack_entry(the_repository, p, oe->idx.offset, &type, sizep);
}
|
|
|
|
|
2007-02-05 22:34:56 +01:00
|
|
|
static const char *get_mode(const char *str, uint16_t *modep)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
|
|
|
unsigned char c;
|
2007-02-05 22:34:56 +01:00
|
|
|
uint16_t mode = 0;
|
2006-08-14 06:58:19 +02:00
|
|
|
|
|
|
|
while ((c = *str++) != ' ') {
|
|
|
|
if (c < '0' || c > '7')
|
|
|
|
return NULL;
|
|
|
|
mode = (mode << 3) + (c - '0');
|
|
|
|
}
|
|
|
|
*modep = mode;
|
|
|
|
return str;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void load_tree(struct tree_entry *root)
|
|
|
|
{
|
2017-05-07 00:09:56 +02:00
|
|
|
struct object_id *oid = &root->versions[1].oid;
|
2006-08-14 06:58:19 +02:00
|
|
|
struct object_entry *myoe;
|
|
|
|
struct tree_content *t;
|
|
|
|
unsigned long size;
|
|
|
|
char *buf;
|
|
|
|
const char *c;
|
|
|
|
|
|
|
|
root->tree = t = new_tree_content(8);
|
2017-05-07 00:09:56 +02:00
|
|
|
if (is_null_oid(oid))
|
2006-08-14 06:58:19 +02:00
|
|
|
return;
|
|
|
|
|
2017-05-07 00:09:56 +02:00
|
|
|
myoe = find_object(oid);
|
2007-05-23 23:01:49 +02:00
|
|
|
if (myoe && myoe->pack_id != MAX_PACK_ID) {
|
2006-08-24 10:37:35 +02:00
|
|
|
if (myoe->type != OBJ_TREE)
|
2017-05-07 00:09:56 +02:00
|
|
|
die("Not a tree: %s", oid_to_hex(oid));
|
2007-11-14 05:48:42 +01:00
|
|
|
t->delta_depth = myoe->depth;
|
2007-01-15 12:35:41 +01:00
|
|
|
buf = gfi_unpack_entry(myoe, &size);
|
2008-02-14 07:34:34 +01:00
|
|
|
if (!buf)
|
2017-05-07 00:09:56 +02:00
|
|
|
die("Can't load tree %s", oid_to_hex(oid));
|
2006-08-14 06:58:19 +02:00
|
|
|
} else {
|
2007-02-26 20:55:59 +01:00
|
|
|
enum object_type type;
|
sha1_file: convert read_sha1_file to struct object_id
Convert read_sha1_file to take a pointer to struct object_id and rename
it read_object_file. Do the same for read_sha1_file_extended.
Convert one use in grep.c to use the new function without any other code
change, since the pointer being passed is a void pointer that is already
initialized with a pointer to struct object_id. Update the declaration
and definitions of the modified functions, and apply the following
semantic patch to convert the remaining callers:
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1.hash, E2, E3)
+ read_object_file(&E1, E2, E3)
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1->hash, E2, E3)
+ read_object_file(E1, E2, E3)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1.hash, E2, E3, E4)
+ read_object_file_extended(&E1, E2, E3, E4)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1->hash, E2, E3, E4)
+ read_object_file_extended(E1, E2, E3, E4)
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-12 03:27:53 +01:00
|
|
|
buf = read_object_file(oid, &type, &size);
|
2007-02-26 20:55:59 +01:00
|
|
|
if (!buf || type != OBJ_TREE)
|
2017-05-07 00:09:56 +02:00
|
|
|
die("Can't load tree %s", oid_to_hex(oid));
|
2006-08-14 06:58:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
c = buf;
|
|
|
|
while (c != (buf + size)) {
|
|
|
|
struct tree_entry *e = new_tree_entry();
|
|
|
|
|
|
|
|
if (t->entry_count == t->entry_capacity)
|
2007-03-11 03:39:17 +01:00
|
|
|
root->tree = t = grow_tree_content(t, t->entry_count);
|
2006-08-14 06:58:19 +02:00
|
|
|
t->entries[t->entry_count++] = e;
|
|
|
|
|
|
|
|
e->tree = NULL;
|
2006-08-28 18:22:50 +02:00
|
|
|
c = get_mode(c, &e->versions[1].mode);
|
2006-08-14 06:58:19 +02:00
|
|
|
if (!c)
|
2017-05-07 00:09:56 +02:00
|
|
|
die("Corrupt mode in %s", oid_to_hex(oid));
|
2006-08-28 18:22:50 +02:00
|
|
|
e->versions[0].mode = e->versions[1].mode;
|
2007-03-12 20:48:37 +01:00
|
|
|
e->name = to_atom(c, strlen(c));
|
2006-08-14 06:58:19 +02:00
|
|
|
c += e->name->str_len + 1;
|
2017-05-01 04:29:03 +02:00
|
|
|
hashcpy(e->versions[0].oid.hash, (unsigned char *)c);
|
|
|
|
hashcpy(e->versions[1].oid.hash, (unsigned char *)c);
|
2019-02-19 01:05:05 +01:00
|
|
|
c += the_hash_algo->rawsz;
|
2006-08-14 06:58:19 +02:00
|
|
|
}
|
|
|
|
free(buf);
|
|
|
|
}
|
|
|
|
|
2006-08-28 18:22:50 +02:00
|
|
|
static int tecmp0 (const void *_a, const void *_b)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
|
|
|
struct tree_entry *a = *((struct tree_entry**)_a);
|
|
|
|
struct tree_entry *b = *((struct tree_entry**)_b);
|
|
|
|
return base_name_compare(
|
2006-08-28 18:22:50 +02:00
|
|
|
a->name->str_dat, a->name->str_len, a->versions[0].mode,
|
|
|
|
b->name->str_dat, b->name->str_len, b->versions[0].mode);
|
2006-08-14 06:58:19 +02:00
|
|
|
}
|
|
|
|
|
2006-08-28 18:22:50 +02:00
|
|
|
static int tecmp1 (const void *_a, const void *_b)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
2006-08-28 18:22:50 +02:00
|
|
|
struct tree_entry *a = *((struct tree_entry**)_a);
|
|
|
|
struct tree_entry *b = *((struct tree_entry**)_b);
|
|
|
|
return base_name_compare(
|
|
|
|
a->name->str_dat, a->name->str_len, a->versions[1].mode,
|
|
|
|
b->name->str_dat, b->name->str_len, b->versions[1].mode);
|
|
|
|
}
|
|
|
|
|
2007-09-17 13:48:17 +02:00
|
|
|
static void mktree(struct tree_content *t, int v, struct strbuf *b)
|
2006-08-28 18:22:50 +02:00
|
|
|
{
|
|
|
|
size_t maxlen = 0;
|
2006-08-14 06:58:19 +02:00
|
|
|
unsigned int i;
|
|
|
|
|
2006-08-28 18:22:50 +02:00
|
|
|
if (!v)
|
2016-09-29 17:27:31 +02:00
|
|
|
QSORT(t->entries, t->entry_count, tecmp0);
|
2006-08-28 18:22:50 +02:00
|
|
|
else
|
2016-09-29 17:27:31 +02:00
|
|
|
QSORT(t->entries, t->entry_count, tecmp1);
|
2006-08-14 06:58:19 +02:00
|
|
|
|
|
|
|
for (i = 0; i < t->entry_count; i++) {
|
2006-08-28 18:22:50 +02:00
|
|
|
if (t->entries[i]->versions[v].mode)
|
|
|
|
maxlen += t->entries[i]->name->str_len + 34;
|
2006-08-14 06:58:19 +02:00
|
|
|
}
|
|
|
|
|
2007-09-17 13:48:17 +02:00
|
|
|
strbuf_reset(b);
|
|
|
|
strbuf_grow(b, maxlen);
|
2006-08-14 06:58:19 +02:00
|
|
|
for (i = 0; i < t->entry_count; i++) {
|
|
|
|
struct tree_entry *e = t->entries[i];
|
2006-08-28 18:22:50 +02:00
|
|
|
if (!e->versions[v].mode)
|
|
|
|
continue;
|
2011-08-14 20:32:24 +02:00
|
|
|
strbuf_addf(b, "%o %s%c",
|
|
|
|
(unsigned int)(e->versions[v].mode & ~NO_DELTA),
|
|
|
|
e->name->str_dat, '\0');
|
2019-02-19 01:05:05 +01:00
|
|
|
strbuf_add(b, e->versions[v].oid.hash, the_hash_algo->rawsz);
|
2006-08-14 06:58:19 +02:00
|
|
|
}
|
2006-08-28 18:22:50 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void store_tree(struct tree_entry *root)
|
|
|
|
{
|
2014-08-29 13:53:37 +02:00
|
|
|
struct tree_content *t;
|
2006-08-28 18:22:50 +02:00
|
|
|
unsigned int i, j, del;
|
2007-09-17 14:00:38 +02:00
|
|
|
struct last_object lo = { STRBUF_INIT, 0, 0, /* no_swap */ 1 };
|
2011-08-14 20:32:24 +02:00
|
|
|
struct object_entry *le = NULL;
|
2006-08-28 18:22:50 +02:00
|
|
|
|
2017-05-01 04:29:03 +02:00
|
|
|
if (!is_null_oid(&root->versions[1].oid))
|
2006-08-28 18:22:50 +02:00
|
|
|
return;
|
|
|
|
|
2014-08-29 13:53:37 +02:00
|
|
|
if (!root->tree)
|
|
|
|
load_tree(root);
|
|
|
|
t = root->tree;
|
|
|
|
|
2006-08-28 18:22:50 +02:00
|
|
|
for (i = 0; i < t->entry_count; i++) {
|
|
|
|
if (t->entries[i]->tree)
|
|
|
|
store_tree(t->entries[i]);
|
|
|
|
}
|
|
|
|
|
2011-08-14 20:32:24 +02:00
|
|
|
if (!(root->versions[0].mode & NO_DELTA))
|
2017-05-07 00:09:56 +02:00
|
|
|
le = find_object(&root->versions[0].oid);
|
2007-09-17 14:00:38 +02:00
|
|
|
if (S_ISDIR(root->versions[0].mode) && le && le->pack_id == pack_id) {
|
2007-09-17 13:48:17 +02:00
|
|
|
mktree(t, 0, &old_tree);
|
2007-09-17 14:00:38 +02:00
|
|
|
lo.data = old_tree;
|
2010-02-17 20:05:51 +01:00
|
|
|
lo.offset = le->idx.offset;
|
2006-08-28 18:22:50 +02:00
|
|
|
lo.depth = t->delta_depth;
|
|
|
|
}
|
|
|
|
|
2007-09-17 13:48:17 +02:00
|
|
|
mktree(t, 1, &new_tree);
|
2017-05-07 00:09:56 +02:00
|
|
|
store_object(OBJ_TREE, &new_tree, &lo, &root->versions[1].oid, 0);
|
2006-08-28 18:22:50 +02:00
|
|
|
|
|
|
|
t->delta_depth = lo.depth;
|
|
|
|
for (i = 0, j = 0, del = 0; i < t->entry_count; i++) {
|
|
|
|
struct tree_entry *e = t->entries[i];
|
|
|
|
if (e->versions[1].mode) {
|
|
|
|
e->versions[0].mode = e->versions[1].mode;
|
2017-05-01 04:29:03 +02:00
|
|
|
oidcpy(&e->versions[0].oid, &e->versions[1].oid);
|
2006-08-28 18:22:50 +02:00
|
|
|
t->entries[j++] = e;
|
|
|
|
} else {
|
|
|
|
release_tree_entry(e);
|
|
|
|
del++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
t->entry_count -= del;
|
2006-08-14 06:58:19 +02:00
|
|
|
}
|
|
|
|
|
fast-import: tighten M 040000 syntax
When tree_content_set() is asked to modify the path "foo/bar/",
it first recurses like so:
tree_content_set(root, "foo/bar/", sha1, S_IFDIR) ->
tree_content_set(root:foo, "bar/", ...) ->
tree_content_set(root:foo/bar, "", ...)
And as a side-effect of 2794ad5 (fast-import: Allow filemodify to set
the root, 2010-10-10), this last call is accepted and changes
the tree entry for root:foo/bar to refer to the specified tree.
That seems safe enough but let's reject the new syntax (we never meant
to support it) and make it harder for frontends to introduce pointless
incompatibilities with git fast-import 1.7.3.
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2010-10-18 03:08:53 +02:00
|
|
|
static void tree_content_replace(
|
|
|
|
struct tree_entry *root,
|
2017-05-07 00:09:56 +02:00
|
|
|
const struct object_id *oid,
|
fast-import: tighten M 040000 syntax
When tree_content_set() is asked to modify the path "foo/bar/",
it first recurses like so:
tree_content_set(root, "foo/bar/", sha1, S_IFDIR) ->
tree_content_set(root:foo, "bar/", ...) ->
tree_content_set(root:foo/bar, "", ...)
And as a side-effect of 2794ad5 (fast-import: Allow filemodify to set
the root, 2010-10-10), this last call is accepted and changes
the tree entry for root:foo/bar to refer to the specified tree.
That seems safe enough but let's reject the new syntax (we never meant
to support it) and make it harder for frontends to introduce pointless
incompatibilities with git fast-import 1.7.3.
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2010-10-18 03:08:53 +02:00
|
|
|
const uint16_t mode,
|
|
|
|
struct tree_content *newtree)
|
|
|
|
{
|
|
|
|
if (!S_ISDIR(mode))
|
|
|
|
die("Root cannot be a non-directory");
|
2017-05-01 04:29:03 +02:00
|
|
|
oidclr(&root->versions[0].oid);
|
2017-05-07 00:09:56 +02:00
|
|
|
oidcpy(&root->versions[1].oid, oid);
|
fast-import: tighten M 040000 syntax
When tree_content_set() is asked to modify the path "foo/bar/",
it first recurses like so:
tree_content_set(root, "foo/bar/", sha1, S_IFDIR) ->
tree_content_set(root:foo, "bar/", ...) ->
tree_content_set(root:foo/bar, "", ...)
And as a side-effect of 2794ad5 (fast-import: Allow filemodify to set
the root, 2010-10-10), this last call is accepted and changes
the tree entry for root:foo/bar to refer to the specified tree.
That seems safe enough but let's reject the new syntax (we never meant
to support it) and make it harder for frontends to introduce pointless
incompatibilities with git fast-import 1.7.3.
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2010-10-18 03:08:53 +02:00
|
|
|
if (root->tree)
|
|
|
|
release_tree_content_recursive(root->tree);
|
|
|
|
root->tree = newtree;
|
|
|
|
}
|
|
|
|
|
2006-08-14 06:58:19 +02:00
|
|
|
static int tree_content_set(
|
|
|
|
struct tree_entry *root,
|
|
|
|
const char *p,
|
2017-05-07 00:09:56 +02:00
|
|
|
const struct object_id *oid,
|
2007-07-10 04:58:23 +02:00
|
|
|
const uint16_t mode,
|
|
|
|
struct tree_content *subtree)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
2010-10-18 03:03:38 +02:00
|
|
|
struct tree_content *t;
|
2006-08-14 06:58:19 +02:00
|
|
|
const char *slash1;
|
|
|
|
unsigned int i, n;
|
|
|
|
struct tree_entry *e;
|
|
|
|
|
2014-03-08 07:48:31 +01:00
|
|
|
slash1 = strchrnul(p, '/');
|
|
|
|
n = slash1 - p;
|
2007-04-29 02:01:27 +02:00
|
|
|
if (!n)
|
|
|
|
die("Empty path component found in input");
|
2014-03-08 07:48:31 +01:00
|
|
|
if (!*slash1 && !S_ISDIR(mode) && subtree)
|
2007-07-10 04:58:23 +02:00
|
|
|
die("Non-directories cannot have subtrees");
|
2006-08-14 06:58:19 +02:00
|
|
|
|
2010-10-18 03:03:38 +02:00
|
|
|
if (!root->tree)
|
|
|
|
load_tree(root);
|
|
|
|
t = root->tree;
|
2006-08-14 06:58:19 +02:00
|
|
|
for (i = 0; i < t->entry_count; i++) {
|
|
|
|
e = t->entries[i];
|
2016-04-22 15:01:24 +02:00
|
|
|
if (e->name->str_len == n && !fspathncmp(p, e->name->str_dat, n)) {
|
2014-03-08 07:48:31 +01:00
|
|
|
if (!*slash1) {
|
2007-07-10 04:58:23 +02:00
|
|
|
if (!S_ISDIR(mode)
|
|
|
|
&& e->versions[1].mode == mode
|
convert "oidcmp() == 0" to oideq()
Using the more restrictive oideq() should, in the long run,
give the compiler more opportunities to optimize these
callsites. For now, this conversion should be a complete
noop with respect to the generated code.
The result is also perhaps a little more readable, as it
avoids the "zero is equal" idiom. Since it's so prevalent in
C, I think seasoned programmers tend not to even notice it
anymore, but it can sometimes make for awkward double
negations (e.g., we can drop a few !!oidcmp() instances
here).
This patch was generated almost entirely by the included
coccinelle patch. This mechanical conversion should be
completely safe, because we check explicitly for cases where
oidcmp() is compared to 0, which is what oideq() is doing
under the hood. Note that we don't have to catch "!oidcmp()"
separately; coccinelle's standard isomorphisms make sure the
two are treated equivalently.
I say "almost" because I did hand-edit the coccinelle output
to fix up a few style violations (it mostly keeps the
original formatting, but sometimes unwraps long lines).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-08-28 23:22:40 +02:00
|
|
|
&& oideq(&e->versions[1].oid, oid))
|
2006-08-14 06:58:19 +02:00
|
|
|
return 0;
|
2006-08-28 18:22:50 +02:00
|
|
|
e->versions[1].mode = mode;
|
2017-05-07 00:09:56 +02:00
|
|
|
oidcpy(&e->versions[1].oid, oid);
|
2007-07-10 04:58:23 +02:00
|
|
|
if (e->tree)
|
2006-08-23 07:33:47 +02:00
|
|
|
release_tree_content_recursive(e->tree);
|
2007-07-10 04:58:23 +02:00
|
|
|
e->tree = subtree;
|
2011-08-14 20:32:24 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We need to leave e->versions[0].sha1 alone
|
|
|
|
* to avoid modifying the preimage tree used
|
|
|
|
* when writing out the parent directory.
|
|
|
|
* But after replacing the subdir with a
|
|
|
|
* completely different one, it's not a good
|
|
|
|
* delta base any more, and besides, we've
|
|
|
|
* thrown away the tree entries needed to
|
|
|
|
* make a delta against it.
|
|
|
|
*
|
|
|
|
* So let's just explicitly disable deltas
|
|
|
|
* for the subtree.
|
|
|
|
*/
|
|
|
|
if (S_ISDIR(e->versions[0].mode))
|
|
|
|
e->versions[0].mode |= NO_DELTA;
|
|
|
|
|
2017-05-01 04:29:03 +02:00
|
|
|
oidclr(&root->versions[1].oid);
|
2006-08-14 06:58:19 +02:00
|
|
|
return 1;
|
|
|
|
}
|
2006-08-28 18:22:50 +02:00
|
|
|
if (!S_ISDIR(e->versions[1].mode)) {
|
2006-08-14 06:58:19 +02:00
|
|
|
e->tree = new_tree_content(8);
|
2006-08-28 18:22:50 +02:00
|
|
|
e->versions[1].mode = S_IFDIR;
|
2006-08-14 06:58:19 +02:00
|
|
|
}
|
|
|
|
if (!e->tree)
|
|
|
|
load_tree(e);
|
2017-05-07 00:09:56 +02:00
|
|
|
if (tree_content_set(e, slash1 + 1, oid, mode, subtree)) {
|
2017-05-01 04:29:03 +02:00
|
|
|
oidclr(&root->versions[1].oid);
|
2006-08-14 06:58:19 +02:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (t->entry_count == t->entry_capacity)
|
2007-03-11 03:39:17 +01:00
|
|
|
root->tree = t = grow_tree_content(t, t->entry_count);
|
2006-08-14 06:58:19 +02:00
|
|
|
e = new_tree_entry();
|
2007-03-12 20:48:37 +01:00
|
|
|
e->name = to_atom(p, n);
|
2006-08-28 18:22:50 +02:00
|
|
|
e->versions[0].mode = 0;
|
2017-05-01 04:29:03 +02:00
|
|
|
oidclr(&e->versions[0].oid);
|
2006-08-14 06:58:19 +02:00
|
|
|
t->entries[t->entry_count++] = e;
|
2014-03-08 07:48:31 +01:00
|
|
|
if (*slash1) {
|
2006-08-14 06:58:19 +02:00
|
|
|
e->tree = new_tree_content(8);
|
2006-08-28 18:22:50 +02:00
|
|
|
e->versions[1].mode = S_IFDIR;
|
2017-05-07 00:09:56 +02:00
|
|
|
tree_content_set(e, slash1 + 1, oid, mode, subtree);
|
2006-08-14 06:58:19 +02:00
|
|
|
} else {
|
2007-07-10 04:58:23 +02:00
|
|
|
e->tree = subtree;
|
2006-08-28 18:22:50 +02:00
|
|
|
e->versions[1].mode = mode;
|
2017-05-07 00:09:56 +02:00
|
|
|
oidcpy(&e->versions[1].oid, oid);
|
2006-08-14 06:58:19 +02:00
|
|
|
}
|
2017-05-01 04:29:03 +02:00
|
|
|
oidclr(&root->versions[1].oid);
|
2006-08-14 06:58:19 +02:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2007-07-10 04:58:23 +02:00
|
|
|
static int tree_content_remove(
|
|
|
|
struct tree_entry *root,
|
|
|
|
const char *p,
|
2013-06-23 16:58:22 +02:00
|
|
|
struct tree_entry *backup_leaf,
|
|
|
|
int allow_root)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
2010-10-18 03:03:38 +02:00
|
|
|
struct tree_content *t;
|
2006-08-14 06:58:19 +02:00
|
|
|
const char *slash1;
|
|
|
|
unsigned int i, n;
|
|
|
|
struct tree_entry *e;
|
|
|
|
|
2014-03-08 07:48:31 +01:00
|
|
|
slash1 = strchrnul(p, '/');
|
|
|
|
n = slash1 - p;
|
2006-08-14 06:58:19 +02:00
|
|
|
|
2010-10-18 03:03:38 +02:00
|
|
|
if (!root->tree)
|
|
|
|
load_tree(root);
|
2013-06-23 16:58:22 +02:00
|
|
|
|
|
|
|
if (!*p && allow_root) {
|
|
|
|
e = root;
|
|
|
|
goto del_entry;
|
|
|
|
}
|
|
|
|
|
2010-10-18 03:03:38 +02:00
|
|
|
t = root->tree;
|
2006-08-14 06:58:19 +02:00
|
|
|
for (i = 0; i < t->entry_count; i++) {
|
|
|
|
e = t->entries[i];
|
2016-04-22 15:01:24 +02:00
|
|
|
if (e->name->str_len == n && !fspathncmp(p, e->name->str_dat, n)) {
|
2014-03-08 07:48:31 +01:00
|
|
|
if (*slash1 && !S_ISDIR(e->versions[1].mode))
|
2010-07-09 15:10:56 +02:00
|
|
|
/*
|
|
|
|
* If p names a file in some subdirectory, and a
|
|
|
|
* file or symlink matching the name of the
|
|
|
|
* parent directory of p exists, then p cannot
|
|
|
|
* exist and need not be deleted.
|
|
|
|
*/
|
|
|
|
return 1;
|
2014-03-08 07:48:31 +01:00
|
|
|
if (!*slash1 || !S_ISDIR(e->versions[1].mode))
|
2006-08-14 06:58:19 +02:00
|
|
|
goto del_entry;
|
|
|
|
if (!e->tree)
|
|
|
|
load_tree(e);
|
2013-06-23 16:58:22 +02:00
|
|
|
if (tree_content_remove(e, slash1 + 1, backup_leaf, 0)) {
|
2006-08-29 03:43:04 +02:00
|
|
|
for (n = 0; n < e->tree->entry_count; n++) {
|
|
|
|
if (e->tree->entries[n]->versions[1].mode) {
|
2017-05-01 04:29:03 +02:00
|
|
|
oidclr(&root->versions[1].oid);
|
2006-08-29 03:43:04 +02:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
2007-07-10 04:58:23 +02:00
|
|
|
backup_leaf = NULL;
|
2006-08-29 03:43:04 +02:00
|
|
|
goto del_entry;
|
2006-08-14 06:58:19 +02:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
del_entry:
|
2007-07-10 04:58:23 +02:00
|
|
|
if (backup_leaf)
|
|
|
|
memcpy(backup_leaf, e, sizeof(*backup_leaf));
|
|
|
|
else if (e->tree)
|
2006-08-28 18:22:50 +02:00
|
|
|
release_tree_content_recursive(e->tree);
|
2007-07-10 04:58:23 +02:00
|
|
|
e->tree = NULL;
|
2006-08-28 18:22:50 +02:00
|
|
|
e->versions[1].mode = 0;
|
2017-05-01 04:29:03 +02:00
|
|
|
oidclr(&e->versions[1].oid);
|
|
|
|
oidclr(&root->versions[1].oid);
|
2006-08-08 06:46:13 +02:00
|
|
|
return 1;
|
2006-08-05 08:04:21 +02:00
|
|
|
}
|
|
|
|
|
2007-07-15 07:40:37 +02:00
|
|
|
static int tree_content_get(
|
|
|
|
struct tree_entry *root,
|
|
|
|
const char *p,
|
2013-06-23 16:58:21 +02:00
|
|
|
struct tree_entry *leaf,
|
|
|
|
int allow_root)
|
2007-07-15 07:40:37 +02:00
|
|
|
{
|
2010-10-18 03:03:38 +02:00
|
|
|
struct tree_content *t;
|
2007-07-15 07:40:37 +02:00
|
|
|
const char *slash1;
|
|
|
|
unsigned int i, n;
|
|
|
|
struct tree_entry *e;
|
|
|
|
|
2014-03-08 07:48:31 +01:00
|
|
|
slash1 = strchrnul(p, '/');
|
|
|
|
n = slash1 - p;
|
2013-06-23 16:58:21 +02:00
|
|
|
if (!n && !allow_root)
|
2012-03-10 05:07:22 +01:00
|
|
|
die("Empty path component found in input");
|
2007-07-15 07:40:37 +02:00
|
|
|
|
2010-10-18 03:03:38 +02:00
|
|
|
if (!root->tree)
|
|
|
|
load_tree(root);
|
2013-06-23 16:58:21 +02:00
|
|
|
|
|
|
|
if (!n) {
|
|
|
|
e = root;
|
|
|
|
goto found_entry;
|
|
|
|
}
|
|
|
|
|
2010-10-18 03:03:38 +02:00
|
|
|
t = root->tree;
|
2007-07-15 07:40:37 +02:00
|
|
|
for (i = 0; i < t->entry_count; i++) {
|
|
|
|
e = t->entries[i];
|
2016-04-22 15:01:24 +02:00
|
|
|
if (e->name->str_len == n && !fspathncmp(p, e->name->str_dat, n)) {
|
2014-03-08 07:48:31 +01:00
|
|
|
if (!*slash1)
|
2013-06-23 16:58:21 +02:00
|
|
|
goto found_entry;
|
2007-07-15 07:40:37 +02:00
|
|
|
if (!S_ISDIR(e->versions[1].mode))
|
|
|
|
return 0;
|
|
|
|
if (!e->tree)
|
|
|
|
load_tree(e);
|
2013-06-23 16:58:21 +02:00
|
|
|
return tree_content_get(e, slash1 + 1, leaf, 0);
|
2007-07-15 07:40:37 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
2013-06-23 16:58:21 +02:00
|
|
|
|
|
|
|
found_entry:
|
|
|
|
memcpy(leaf, e, sizeof(*leaf));
|
2017-05-01 04:29:03 +02:00
|
|
|
if (e->tree && is_null_oid(&e->versions[1].oid))
|
2013-06-23 16:58:21 +02:00
|
|
|
leaf->tree = dup_tree_content(e->tree);
|
|
|
|
else
|
|
|
|
leaf->tree = NULL;
|
|
|
|
return 1;
|
2007-07-15 07:40:37 +02:00
|
|
|
}
|
|
|
|
|
2007-02-06 22:08:06 +01:00
|
|
|
static int update_branch(struct branch *b)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
|
|
|
static const char *msg = "fast-import";
|
2014-04-17 01:21:13 +02:00
|
|
|
struct ref_transaction *transaction;
|
2017-05-07 00:09:56 +02:00
|
|
|
struct object_id old_oid;
|
2014-04-17 01:21:13 +02:00
|
|
|
struct strbuf err = STRBUF_INIT;
|
2007-02-06 22:08:06 +01:00
|
|
|
|
2017-05-01 04:29:03 +02:00
|
|
|
if (is_null_oid(&b->oid)) {
|
2014-04-20 20:59:27 +02:00
|
|
|
if (b->delete)
|
2017-02-21 02:10:32 +01:00
|
|
|
delete_ref(NULL, b->name, NULL, 0);
|
2014-04-20 20:59:27 +02:00
|
|
|
return 0;
|
|
|
|
}
|
2017-10-16 00:06:56 +02:00
|
|
|
if (read_ref(b->name, &old_oid))
|
2017-05-07 00:09:56 +02:00
|
|
|
oidclr(&old_oid);
|
|
|
|
if (!force_update && !is_null_oid(&old_oid)) {
|
2007-02-06 22:08:06 +01:00
|
|
|
struct commit *old_cmit, *new_cmit;
|
|
|
|
|
2018-06-29 03:21:57 +02:00
|
|
|
old_cmit = lookup_commit_reference_gently(the_repository,
|
|
|
|
&old_oid, 0);
|
|
|
|
new_cmit = lookup_commit_reference_gently(the_repository,
|
|
|
|
&b->oid, 0);
|
2014-04-17 01:21:13 +02:00
|
|
|
if (!old_cmit || !new_cmit)
|
2007-02-06 22:08:06 +01:00
|
|
|
return error("Branch %s is missing commits.", b->name);
|
|
|
|
|
2012-08-27 23:46:01 +02:00
|
|
|
if (!in_merge_bases(old_cmit, new_cmit)) {
|
2007-03-31 01:07:05 +02:00
|
|
|
warning("Not updating %s"
|
2007-02-06 22:08:06 +01:00
|
|
|
" (new tip %s does not contain %s)",
|
2017-05-01 04:29:03 +02:00
|
|
|
b->name, oid_to_hex(&b->oid),
|
2017-05-07 00:09:56 +02:00
|
|
|
oid_to_hex(&old_oid));
|
2007-02-06 22:08:06 +01:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
2014-04-17 01:21:13 +02:00
|
|
|
transaction = ref_transaction_begin(&err);
|
|
|
|
if (!transaction ||
|
2017-10-16 00:06:53 +02:00
|
|
|
ref_transaction_update(transaction, b->name, &b->oid, &old_oid,
|
2015-02-17 18:00:15 +01:00
|
|
|
0, msg, &err) ||
|
2014-04-30 21:22:42 +02:00
|
|
|
ref_transaction_commit(transaction, &err)) {
|
2014-04-17 01:21:13 +02:00
|
|
|
ref_transaction_free(transaction);
|
|
|
|
error("%s", err.buf);
|
|
|
|
strbuf_release(&err);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
ref_transaction_free(transaction);
|
|
|
|
strbuf_release(&err);
|
2007-02-06 22:08:06 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void dump_branches(void)
|
|
|
|
{
|
2006-08-14 06:58:19 +02:00
|
|
|
unsigned int i;
|
|
|
|
struct branch *b;
|
|
|
|
|
|
|
|
for (i = 0; i < branch_table_sz; i++) {
|
2007-02-06 22:08:06 +01:00
|
|
|
for (b = branch_table[i]; b; b = b->table_next_branch)
|
|
|
|
failure |= update_branch(b);
|
2006-08-14 06:58:19 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-01-17 07:47:25 +01:00
|
|
|
static void dump_tags(void)
|
2006-08-24 09:12:13 +02:00
|
|
|
{
|
|
|
|
static const char *msg = "fast-import";
|
|
|
|
struct tag *t;
|
2014-04-29 00:23:58 +02:00
|
|
|
struct strbuf ref_name = STRBUF_INIT;
|
|
|
|
struct strbuf err = STRBUF_INIT;
|
|
|
|
struct ref_transaction *transaction;
|
2006-08-24 09:12:13 +02:00
|
|
|
|
2014-04-29 00:23:58 +02:00
|
|
|
transaction = ref_transaction_begin(&err);
|
|
|
|
if (!transaction) {
|
|
|
|
failure |= error("%s", err.buf);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2006-08-24 09:12:13 +02:00
|
|
|
for (t = first_tag; t; t = t->next_tag) {
|
2014-04-29 00:23:58 +02:00
|
|
|
strbuf_reset(&ref_name);
|
|
|
|
strbuf_addf(&ref_name, "refs/tags/%s", t->name);
|
|
|
|
|
2015-02-17 18:00:15 +01:00
|
|
|
if (ref_transaction_update(transaction, ref_name.buf,
|
2017-10-16 00:06:53 +02:00
|
|
|
&t->oid, NULL, 0, msg, &err)) {
|
2014-04-29 00:23:58 +02:00
|
|
|
failure |= error("%s", err.buf);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2006-08-24 09:12:13 +02:00
|
|
|
}
|
2014-04-30 21:22:42 +02:00
|
|
|
if (ref_transaction_commit(transaction, &err))
|
2014-04-29 00:23:58 +02:00
|
|
|
failure |= error("%s", err.buf);
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
ref_transaction_free(transaction);
|
|
|
|
strbuf_release(&ref_name);
|
|
|
|
strbuf_release(&err);
|
2006-08-24 09:12:13 +02:00
|
|
|
}
|
|
|
|
|
2007-01-17 07:47:25 +01:00
|
|
|
static void dump_marks(void)
|
2006-08-25 22:03:04 +02:00
|
|
|
{
|
2018-05-09 22:55:38 +02:00
|
|
|
struct lock_file mark_lock = LOCK_INIT;
|
2007-03-08 00:05:38 +01:00
|
|
|
FILE *f;
|
|
|
|
|
2016-05-17 23:40:23 +02:00
|
|
|
if (!export_marks_file || (import_marks_file && !import_marks_file_done))
|
2007-03-08 00:05:38 +01:00
|
|
|
return;
|
|
|
|
|
fast-import: delay creating leading directories for export-marks
When we parse the --export-marks option, we don't immediately open the
file, but we do create any leading directories. This can be especially
confusing when a command-line option overrides an in-stream one, in
which case we'd create the leading directory for the in-stream file,
even though we never actually write the file.
Let's instead create the directories just before opening the file, which
means we'll create only useful directories. Note that this could change
the handling of relative paths if we chdir() in between, but we don't
actually do so; the only permanent chdir is from setup_git_directory()
which runs before either code path (potentially we should take the
pre-setup dir into account to avoid surprising the user, but that's an
orthogonal change).
The test just adapts the existing "override" test to use paths with
leading directories. This checks both that the correct directory is
created (which worked before but was not tested), and that the
overridden one is not (our new fix here).
While we're here, let's also check the error result of
safe_create_leading_directories(). We'd presumably notice any failure
immediately after when we try to open the file itself, but we can give a
more specific error message in this case.
Signed-off-by: Jeff King <peff@peff.net>
2019-08-29 19:33:48 +02:00
|
|
|
if (safe_create_leading_directories_const(export_marks_file)) {
|
|
|
|
failure |= error_errno("unable to create leading directories of %s",
|
|
|
|
export_marks_file);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-10-01 13:14:48 +02:00
|
|
|
if (hold_lock_file_for_update(&mark_lock, export_marks_file, 0) < 0) {
|
2016-05-08 11:47:45 +02:00
|
|
|
failure |= error_errno("Unable to write marks file %s",
|
|
|
|
export_marks_file);
|
2007-03-08 00:05:38 +01:00
|
|
|
return;
|
2006-08-25 22:03:04 +02:00
|
|
|
}
|
2007-03-08 00:05:38 +01:00
|
|
|
|
2014-10-01 13:14:48 +02:00
|
|
|
f = fdopen_lock_file(&mark_lock, "w");
|
2007-03-08 00:05:38 +01:00
|
|
|
if (!f) {
|
2008-01-18 19:35:49 +01:00
|
|
|
int saved_errno = errno;
|
2007-03-08 00:05:38 +01:00
|
|
|
rollback_lock_file(&mark_lock);
|
|
|
|
failure |= error("Unable to write marks file %s: %s",
|
2009-12-04 18:06:55 +01:00
|
|
|
export_marks_file, strerror(saved_errno));
|
2007-03-08 00:05:38 +01:00
|
|
|
return;
|
2006-08-25 22:03:04 +02:00
|
|
|
}
|
2007-03-08 00:05:38 +01:00
|
|
|
|
2020-02-22 21:17:48 +01:00
|
|
|
for_each_mark(marks, 0, dump_marks_fn, f);
|
2008-01-17 17:58:34 +01:00
|
|
|
if (commit_lock_file(&mark_lock)) {
|
2016-05-08 11:47:45 +02:00
|
|
|
failure |= error_errno("Unable to write file %s",
|
|
|
|
export_marks_file);
|
2008-01-17 17:58:34 +01:00
|
|
|
return;
|
|
|
|
}
|
2006-08-25 22:03:04 +02:00
|
|
|
}
|
|
|
|
|
2020-02-22 21:17:46 +01:00
|
|
|
static void insert_object_entry(struct mark_set *s, struct object_id *oid, uintmax_t mark)
|
|
|
|
{
|
|
|
|
struct object_entry *e;
|
|
|
|
e = find_object(oid);
|
|
|
|
if (!e) {
|
|
|
|
enum object_type type = oid_object_info(the_repository,
|
|
|
|
oid, NULL);
|
|
|
|
if (type < 0)
|
|
|
|
die("object not found: %s", oid_to_hex(oid));
|
|
|
|
e = insert_object(oid);
|
|
|
|
e->type = type;
|
|
|
|
e->pack_id = MAX_PACK_ID;
|
|
|
|
e->idx.offset = 1; /* just not zero! */
|
|
|
|
}
|
|
|
|
insert_mark(s, mark, e);
|
|
|
|
}
|
|
|
|
|
fast-import: add options for rewriting submodules
When converting a repository using submodules from one hash algorithm to
another, it is necessary to rewrite the submodules from the old
algorithm to the new algorithm, since only references to submodules, not
their contents, are written to the fast-export stream. Without rewriting
the submodules, fast-import fails with an "Invalid dataref" error when
encountering a submodule in another algorithm.
Add a pair of options, --rewrite-submodules-from and
--rewrite-submodules-to, that take a list of marks produced by
fast-export and fast-import, respectively, when processing the
submodule. Use these marks to map the submodule commits from the old
algorithm to the new algorithm.
We read marks into two corresponding struct mark_set objects and then
perform a mapping from the old to the new using a hash table. This lets
us reuse the same mark parsing code that is used elsewhere and allows us
to efficiently read and match marks based on their ID, since mark files
need not be sorted.
Note that because we're using a khash table for the object IDs, and this
table copies values of struct object_id instead of taking references to
them, it's necessary to zero the struct object_id values that we use to
insert and look up in the table. Otherwise, we would end up with SHA-1
values that don't match because of whatever stack garbage might be left
in the unused area.
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-02-22 21:17:49 +01:00
|
|
|
/*
 * Mark-file inserter used for submodule OID rewriting: associate mark
 * with a heap-allocated copy of oid in the given mark set.  The copy is
 * needed because the caller's oid is a stack temporary.
 */
static void insert_oid_entry(struct mark_set *s, struct object_id *oid, uintmax_t mark)
{
	insert_mark(s, mark, xmemdupz(oid, sizeof(*oid)));
}
|
|
|
|
|
2020-02-22 21:17:46 +01:00
|
|
|
/*
 * Parse a marks file from f, one ":<mark> <oid>" entry per line,
 * handing each (oid, mark) pair to the given inserter callback.
 * Dies on any malformed line.
 */
static void read_mark_file(struct mark_set *s, FILE *f, mark_set_inserter_t inserter)
{
	char line[512];

	while (fgets(line, sizeof(line), f)) {
		uintmax_t mark;
		char *nl, *after_num;
		struct object_id oid;

		/* Zero-pad so short (SHA-1) hashes compare cleanly. */
		memset(oid.hash, 0, sizeof(oid.hash));

		nl = strchr(line, '\n');
		if (!nl || line[0] != ':')
			die("corrupt mark line: %s", line);
		*nl = 0;

		mark = strtoumax(line + 1, &after_num, 10);
		if (!mark || after_num == line + 1 ||
		    *after_num != ' ' ||
		    get_oid_hex_any(after_num + 1, &oid) == GIT_HASH_UNKNOWN)
			die("corrupt mark line: %s", line);

		inserter(s, &oid, mark);
	}
}
|
|
|
|
|
|
|
|
static void read_marks(void)
|
|
|
|
{
|
|
|
|
FILE *f = fopen(import_marks_file, "r");
|
|
|
|
if (f)
|
|
|
|
;
|
|
|
|
else if (import_marks_file_ignore_missing && errno == ENOENT)
|
|
|
|
goto done; /* Marks file does not exist */
|
|
|
|
else
|
|
|
|
die_errno("cannot read '%s'", import_marks_file);
|
2020-02-22 21:17:46 +01:00
|
|
|
read_mark_file(marks, f, insert_object_entry);
|
2009-12-04 18:06:55 +01:00
|
|
|
fclose(f);
|
2016-05-17 23:40:23 +02:00
|
|
|
done:
|
|
|
|
import_marks_file_done = 1;
|
2009-12-04 18:06:55 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-09-17 11:19:04 +02:00
|
|
|
/*
 * Read the next input line into command_buf, skipping comment lines
 * ("#" prefix) and recording every real command in the bounded history
 * ring (cmd_hist/cmd_tail/rc_free) used for crash reporting.
 * Returns 0 on success, EOF once stdin is exhausted.
 */
static int read_next_command(void)
{
	static int stdin_eof = 0;

	if (stdin_eof) {
		unread_command_buf = 0;
		return EOF;
	}

	for (;;) {
		if (unread_command_buf) {
			/* Re-deliver the command a caller pushed back. */
			unread_command_buf = 0;
		} else {
			struct recent_command *rc;

			stdin_eof = strbuf_getline_lf(&command_buf, stdin);
			if (stdin_eof)
				return EOF;

			/*
			 * Before the first data-bearing command, "feature"
			 * and "option" lines may still adjust options; any
			 * other line triggers final argv option parsing.
			 */
			if (!seen_data_command
				&& !starts_with(command_buf.buf, "feature ")
				&& !starts_with(command_buf.buf, "option ")) {
				parse_argv();
			}

			/* Reuse a free history slot, or evict the oldest. */
			rc = rc_free;
			if (rc)
				rc_free = rc->next;
			else {
				rc = cmd_hist.next;
				cmd_hist.next = rc->next;
				cmd_hist.next->prev = &cmd_hist;
				free(rc->buf);
			}

			/* Duplicate: history owns its copy independently
			 * of command_buf, which is reused for each line. */
			rc->buf = xstrdup(command_buf.buf);
			rc->prev = cmd_tail;
			rc->next = cmd_hist.prev;
			rc->prev->next = rc;
			cmd_tail = rc;
		}
		if (command_buf.buf[0] == '#')
			continue;
		return 0;
	}
}
|
|
|
|
|
2007-08-19 11:50:18 +02:00
|
|
|
/*
 * Consume a single trailing newline from stdin, if present.  Any other
 * character is pushed back for the next reader; EOF is left alone.
 */
static void skip_optional_lf(void)
{
	int c = fgetc(stdin);

	if (c == '\n' || c == EOF)
		return;
	ungetc(c, stdin);
}
|
|
|
|
|
2008-05-16 00:35:56 +02:00
|
|
|
static void parse_mark(void)
|
2006-08-15 02:16:28 +02:00
|
|
|
{
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
const char *v;
|
|
|
|
if (skip_prefix(command_buf.buf, "mark :", &v)) {
|
|
|
|
next_mark = strtoumax(v, NULL, 10);
|
2006-08-15 02:16:28 +02:00
|
|
|
read_next_command();
|
|
|
|
}
|
|
|
|
else
|
2006-08-23 10:17:45 +02:00
|
|
|
next_mark = 0;
|
2006-08-15 02:16:28 +02:00
|
|
|
}
|
|
|
|
|
2018-11-16 08:59:56 +01:00
|
|
|
static void parse_original_identifier(void)
|
|
|
|
{
|
|
|
|
const char *v;
|
|
|
|
if (skip_prefix(command_buf.buf, "original-oid ", &v))
|
|
|
|
read_next_command();
|
|
|
|
}
|
|
|
|
|
2010-02-01 18:27:35 +01:00
|
|
|
/*
 * Parse a "data" command and read its payload into sb.
 *
 * Two formats exist: "data <<DELIM" reads lines until the delimiter
 * line, and "data N" reads exactly N bytes.  In the counted format,
 * if limit is nonzero and N exceeds it, nothing is read into sb; the
 * length is stored in *len_res and 0 is returned so the caller can
 * stream the payload instead.  Returns 1 when the data was read.
 */
static int parse_data(struct strbuf *sb, uintmax_t limit, uintmax_t *len_res)
{
	const char *data;

	strbuf_reset(sb);

	if (!skip_prefix(command_buf.buf, "data ", &data))
		die("Expected 'data n' command, found: %s", command_buf.buf);

	if (skip_prefix(data, "<<", &data)) {
		/* Delimited format: accumulate lines until the terminator. */
		char *term = xstrdup(data);
		size_t term_len = command_buf.len - (data - command_buf.buf);

		for (;;) {
			if (strbuf_getline_lf(&command_buf, stdin) == EOF)
				die("EOF in data (terminator '%s' not found)", term);
			if (term_len == command_buf.len
				&& !strcmp(term, command_buf.buf))
				break;
			strbuf_addbuf(sb, &command_buf);
			strbuf_addch(sb, '\n');
		}
		free(term);
	}
	else {
		/* Exact byte-count format. */
		uintmax_t len = strtoumax(data, NULL, 10);
		size_t n = 0, length = (size_t)len;

		if (limit && limit < len) {
			/* Over the streaming threshold: let caller stream. */
			*len_res = len;
			return 0;
		}
		/* The size_t cast truncated: too big for this platform. */
		if (length < len)
			die("data is too large to use in this context");

		while (n < length) {
			size_t s = strbuf_fread(sb, length - n, stdin);
			if (!s && feof(stdin))
				die("EOF in data (%lu bytes remaining)",
					(unsigned long)(length - n));
			n += s;
		}
	}

	skip_optional_lf();
	return 1;
}
|
|
|
|
|
2020-05-30 22:25:57 +02:00
|
|
|
/*
 * Validate a "raw" date string of the form "<seconds> <+|-><tzoffset>".
 * On success the original string is appended verbatim to result and 0
 * is returned; -1 indicates a malformed date.  When strict is nonzero,
 * timezone offsets greater than 1400 are also rejected.
 */
static int validate_raw_date(const char *src, struct strbuf *result, int strict)
{
	const char *orig_src = src;
	char *endp;
	unsigned long num;

	/* strtoul() only sets errno on failure, so clear it up front. */
	errno = 0;

	num = strtoul(src, &endp, 10);
	/*
	 * NEEDSWORK: perhaps check for reasonable values? For example, we
	 *            could error on values representing times more than a
	 *            day in the future.
	 */
	if (errno || endp == src || *endp != ' ')
		return -1;

	src = endp + 1;
	/* The timezone must carry an explicit sign. */
	if (*src != '-' && *src != '+')
		return -1;

	num = strtoul(src + 1, &endp, 10);
	/*
	 * NEEDSWORK: check for brokenness other than num > 1400, such as
	 *            (num % 100) >= 60, or ((num % 100) % 15) != 0 ?
	 */
	if (errno || endp == src + 1 || *endp || /* did not parse */
	    (strict && (1400 < num)) /* parsed a broken timezone */
	    )
		return -1;

	strbuf_addstr(result, orig_src);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Parse an ident of the form "[name ]<email> when" and return a newly
 * allocated string "name <email> normalized-date".  The date portion is
 * validated/normalized according to the global whenspec policy.  Dies
 * on any structural problem (missing angle brackets or delimiters).
 * Caller owns (and must free) the returned string.
 */
static char *parse_ident(const char *buf)
{
	const char *ltgt;
	size_t name_len;
	struct strbuf ident = STRBUF_INIT;

	/* ensure there is a space delimiter even if there is no name */
	if (*buf == '<')
		--buf;

	/* Locate the '<' that opens the email part. */
	ltgt = buf + strcspn(buf, "<>");
	if (*ltgt != '<')
		die("Missing < in ident string: %s", buf);
	if (ltgt != buf && ltgt[-1] != ' ')
		die("Missing space before < in ident string: %s", buf);
	/* Locate the matching '>' that closes the email part. */
	ltgt = ltgt + 1 + strcspn(ltgt + 1, "<>");
	if (*ltgt != '>')
		die("Missing > in ident string: %s", buf);
	ltgt++;
	if (*ltgt != ' ')
		die("Missing space after > in ident string: %s", buf);
	ltgt++;
	/* Copy "name <email> " (ltgt now points at the date). */
	name_len = ltgt - buf;
	strbuf_add(&ident, buf, name_len);

	switch (whenspec) {
	case WHENSPEC_RAW:
		if (validate_raw_date(ltgt, &ident, 1) < 0)
			die("Invalid raw date \"%s\" in ident: %s", ltgt, buf);
		break;
	case WHENSPEC_RAW_PERMISSIVE:
		if (validate_raw_date(ltgt, &ident, 0) < 0)
			die("Invalid raw date \"%s\" in ident: %s", ltgt, buf);
		break;
	case WHENSPEC_RFC2822:
		if (parse_date(ltgt, &ident) < 0)
			die("Invalid rfc2822 date \"%s\" in ident: %s", ltgt, buf);
		break;
	case WHENSPEC_NOW:
		if (strcmp("now", ltgt))
			die("Date in ident must be 'now': %s", buf);
		datestamp(&ident);
		break;
	}

	return strbuf_detach(&ident, NULL);
}
|
|
|
|
|
2010-02-01 18:27:35 +01:00
|
|
|
static void parse_and_store_blob(
|
|
|
|
struct last_object *last,
|
2017-05-07 00:09:56 +02:00
|
|
|
struct object_id *oidout,
|
2010-02-01 18:27:35 +01:00
|
|
|
uintmax_t mark)
|
2006-08-08 07:14:21 +02:00
|
|
|
{
|
2007-09-17 14:00:38 +02:00
|
|
|
static struct strbuf buf = STRBUF_INIT;
|
2010-02-01 18:27:35 +01:00
|
|
|
uintmax_t len;
|
2006-08-15 02:16:28 +02:00
|
|
|
|
2010-02-01 18:27:35 +01:00
|
|
|
if (parse_data(&buf, big_file_threshold, &len))
|
2017-05-07 00:09:56 +02:00
|
|
|
store_object(OBJ_BLOB, &buf, last, oidout, mark);
|
2010-02-01 18:27:35 +01:00
|
|
|
else {
|
|
|
|
if (last) {
|
|
|
|
strbuf_release(&last->data);
|
|
|
|
last->offset = 0;
|
|
|
|
last->depth = 0;
|
|
|
|
}
|
2017-05-07 00:09:56 +02:00
|
|
|
stream_blob(len, oidout, mark);
|
2010-02-01 18:27:35 +01:00
|
|
|
skip_optional_lf();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Process a "blob" command: advance past the command line, read the
 * optional "mark" and "original-oid" lines, then parse and store the
 * blob data under next_mark.
 */
static void parse_new_blob(void)
{
	read_next_command();
	parse_mark();
	parse_original_identifier();
	parse_and_store_blob(&last_blob, NULL, next_mark);
}
|
|
|
|
|
2007-01-17 07:47:25 +01:00
|
|
|
/*
 * Evict least-recently-committed branches from the active list until
 * there is room for one more, releasing their in-memory tree contents.
 */
static void unload_one_branch(void)
{
	while (cur_active_branches
		&& cur_active_branches >= max_active_branches) {
		uintmax_t min_commit = ULONG_MAX;
		struct branch *e, *l = NULL, *p = NULL;

		/*
		 * Find the branch with the smallest last_commit; p ends up
		 * pointing at its predecessor in the singly-linked active
		 * list (NULL when the victim is the head).
		 */
		for (e = active_branches; e; e = e->active_next_branch) {
			if (e->last_commit < min_commit) {
				p = l;
				min_commit = e->last_commit;
			}
			l = e;
		}

		/* Unlink the victim from the active list. */
		if (p) {
			e = p->active_next_branch;
			p->active_next_branch = e->active_next_branch;
		} else {
			e = active_branches;
			active_branches = e->active_next_branch;
		}
		e->active = 0;
		e->active_next_branch = NULL;
		/* Free the loaded tree; it can be re-read on next use. */
		if (e->branch_tree.tree) {
			release_tree_content_recursive(e->branch_tree.tree);
			e->branch_tree.tree = NULL;
		}
		cur_active_branches--;
	}
}
|
|
|
|
|
2006-08-14 06:58:19 +02:00
|
|
|
static void load_branch(struct branch *b)
|
2006-08-08 09:36:45 +02:00
|
|
|
{
|
2006-08-14 06:58:19 +02:00
|
|
|
load_tree(&b->branch_tree);
|
2007-03-05 18:31:09 +01:00
|
|
|
if (!b->active) {
|
|
|
|
b->active = 1;
|
|
|
|
b->active_next_branch = active_branches;
|
|
|
|
active_branches = b;
|
|
|
|
cur_active_branches++;
|
|
|
|
branch_load_count++;
|
|
|
|
}
|
2006-08-08 09:36:45 +02:00
|
|
|
}
|
|
|
|
|
2009-12-07 12:27:24 +01:00
|
|
|
/*
 * Map a note count to a fanout depth: one extra directory level for
 * each full byte of magnitude beyond the first (i.e. floor(log256(n))
 * for n > 0, and 0 for n == 0).
 */
static unsigned char convert_num_notes_to_fanout(uintmax_t num_notes)
{
	unsigned char depth;

	for (depth = 0; num_notes >> 8; depth++)
		num_notes >>= 8;
	return depth;
}
|
|
|
|
|
|
|
|
static void construct_path_with_fanout(const char *hex_sha1,
|
|
|
|
unsigned char fanout, char *path)
|
|
|
|
{
|
|
|
|
unsigned int i = 0, j = 0;
|
2018-02-01 03:18:42 +01:00
|
|
|
if (fanout >= the_hash_algo->rawsz)
|
2009-12-07 12:27:24 +01:00
|
|
|
die("Too large fanout (%u)", fanout);
|
|
|
|
while (fanout) {
|
|
|
|
path[i++] = hex_sha1[j++];
|
|
|
|
path[i++] = hex_sha1[j++];
|
|
|
|
path[i++] = '/';
|
|
|
|
fanout--;
|
|
|
|
}
|
2018-02-01 03:18:42 +01:00
|
|
|
memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j);
|
|
|
|
path[i + the_hash_algo->hexsz - j] = '\0';
|
2009-12-07 12:27:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Recursively walk the notes tree under root, counting note entries
 * and, unless fanout == 0xff (count-only mode), moving each note to
 * the path dictated by the requested fanout level.  hex_oid and
 * fullpath are caller-provided scratch buffers that accumulate the
 * concatenated hex digits and the slash-separated path down the
 * recursion.  Renames are applied against orig_root so paths stay
 * rooted at the notes ref top.  Returns the number of notes seen.
 */
static uintmax_t do_change_note_fanout(
	struct tree_entry *orig_root, struct tree_entry *root,
	char *hex_oid, unsigned int hex_oid_len,
	char *fullpath, unsigned int fullpath_len,
	unsigned char fanout)
{
	struct tree_content *t;
	struct tree_entry *e, leaf;
	unsigned int i, tmp_hex_oid_len, tmp_fullpath_len;
	uintmax_t num_notes = 0;
	struct object_id oid;
	/* hex oid + '/' between each pair of hex digits + NUL */
	char realpath[GIT_MAX_HEXSZ + ((GIT_MAX_HEXSZ / 2) - 1) + 1];
	const unsigned hexsz = the_hash_algo->hexsz;

	/* A tree imported by sha1 (not inherited) may not be loaded yet. */
	if (!root->tree)
		load_tree(root);
	t = root->tree;

	for (i = 0; t && i < t->entry_count; i++) {
		e = t->entries[i];
		tmp_hex_oid_len = hex_oid_len + e->name->str_len;
		tmp_fullpath_len = fullpath_len;

		/*
		 * We're interested in EITHER existing note entries (entries
		 * with exactly 40 hex chars in path, not including directory
		 * separators), OR directory entries that may contain note
		 * entries (with < 40 hex chars in path).
		 * Also, each path component in a note entry must be a multiple
		 * of 2 chars.
		 */
		if (!e->versions[1].mode ||
		    tmp_hex_oid_len > hexsz ||
		    e->name->str_len % 2)
			continue;

		/* This _may_ be a note entry, or a subdir containing notes */
		memcpy(hex_oid + hex_oid_len, e->name->str_dat,
		       e->name->str_len);
		if (tmp_fullpath_len)
			fullpath[tmp_fullpath_len++] = '/';
		memcpy(fullpath + tmp_fullpath_len, e->name->str_dat,
		       e->name->str_len);
		tmp_fullpath_len += e->name->str_len;
		fullpath[tmp_fullpath_len] = '\0';

		if (tmp_hex_oid_len == hexsz && !get_oid_hex(hex_oid, &oid)) {
			/* This is a note entry */
			if (fanout == 0xff) {
				/* Counting mode, no rename */
				num_notes++;
				continue;
			}
			construct_path_with_fanout(hex_oid, fanout, realpath);
			if (!strcmp(fullpath, realpath)) {
				/* Note entry is in correct location */
				num_notes++;
				continue;
			}

			/* Rename fullpath to realpath */
			if (!tree_content_remove(orig_root, fullpath, &leaf, 0))
				die("Failed to remove path %s", fullpath);
			tree_content_set(orig_root, realpath,
				&leaf.versions[1].oid,
				leaf.versions[1].mode,
				leaf.tree);
		} else if (S_ISDIR(e->versions[1].mode)) {
			/* This is a subdir that may contain note entries */
			num_notes += do_change_note_fanout(orig_root, e,
				hex_oid, tmp_hex_oid_len,
				fullpath, tmp_fullpath_len, fanout);
		}

		/* The above may have reallocated the current tree_content */
		t = root->tree;
	}
	return num_notes;
}
|
|
|
|
|
|
|
|
static uintmax_t change_note_fanout(struct tree_entry *root,
|
|
|
|
unsigned char fanout)
|
|
|
|
{
|
2017-05-07 00:09:56 +02:00
|
|
|
/*
|
|
|
|
* The size of path is due to one slash between every two hex digits,
|
|
|
|
* plus the terminating NUL. Note that there is no slash at the end, so
|
|
|
|
* the number of slashes is one less than half the number of hex
|
|
|
|
* characters.
|
|
|
|
*/
|
|
|
|
char hex_oid[GIT_MAX_HEXSZ], path[GIT_MAX_HEXSZ + (GIT_MAX_HEXSZ / 2) - 1 + 1];
|
|
|
|
return do_change_note_fanout(root, root, hex_oid, 0, path, 0, fanout);
|
2009-12-07 12:27:24 +01:00
|
|
|
}
|
|
|
|
|
fast-import: add options for rewriting submodules
When converting a repository using submodules from one hash algorithm to
another, it is necessary to rewrite the submodules from the old
algorithm to the new algorithm, since only references to submodules, not
their contents, are written to the fast-export stream. Without rewriting
the submodules, fast-import fails with an "Invalid dataref" error when
encountering a submodule in another algorithm.
Add a pair of options, --rewrite-submodules-from and
--rewrite-submodules-to, that take a list of marks produced by
fast-export and fast-import, respectively, when processing the
submodule. Use these marks to map the submodule commits from the old
algorithm to the new algorithm.
We read marks into two corresponding struct mark_set objects and then
perform a mapping from the old to the new using a hash table. This lets
us reuse the same mark parsing code that is used elsewhere and allows us
to efficiently read and match marks based on their ID, since mark files
need not be sorted.
Note that because we're using a khash table for the object IDs, and this
table copies values of struct object_id instead of taking references to
them, it's necessary to zero the struct object_id values that we use to
insert and look up in the table. Otherwise, we would end up with SHA-1
values that don't match because of whatever stack garbage might be left
in the unused area.
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-02-22 21:17:49 +01:00
|
|
|
/*
 * Parse an object ID in hex form, in whatever hash algorithm it is
 * written in, and map it through the submodule OID rewrite table
 * (sub_oid_map) when such a mapping is loaded.
 *
 * Returns 0 on success, leaving the (possibly rewritten) ID in *oid
 * and the first character past the hex in *end; returns -1 if the
 * hex cannot be parsed, or if the ID uses a foreign algorithm and no
 * mapping for it exists.
 */
static int parse_mapped_oid_hex(const char *hex, struct object_id *oid, const char **end)
{
	int algo;
	khiter_t it;

	/*
	 * Make SHA-1 object IDs have all-zero padding.  The khash table
	 * hashes/compares whole object_id values by content, so any
	 * garbage beyond the algorithm's raw length would break lookups.
	 */
	memset(oid->hash, 0, sizeof(oid->hash));

	algo = parse_oid_hex_any(hex, oid, end);
	if (algo == GIT_HASH_UNKNOWN)
		return -1;

	it = kh_get_oid_map(sub_oid_map, *oid);
	/* No such object? */
	if (it == kh_end(sub_oid_map)) {
		/* If we're using the same algorithm, pass it through. */
		if (hash_algos[algo].format_id == the_hash_algo->format_id)
			return 0;
		return -1;
	}
	oidcpy(oid, kh_value(sub_oid_map, it));
	return 0;
}
|
|
|
|
|
2012-04-08 00:59:20 +02:00
|
|
|
/*
|
|
|
|
* Given a pointer into a string, parse a mark reference:
|
|
|
|
*
|
|
|
|
* idnum ::= ':' bigint;
|
|
|
|
*
|
|
|
|
* Return the first character after the value in *endptr.
|
|
|
|
*
|
|
|
|
* Complain if the following character is not what is expected,
|
|
|
|
* either a space or end of the string.
|
|
|
|
*/
|
|
|
|
static uintmax_t parse_mark_ref(const char *p, char **endptr)
|
|
|
|
{
|
|
|
|
uintmax_t mark;
|
|
|
|
|
|
|
|
assert(*p == ':');
|
|
|
|
p++;
|
|
|
|
mark = strtoumax(p, endptr, 10);
|
|
|
|
if (*endptr == p)
|
|
|
|
die("No value after ':' in mark: %s", command_buf.buf);
|
|
|
|
return mark;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Parse the mark reference, and complain if this is not the end of
|
|
|
|
* the string.
|
|
|
|
*/
|
|
|
|
static uintmax_t parse_mark_ref_eol(const char *p)
|
|
|
|
{
|
|
|
|
char *end;
|
|
|
|
uintmax_t mark;
|
|
|
|
|
|
|
|
mark = parse_mark_ref(p, &end);
|
|
|
|
if (*end != '\0')
|
|
|
|
die("Garbage after mark: %s", command_buf.buf);
|
|
|
|
return mark;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Parse the mark reference, demanding a trailing space. Return a
|
|
|
|
* pointer to the space.
|
|
|
|
*/
|
|
|
|
static uintmax_t parse_mark_ref_space(const char **p)
|
|
|
|
{
|
|
|
|
uintmax_t mark;
|
|
|
|
char *end;
|
|
|
|
|
|
|
|
mark = parse_mark_ref(*p, &end);
|
2014-06-18 21:51:57 +02:00
|
|
|
if (*end++ != ' ')
|
2012-04-08 00:59:20 +02:00
|
|
|
die("Missing space after mark: %s", command_buf.buf);
|
|
|
|
*p = end;
|
|
|
|
return mark;
|
|
|
|
}
|
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
/*
 * Handle a 'M <mode> <dataref> <path>' (filemodify) command inside a
 * commit: set <path> in branch b to the given object with the given
 * mode.  <dataref> may be a mark (':N'), the literal 'inline'
 * (content follows on subsequent lines), or a hex object ID.
 */
static void file_change_m(const char *p, struct branch *b)
{
	static struct strbuf uq = STRBUF_INIT;	/* scratch for C-style unquoting */
	const char *endp;
	struct object_entry *oe;
	struct object_id oid;
	uint16_t mode, inline_data = 0;

	/* Parse and validate the file mode. */
	p = get_mode(p, &mode);
	if (!p)
		die("Corrupt mode: %s", command_buf.buf);
	switch (mode) {
	case 0644:
	case 0755:
		/* Bare permission bits: normalize to a regular file... */
		mode |= S_IFREG;
		/* fallthrough */
	case S_IFREG | 0644:
	case S_IFREG | 0755:
	case S_IFLNK:
	case S_IFDIR:
	case S_IFGITLINK:
		/* ok */
		break;
	default:
		die("Corrupt mode: %s", command_buf.buf);
	}

	/* Resolve <dataref>: mark, 'inline', or raw hex object ID. */
	if (*p == ':') {
		oe = find_mark(marks, parse_mark_ref_space(&p));
		oidcpy(&oid, &oe->idx.oid);
	} else if (skip_prefix(p, "inline ", &p)) {
		inline_data = 1;
		oe = NULL; /* not used with inline_data, but makes gcc happy */
	} else {
		if (parse_mapped_oid_hex(p, &oid, &p))
			die("Invalid dataref: %s", command_buf.buf);
		oe = find_object(&oid);
		if (*p++ != ' ')
			die("Missing space after SHA1: %s", command_buf.buf);
	}

	/* The rest of the line is the path, possibly C-style quoted. */
	strbuf_reset(&uq);
	if (!unquote_c_style(&uq, p, &endp)) {
		if (*endp)
			die("Garbage after path in: %s", command_buf.buf);
		p = uq.buf;
	}

	/* Git does not track empty, non-toplevel directories. */
	if (S_ISDIR(mode) && is_empty_tree_oid(&oid) && *p) {
		tree_content_remove(&b->branch_tree, p, NULL, 0);
		return;
	}

	if (S_ISGITLINK(mode)) {
		if (inline_data)
			die("Git links cannot be specified 'inline': %s",
				command_buf.buf);
		else if (oe) {
			if (oe->type != OBJ_COMMIT)
				die("Not a commit (actually a %s): %s",
					type_name(oe->type), command_buf.buf);
		}
		/*
		 * Accept the sha1 without checking; it expected to be in
		 * another repository.
		 */
	} else if (inline_data) {
		if (S_ISDIR(mode))
			die("Directories cannot be specified 'inline': %s",
				command_buf.buf);
		/* Ensure the path survives subsequent command_buf reuse. */
		if (p != uq.buf) {
			strbuf_addstr(&uq, p);
			p = uq.buf;
		}
		/* Allow interleaved cat-blob requests before the data. */
		while (read_next_command() != EOF) {
			const char *v;
			if (skip_prefix(command_buf.buf, "cat-blob ", &v))
				parse_cat_blob(v);
			else {
				parse_and_store_blob(&last_blob, &oid, 0);
				break;
			}
		}
	} else {
		/* Non-inline dataref: verify the object type matches the mode. */
		enum object_type expected = S_ISDIR(mode) ?
			OBJ_TREE: OBJ_BLOB;
		enum object_type type = oe ? oe->type :
			oid_object_info(the_repository, &oid,
					NULL);
		if (type < 0)
			die("%s not found: %s",
			    S_ISDIR(mode) ? "Tree" : "Blob",
			    command_buf.buf);
		if (type != expected)
			die("Not a %s (actually a %s): %s",
			    type_name(expected), type_name(type),
			    command_buf.buf);
	}

	/* An empty path means "replace the branch's root tree". */
	if (!*p) {
		tree_content_replace(&b->branch_tree, &oid, mode, NULL);
		return;
	}
	tree_content_set(&b->branch_tree, p, &oid, mode, NULL);
}
|
2006-08-08 09:36:45 +02:00
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
static void file_change_d(const char *p, struct branch *b)
|
2006-08-14 06:58:19 +02:00
|
|
|
{
|
2007-09-20 00:42:14 +02:00
|
|
|
static struct strbuf uq = STRBUF_INIT;
|
2006-08-15 02:16:28 +02:00
|
|
|
const char *endp;
|
|
|
|
|
2007-09-20 00:42:14 +02:00
|
|
|
strbuf_reset(&uq);
|
|
|
|
if (!unquote_c_style(&uq, p, &endp)) {
|
2006-08-15 02:16:28 +02:00
|
|
|
if (*endp)
|
|
|
|
die("Garbage after path in: %s", command_buf.buf);
|
2007-09-20 00:42:14 +02:00
|
|
|
p = uq.buf;
|
2006-08-15 02:16:28 +02:00
|
|
|
}
|
2013-06-23 16:58:22 +02:00
|
|
|
tree_content_remove(&b->branch_tree, p, NULL, 1);
|
2006-08-08 09:36:45 +02:00
|
|
|
}
|
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
/*
 * Handle a 'C <src> <dst>' (filecopy) or 'R <src> <dst>' (rename,
 * when rename != 0) command inside a commit.  Both paths may be
 * C-style quoted; an unquoted source ends at the first space.
 */
static void file_change_cr(const char *s, struct branch *b, int rename)
{
	const char *d;
	static struct strbuf s_uq = STRBUF_INIT;
	static struct strbuf d_uq = STRBUF_INIT;
	const char *endp;
	struct tree_entry leaf;

	/* Extract the source path; endp is left pointing at the separator. */
	strbuf_reset(&s_uq);
	if (!unquote_c_style(&s_uq, s, &endp)) {
		if (*endp != ' ')
			die("Missing space after source: %s", command_buf.buf);
	} else {
		/* Not quoted: the source runs up to the first space. */
		endp = strchr(s, ' ');
		if (!endp)
			die("Missing space after source: %s", command_buf.buf);
		strbuf_add(&s_uq, s, endp - s);
	}
	s = s_uq.buf;

	/* Step past the separating space to the destination. */
	endp++;
	if (!*endp)
		die("Missing dest: %s", command_buf.buf);

	d = endp;
	strbuf_reset(&d_uq);
	if (!unquote_c_style(&d_uq, d, &endp)) {
		if (*endp)
			die("Garbage after dest in: %s", command_buf.buf);
		d = d_uq.buf;
	}

	/* Fetch the source entry; a rename also removes it from the tree. */
	memset(&leaf, 0, sizeof(leaf));
	if (rename)
		tree_content_remove(&b->branch_tree, s, &leaf, 1);
	else
		tree_content_get(&b->branch_tree, s, &leaf, 1);
	if (!leaf.versions[1].mode)
		die("Path %s not in branch", s);
	/* An empty destination replaces the branch's root tree. */
	if (!*d) {	/* C "path/to/subdir" "" */
		tree_content_replace(&b->branch_tree,
			&leaf.versions[1].oid,
			leaf.versions[1].mode,
			leaf.tree);
		return;
	}
	tree_content_set(&b->branch_tree, d,
		&leaf.versions[1].oid,
		leaf.versions[1].mode,
		leaf.tree);
}
|
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
/*
 * Handle a 'N <dataref> <commit-ish>' (notemodify) command inside a
 * commit to a notes ref: attach (or, with a null/zero dataref,
 * remove) a note blob for the given commit, keeping the fanout
 * directory layout of branch b consistent with its note count.
 */
static void note_change_n(const char *p, struct branch *b, unsigned char *old_fanout)
{
	static struct strbuf uq = STRBUF_INIT;
	struct object_entry *oe;
	struct branch *s;
	struct object_id oid, commit_oid;
	char path[GIT_MAX_RAWSZ * 3];
	uint16_t inline_data = 0;
	unsigned char new_fanout;

	/*
	 * When loading a branch, we don't traverse its tree to count the real
	 * number of notes (too expensive to do this for all non-note refs).
	 * This means that recently loaded notes refs might incorrectly have
	 * b->num_notes == 0, and consequently, old_fanout might be wrong.
	 *
	 * Fix this by traversing the tree and counting the number of notes
	 * when b->num_notes == 0. If the notes tree is truly empty, the
	 * calculation should not take long.
	 */
	if (b->num_notes == 0 && *old_fanout == 0) {
		/* Invoke change_note_fanout() in "counting mode". */
		b->num_notes = change_note_fanout(&b->branch_tree, 0xff);
		*old_fanout = convert_num_notes_to_fanout(b->num_notes);
	}

	/* Now parse the notemodify command. */
	/* <dataref> or 'inline' */
	if (*p == ':') {
		oe = find_mark(marks, parse_mark_ref_space(&p));
		oidcpy(&oid, &oe->idx.oid);
	} else if (skip_prefix(p, "inline ", &p)) {
		inline_data = 1;
		oe = NULL; /* not used with inline_data, but makes gcc happy */
	} else {
		if (parse_mapped_oid_hex(p, &oid, &p))
			die("Invalid dataref: %s", command_buf.buf);
		oe = find_object(&oid);
		if (*p++ != ' ')
			die("Missing space after SHA1: %s", command_buf.buf);
	}

	/* <commit-ish>: a known branch, a mark, or any rev expression. */
	s = lookup_branch(p);
	if (s) {
		if (is_null_oid(&s->oid))
			die("Can't add a note on empty branch.");
		oidcpy(&commit_oid, &s->oid);
	} else if (*p == ':') {
		uintmax_t commit_mark = parse_mark_ref_eol(p);
		struct object_entry *commit_oe = find_mark(marks, commit_mark);
		if (commit_oe->type != OBJ_COMMIT)
			die("Mark :%" PRIuMAX " not a commit", commit_mark);
		oidcpy(&commit_oid, &commit_oe->idx.oid);
	} else if (!get_oid(p, &commit_oid)) {
		/* Peel to a commit and sanity-check the object's size. */
		unsigned long size;
		char *buf = read_object_with_reference(the_repository,
						       &commit_oid,
						       commit_type, &size,
						       &commit_oid);
		if (!buf || size < the_hash_algo->hexsz + 6)
			die("Not a valid commit: %s", p);
		free(buf);
	} else
		die("Invalid ref name or SHA1 expression: %s", p);

	if (inline_data) {
		/* Keep the commit-ish stable while we read the blob data. */
		if (p != uq.buf) {
			strbuf_addstr(&uq, p);
			p = uq.buf;
		}
		read_next_command();
		parse_and_store_blob(&last_blob, &oid, 0);
	} else if (oe) {
		if (oe->type != OBJ_BLOB)
			die("Not a blob (actually a %s): %s",
				type_name(oe->type), command_buf.buf);
	} else if (!is_null_oid(&oid)) {
		/* Dataref given by OID: verify it names an existing blob. */
		enum object_type type = oid_object_info(the_repository, &oid,
							NULL);
		if (type < 0)
			die("Blob not found: %s", command_buf.buf);
		if (type != OBJ_BLOB)
			die("Not a blob (actually a %s): %s",
				type_name(type), command_buf.buf);
	}

	/* Drop any existing note for this commit under the old fanout. */
	construct_path_with_fanout(oid_to_hex(&commit_oid), *old_fanout, path);
	if (tree_content_remove(&b->branch_tree, path, NULL, 0))
		b->num_notes--;

	/* A null OID means "remove the note", which we just did. */
	if (is_null_oid(&oid))
		return; /* nothing to insert */

	/* Insert the note under the fanout matching the new count. */
	b->num_notes++;
	new_fanout = convert_num_notes_to_fanout(b->num_notes);
	construct_path_with_fanout(oid_to_hex(&commit_oid), new_fanout, path);
	tree_content_set(&b->branch_tree, path, &oid, S_IFREG | 0644, NULL);
}
|
|
|
|
|
2007-02-07 08:03:03 +01:00
|
|
|
static void file_change_deleteall(struct branch *b)
|
|
|
|
{
|
|
|
|
release_tree_content_recursive(b->branch_tree.tree);
|
2017-05-01 04:29:03 +02:00
|
|
|
oidclr(&b->branch_tree.versions[0].oid);
|
|
|
|
oidclr(&b->branch_tree.versions[1].oid);
|
2007-02-07 08:03:03 +01:00
|
|
|
load_tree(&b->branch_tree);
|
2009-12-07 12:27:24 +01:00
|
|
|
b->num_notes = 0;
|
2007-02-07 08:03:03 +01:00
|
|
|
}
|
|
|
|
|
2008-05-16 00:35:56 +02:00
|
|
|
static void parse_from_commit(struct branch *b, char *buf, unsigned long size)
|
2007-05-24 06:05:19 +02:00
|
|
|
{
|
2019-02-19 01:05:05 +01:00
|
|
|
if (!buf || size < the_hash_algo->hexsz + 6)
|
2017-05-01 04:29:03 +02:00
|
|
|
die("Not a valid commit: %s", oid_to_hex(&b->oid));
|
2007-05-24 06:05:19 +02:00
|
|
|
if (memcmp("tree ", buf, 5)
|
2017-05-07 00:09:56 +02:00
|
|
|
|| get_oid_hex(buf + 5, &b->branch_tree.versions[1].oid))
|
2017-05-01 04:29:03 +02:00
|
|
|
die("The commit %s is corrupt", oid_to_hex(&b->oid));
|
|
|
|
oidcpy(&b->branch_tree.versions[0].oid,
|
|
|
|
&b->branch_tree.versions[1].oid);
|
2007-05-24 06:05:19 +02:00
|
|
|
}
|
|
|
|
|
2008-05-16 00:35:56 +02:00
|
|
|
static void parse_from_existing(struct branch *b)
|
2007-05-24 06:05:19 +02:00
|
|
|
{
|
2017-05-01 04:29:03 +02:00
|
|
|
if (is_null_oid(&b->oid)) {
|
|
|
|
oidclr(&b->branch_tree.versions[0].oid);
|
|
|
|
oidclr(&b->branch_tree.versions[1].oid);
|
2007-05-24 06:05:19 +02:00
|
|
|
} else {
|
|
|
|
unsigned long size;
|
|
|
|
char *buf;
|
|
|
|
|
2019-06-27 11:28:47 +02:00
|
|
|
buf = read_object_with_reference(the_repository,
|
|
|
|
&b->oid, commit_type, &size,
|
2018-03-12 03:27:52 +01:00
|
|
|
&b->oid);
|
2008-05-16 00:35:56 +02:00
|
|
|
parse_from_commit(b, buf, size);
|
2007-05-24 06:05:19 +02:00
|
|
|
free(buf);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-03 22:27:05 +02:00
|
|
|
/*
 * Reset branch b to the commit named by <objectish>, which may be
 * another known branch, a mark (':N'), or any rev expression.  A
 * null resolution marks the branch for deletion.  Always consumes
 * the current command line and returns 1.
 */
static int parse_objectish(struct branch *b, const char *objectish)
{
	struct branch *s;
	struct object_id oid;

	/* Remember the current tree so we can detect whether it changed. */
	oidcpy(&oid, &b->branch_tree.versions[1].oid);

	s = lookup_branch(objectish);
	if (b == s)
		die("Can't create a branch from itself: %s", b->name);
	else if (s) {
		/* Another in-memory branch: copy its commit and tree OIDs. */
		struct object_id *t = &s->branch_tree.versions[1].oid;
		oidcpy(&b->oid, &s->oid);
		oidcpy(&b->branch_tree.versions[0].oid, t);
		oidcpy(&b->branch_tree.versions[1].oid, t);
	} else if (*objectish == ':') {
		uintmax_t idnum = parse_mark_ref_eol(objectish);
		struct object_entry *oe = find_mark(marks, idnum);
		if (oe->type != OBJ_COMMIT)
			die("Mark :%" PRIuMAX " not a commit", idnum);
		if (!oideq(&b->oid, &oe->idx.oid)) {
			oidcpy(&b->oid, &oe->idx.oid);
			/* Prefer our own pack; fall back to the object store. */
			if (oe->pack_id != MAX_PACK_ID) {
				unsigned long size;
				char *buf = gfi_unpack_entry(oe, &size);
				parse_from_commit(b, buf, size);
				free(buf);
			} else
				parse_from_existing(b);
		}
	} else if (!get_oid(objectish, &b->oid)) {
		parse_from_existing(b);
		if (is_null_oid(&b->oid))
			b->delete = 1;
	}
	else
		die("Invalid ref name or SHA1 expression: %s", objectish);

	/* If the tree OID changed, the cached in-memory tree is stale. */
	if (b->branch_tree.tree && !oideq(&oid, &b->branch_tree.versions[1].oid)) {
		release_tree_content_recursive(b->branch_tree.tree);
		b->branch_tree.tree = NULL;
	}

	read_next_command();
	return 1;
}
|
|
|
|
|
2019-10-03 22:27:05 +02:00
|
|
|
static int parse_from(struct branch *b)
|
|
|
|
{
|
|
|
|
const char *from;
|
|
|
|
|
|
|
|
if (!skip_prefix(command_buf.buf, "from ", &from))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return parse_objectish(b, from);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int parse_objectish_with_prefix(struct branch *b, const char *prefix)
|
|
|
|
{
|
|
|
|
const char *base;
|
|
|
|
|
|
|
|
if (!skip_prefix(command_buf.buf, prefix, &base))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return parse_objectish(b, base);
|
|
|
|
}
|
|
|
|
|
2008-05-16 00:35:56 +02:00
|
|
|
/*
 * Consume consecutive 'merge <commit-ish>' lines, resolving each to
 * a commit OID.  Returns a newly allocated list of the OIDs in input
 * order and stores the number of entries in *count.  The caller owns
 * the list.
 */
static struct hash_list *parse_merge(unsigned int *count)
{
	/* tail points at the link to patch for O(1) append. */
	struct hash_list *list = NULL, **tail = &list, *n;
	const char *from;
	struct branch *s;

	*count = 0;
	while (skip_prefix(command_buf.buf, "merge ", &from)) {
		n = xmalloc(sizeof(*n));
		s = lookup_branch(from);
		if (s)
			oidcpy(&n->oid, &s->oid);
		else if (*from == ':') {
			uintmax_t idnum = parse_mark_ref_eol(from);
			struct object_entry *oe = find_mark(marks, idnum);
			if (oe->type != OBJ_COMMIT)
				die("Mark :%" PRIuMAX " not a commit", idnum);
			oidcpy(&n->oid, &oe->idx.oid);
		} else if (!get_oid(from, &n->oid)) {
			/* Peel to a commit and sanity-check the object. */
			unsigned long size;
			char *buf = read_object_with_reference(the_repository,
							       &n->oid,
							       commit_type,
							       &size, &n->oid);
			if (!buf || size < the_hash_algo->hexsz + 6)
				die("Not a valid commit: %s", from);
			free(buf);
		} else
			die("Invalid ref name or SHA1 expression: %s", from);

		n->next = NULL;
		*tail = n;
		tail = &n->next;

		(*count)++;
		read_next_command();
	}
	return list;
}
|
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
static void parse_new_commit(const char *arg)
|
2006-08-08 09:36:45 +02:00
|
|
|
{
|
2007-09-17 13:48:17 +02:00
|
|
|
static struct strbuf msg = STRBUF_INIT;
|
2006-08-15 02:16:28 +02:00
|
|
|
struct branch *b;
|
|
|
|
char *author = NULL;
|
|
|
|
char *committer = NULL;
|
fast-import: duplicate parsed encoding string
We read each line of the fast-import stream into the command_buf strbuf.
When reading a commit, we parse a line like "encoding foo" by storing a
pointer to "foo", but not making a copy. We may then read an unbounded
number of other lines (e.g., one for each modified file in the commit),
each of which writes into command_buf.
This works out in practice for small cases, because we hand off
ownership of the heap buffer from command_buf to the cmd_hist array, and
read new commands into a fresh heap buffer. And thus the pointer to
"foo" remains valid as long as there aren't so many intermediate lines
that we end up dropping the original "encoding" line from the history.
But as the test modification shows, if we go over our default of 100
lines, we end up with our encoding string pointing into freed heap
memory. This seems to fail reliably by writing garbage into the output,
but running under ASan definitely detects this as a use-after-free.
We can fix it by duplicating the encoding value, just as we do for other
parsed lines (e.g., an author line ends up in parse_ident, which copies
it to a new string).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-08-25 10:08:21 +02:00
|
|
|
char *encoding = NULL;
|
2007-01-12 04:21:38 +01:00
|
|
|
struct hash_list *merge_list = NULL;
|
|
|
|
unsigned int merge_count;
|
2009-12-07 12:27:24 +01:00
|
|
|
unsigned char prev_fanout, new_fanout;
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
const char *v;
|
2006-08-15 02:16:28 +02:00
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
b = lookup_branch(arg);
|
2006-08-14 06:58:19 +02:00
|
|
|
if (!b)
|
2014-06-18 21:49:12 +02:00
|
|
|
b = new_branch(arg);
|
2006-08-15 02:16:28 +02:00
|
|
|
|
|
|
|
read_next_command();
|
2008-05-16 00:35:56 +02:00
|
|
|
parse_mark();
|
2018-11-16 08:59:56 +01:00
|
|
|
parse_original_identifier();
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
if (skip_prefix(command_buf.buf, "author ", &v)) {
|
|
|
|
author = parse_ident(v);
|
2006-08-15 02:16:28 +02:00
|
|
|
read_next_command();
|
|
|
|
}
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
if (skip_prefix(command_buf.buf, "committer ", &v)) {
|
|
|
|
committer = parse_ident(v);
|
2006-08-15 02:16:28 +02:00
|
|
|
read_next_command();
|
|
|
|
}
|
|
|
|
if (!committer)
|
|
|
|
die("Expected committer but didn't get one");
|
fast-import: duplicate parsed encoding string
We read each line of the fast-import stream into the command_buf strbuf.
When reading a commit, we parse a line like "encoding foo" by storing a
pointer to "foo", but not making a copy. We may then read an unbounded
number of other lines (e.g., one for each modified file in the commit),
each of which writes into command_buf.
This works out in practice for small cases, because we hand off
ownership of the heap buffer from command_buf to the cmd_hist array, and
read new commands into a fresh heap buffer. And thus the pointer to
"foo" remains valid as long as there aren't so many intermediate lines
that we end up dropping the original "encoding" line from the history.
But as the test modification shows, if we go over our default of 100
lines, we end up with our encoding string pointing into freed heap
memory. This seems to fail reliably by writing garbage into the output,
but running under ASan definitely detects this as a use-after-free.
We can fix it by duplicating the encoding value, just as we do for other
parsed lines (e.g., an author line ends up in parse_ident, which copies
it to a new string).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-08-25 10:08:21 +02:00
|
|
|
if (skip_prefix(command_buf.buf, "encoding ", &v)) {
|
|
|
|
encoding = xstrdup(v);
|
2019-05-14 06:30:59 +02:00
|
|
|
read_next_command();
|
fast-import: duplicate parsed encoding string
We read each line of the fast-import stream into the command_buf strbuf.
When reading a commit, we parse a line like "encoding foo" by storing a
pointer to "foo", but not making a copy. We may then read an unbounded
number of other lines (e.g., one for each modified file in the commit),
each of which writes into command_buf.
This works out in practice for small cases, because we hand off
ownership of the heap buffer from command_buf to the cmd_hist array, and
read new commands into a fresh heap buffer. And thus the pointer to
"foo" remains valid as long as there aren't so many intermediate lines
that we end up dropping the original "encoding" line from the history.
But as the test modification shows, if we go over our default of 100
lines, we end up with our encoding string pointing into freed heap
memory. This seems to fail reliably by writing garbage into the output,
but running under ASan definitely detects this as a use-after-free.
We can fix it by duplicating the encoding value, just as we do for other
parsed lines (e.g., an author line ends up in parse_ident, which copies
it to a new string).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-08-25 10:08:21 +02:00
|
|
|
}
|
2010-02-01 18:27:35 +01:00
|
|
|
parse_data(&msg, 0, NULL);
|
2006-08-25 04:38:13 +02:00
|
|
|
read_next_command();
|
2008-05-16 00:35:56 +02:00
|
|
|
parse_from(b);
|
|
|
|
merge_list = parse_merge(&merge_count);
|
2006-08-15 02:16:28 +02:00
|
|
|
|
|
|
|
/* ensure the branch is active/loaded */
|
2006-08-24 10:37:35 +02:00
|
|
|
if (!b->branch_tree.tree || !max_active_branches) {
|
2006-08-14 06:58:19 +02:00
|
|
|
unload_one_branch();
|
|
|
|
load_branch(b);
|
|
|
|
}
|
2006-08-08 09:36:45 +02:00
|
|
|
|
2009-12-07 12:27:24 +01:00
|
|
|
prev_fanout = convert_num_notes_to_fanout(b->num_notes);
|
|
|
|
|
2006-08-14 06:58:19 +02:00
|
|
|
/* file_change* */
|
2007-09-17 11:19:04 +02:00
|
|
|
while (command_buf.len > 0) {
|
2014-06-18 21:49:12 +02:00
|
|
|
if (skip_prefix(command_buf.buf, "M ", &v))
|
|
|
|
file_change_m(v, b);
|
|
|
|
else if (skip_prefix(command_buf.buf, "D ", &v))
|
|
|
|
file_change_d(v, b);
|
|
|
|
else if (skip_prefix(command_buf.buf, "R ", &v))
|
|
|
|
file_change_cr(v, b, 1);
|
|
|
|
else if (skip_prefix(command_buf.buf, "C ", &v))
|
|
|
|
file_change_cr(v, b, 0);
|
|
|
|
else if (skip_prefix(command_buf.buf, "N ", &v))
|
|
|
|
note_change_n(v, b, &prev_fanout);
|
2007-02-07 08:03:03 +01:00
|
|
|
else if (!strcmp("deleteall", command_buf.buf))
|
|
|
|
file_change_deleteall(b);
|
2014-06-18 21:49:12 +02:00
|
|
|
else if (skip_prefix(command_buf.buf, "ls ", &v))
|
|
|
|
parse_ls(v, b);
|
2019-02-20 23:58:45 +01:00
|
|
|
else if (skip_prefix(command_buf.buf, "cat-blob ", &v))
|
|
|
|
parse_cat_blob(v);
|
2007-08-01 08:22:53 +02:00
|
|
|
else {
|
|
|
|
unread_command_buf = 1;
|
|
|
|
break;
|
|
|
|
}
|
2007-09-17 11:19:04 +02:00
|
|
|
if (read_next_command() == EOF)
|
|
|
|
break;
|
2006-08-08 09:36:45 +02:00
|
|
|
}
|
|
|
|
|
2009-12-07 12:27:24 +01:00
|
|
|
new_fanout = convert_num_notes_to_fanout(b->num_notes);
|
|
|
|
if (new_fanout != prev_fanout)
|
|
|
|
b->num_notes = change_note_fanout(&b->branch_tree, new_fanout);
|
|
|
|
|
2006-08-15 02:16:28 +02:00
|
|
|
/* build the tree and the commit */
|
2006-08-14 06:58:19 +02:00
|
|
|
store_tree(&b->branch_tree);
|
2017-05-01 04:29:03 +02:00
|
|
|
oidcpy(&b->branch_tree.versions[0].oid,
|
|
|
|
&b->branch_tree.versions[1].oid);
|
2007-09-17 13:48:17 +02:00
|
|
|
|
|
|
|
strbuf_reset(&new_data);
|
|
|
|
strbuf_addf(&new_data, "tree %s\n",
|
2017-05-01 04:29:03 +02:00
|
|
|
oid_to_hex(&b->branch_tree.versions[1].oid));
|
|
|
|
if (!is_null_oid(&b->oid))
|
|
|
|
strbuf_addf(&new_data, "parent %s\n",
|
|
|
|
oid_to_hex(&b->oid));
|
2007-01-12 04:21:38 +01:00
|
|
|
while (merge_list) {
|
|
|
|
struct hash_list *next = merge_list->next;
|
2017-05-01 04:29:03 +02:00
|
|
|
strbuf_addf(&new_data, "parent %s\n",
|
|
|
|
oid_to_hex(&merge_list->oid));
|
2007-01-12 04:21:38 +01:00
|
|
|
free(merge_list);
|
|
|
|
merge_list = next;
|
|
|
|
}
|
2007-09-17 13:48:17 +02:00
|
|
|
strbuf_addf(&new_data,
|
|
|
|
"author %s\n"
|
2019-05-14 06:30:59 +02:00
|
|
|
"committer %s\n",
|
2007-09-17 13:48:17 +02:00
|
|
|
author ? author : committer, committer);
|
2019-05-14 06:30:59 +02:00
|
|
|
if (encoding)
|
|
|
|
strbuf_addf(&new_data,
|
|
|
|
"encoding %s\n",
|
|
|
|
encoding);
|
|
|
|
strbuf_addch(&new_data, '\n');
|
2007-09-17 13:48:17 +02:00
|
|
|
strbuf_addbuf(&new_data, &msg);
|
2007-02-06 18:05:51 +01:00
|
|
|
free(author);
|
2006-08-15 02:16:28 +02:00
|
|
|
free(committer);
|
fast-import: duplicate parsed encoding string
We read each line of the fast-import stream into the command_buf strbuf.
When reading a commit, we parse a line like "encoding foo" by storing a
pointer to "foo", but not making a copy. We may then read an unbounded
number of other lines (e.g., one for each modified file in the commit),
each of which writes into command_buf.
This works out in practice for small cases, because we hand off
ownership of the heap buffer from command_buf to the cmd_hist array, and
read new commands into a fresh heap buffer. And thus the pointer to
"foo" remains valid as long as there aren't so many intermediate lines
that we end up dropping the original "encoding" line from the history.
But as the test modification shows, if we go over our default of 100
lines, we end up with our encoding string pointing into freed heap
memory. This seems to fail reliably by writing garbage into the output,
but running under ASan definitely detects this as a use-after-free.
We can fix it by duplicating the encoding value, just as we do for other
parsed lines (e.g., an author line ends up in parse_ident, which copies
it to a new string).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-08-25 10:08:21 +02:00
|
|
|
free(encoding);
|
2006-08-15 02:16:28 +02:00
|
|
|
|
2017-05-07 00:09:56 +02:00
|
|
|
if (!store_object(OBJ_COMMIT, &new_data, NULL, &b->oid, next_mark))
|
2007-01-17 08:42:43 +01:00
|
|
|
b->pack_id = pack_id;
|
2006-08-14 06:58:19 +02:00
|
|
|
b->last_commit = object_count_by_type[OBJ_COMMIT];
|
2006-08-08 09:36:45 +02:00
|
|
|
}
|
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
static void parse_new_tag(const char *arg)
|
2006-08-24 09:12:13 +02:00
|
|
|
{
|
2007-09-17 13:48:17 +02:00
|
|
|
static struct strbuf msg = STRBUF_INIT;
|
2006-08-24 09:12:13 +02:00
|
|
|
const char *from;
|
|
|
|
char *tagger;
|
|
|
|
struct branch *s;
|
|
|
|
struct tag *t;
|
2007-01-16 06:33:19 +01:00
|
|
|
uintmax_t from_mark = 0;
|
2017-05-07 00:09:56 +02:00
|
|
|
struct object_id oid;
|
2010-01-14 05:44:19 +01:00
|
|
|
enum object_type type;
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
const char *v;
|
2006-08-24 09:12:13 +02:00
|
|
|
|
2018-04-11 20:37:54 +02:00
|
|
|
t = mem_pool_alloc(&fi_mem_pool, sizeof(struct tag));
|
2012-03-05 14:48:49 +01:00
|
|
|
memset(t, 0, sizeof(struct tag));
|
2014-06-18 21:49:12 +02:00
|
|
|
t->name = pool_strdup(arg);
|
2006-08-24 09:12:13 +02:00
|
|
|
if (last_tag)
|
|
|
|
last_tag->next_tag = t;
|
|
|
|
else
|
|
|
|
first_tag = t;
|
|
|
|
last_tag = t;
|
|
|
|
read_next_command();
|
2019-10-03 22:27:04 +02:00
|
|
|
parse_mark();
|
2006-08-24 09:12:13 +02:00
|
|
|
|
|
|
|
/* from ... */
|
2014-06-18 21:49:12 +02:00
|
|
|
if (!skip_prefix(command_buf.buf, "from ", &from))
|
2006-08-24 09:12:13 +02:00
|
|
|
die("Expected from command, got %s", command_buf.buf);
|
|
|
|
s = lookup_branch(from);
|
|
|
|
if (s) {
|
2017-05-01 04:29:03 +02:00
|
|
|
if (is_null_oid(&s->oid))
|
2011-09-22 21:47:04 +02:00
|
|
|
die("Can't tag an empty branch.");
|
2017-05-07 00:09:56 +02:00
|
|
|
oidcpy(&oid, &s->oid);
|
2010-01-14 05:44:19 +01:00
|
|
|
type = OBJ_COMMIT;
|
2006-08-24 09:12:13 +02:00
|
|
|
} else if (*from == ':') {
|
2007-02-06 06:26:49 +01:00
|
|
|
struct object_entry *oe;
|
2012-04-08 00:59:20 +02:00
|
|
|
from_mark = parse_mark_ref_eol(from);
|
2020-02-22 21:17:47 +01:00
|
|
|
oe = find_mark(marks, from_mark);
|
2010-01-14 05:44:19 +01:00
|
|
|
type = oe->type;
|
2017-05-07 00:10:11 +02:00
|
|
|
oidcpy(&oid, &oe->idx.oid);
|
2017-05-07 00:09:56 +02:00
|
|
|
} else if (!get_oid(from, &oid)) {
|
|
|
|
struct object_entry *oe = find_object(&oid);
|
2011-08-22 14:10:19 +02:00
|
|
|
if (!oe) {
|
2018-04-25 20:20:59 +02:00
|
|
|
type = oid_object_info(the_repository, &oid, NULL);
|
2011-08-22 14:10:19 +02:00
|
|
|
if (type < 0)
|
|
|
|
die("Not a valid object: %s", from);
|
|
|
|
} else
|
|
|
|
type = oe->type;
|
2006-08-24 09:12:13 +02:00
|
|
|
} else
|
|
|
|
die("Invalid ref name or SHA1 expression: %s", from);
|
|
|
|
read_next_command();
|
|
|
|
|
2018-11-16 08:59:56 +01:00
|
|
|
/* original-oid ... */
|
|
|
|
parse_original_identifier();
|
|
|
|
|
2006-08-24 09:12:13 +02:00
|
|
|
/* tagger ... */
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
if (skip_prefix(command_buf.buf, "tagger ", &v)) {
|
|
|
|
tagger = parse_ident(v);
|
2008-12-19 23:41:21 +01:00
|
|
|
read_next_command();
|
|
|
|
} else
|
|
|
|
tagger = NULL;
|
2006-08-24 09:12:13 +02:00
|
|
|
|
|
|
|
/* tag payload/message */
|
2010-02-01 18:27:35 +01:00
|
|
|
parse_data(&msg, 0, NULL);
|
2006-08-24 09:12:13 +02:00
|
|
|
|
|
|
|
/* build the tag object */
|
2007-09-17 13:48:17 +02:00
|
|
|
strbuf_reset(&new_data);
|
2008-12-19 23:41:21 +01:00
|
|
|
|
2007-09-17 13:48:17 +02:00
|
|
|
strbuf_addf(&new_data,
|
2008-12-19 23:41:21 +01:00
|
|
|
"object %s\n"
|
|
|
|
"type %s\n"
|
|
|
|
"tag %s\n",
|
2018-02-14 19:59:24 +01:00
|
|
|
oid_to_hex(&oid), type_name(type), t->name);
|
2008-12-19 23:41:21 +01:00
|
|
|
if (tagger)
|
|
|
|
strbuf_addf(&new_data,
|
|
|
|
"tagger %s\n", tagger);
|
|
|
|
strbuf_addch(&new_data, '\n');
|
2007-09-17 13:48:17 +02:00
|
|
|
strbuf_addbuf(&new_data, &msg);
|
2006-08-24 09:12:13 +02:00
|
|
|
free(tagger);
|
|
|
|
|
2019-10-03 22:27:04 +02:00
|
|
|
if (store_object(OBJ_TAG, &new_data, NULL, &t->oid, next_mark))
|
2007-01-17 08:42:43 +01:00
|
|
|
t->pack_id = MAX_PACK_ID;
|
|
|
|
else
|
|
|
|
t->pack_id = pack_id;
|
2006-08-24 09:12:13 +02:00
|
|
|
}
|
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
static void parse_reset_branch(const char *arg)
|
2006-08-27 12:20:49 +02:00
|
|
|
{
|
|
|
|
struct branch *b;
|
fast-import: fix handling of deleted tags
If our input stream includes a tag which is later deleted, we were not
properly deleting it. We did have a step which would delete it, but we
left a tag in the tag list noting that it needed to be updated, and the
updating of annotated tags occurred AFTER ref deletion. So, when we
record that a tag needs to be deleted, also remove it from the list of
annotated tags to update.
While this has likely been something that has not happened in practice,
it will come up more in order to support nested tags. For nested tags,
we either need to give temporary names to the intermediate tags and then
delete them, or else we need to use the final name for the intermediate
tags. If we use the final name for the intermediate tags, then in order
to keep the sanity check that someone doesn't try to update the same tag
twice, we need to delete the ref after creating the intermediate tag.
So, either way nested tags imply the need to delete temporary inner tag
references.
Helped-by: René Scharfe <l.s.r@web.de>
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-10-03 22:27:03 +02:00
|
|
|
const char *tag_name;
|
2006-08-27 12:20:49 +02:00
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
b = lookup_branch(arg);
|
2006-08-27 12:20:49 +02:00
|
|
|
if (b) {
|
2017-05-01 04:29:03 +02:00
|
|
|
oidclr(&b->oid);
|
|
|
|
oidclr(&b->branch_tree.versions[0].oid);
|
|
|
|
oidclr(&b->branch_tree.versions[1].oid);
|
2006-08-27 12:20:49 +02:00
|
|
|
if (b->branch_tree.tree) {
|
|
|
|
release_tree_content_recursive(b->branch_tree.tree);
|
|
|
|
b->branch_tree.tree = NULL;
|
|
|
|
}
|
|
|
|
}
|
2007-01-12 04:28:39 +01:00
|
|
|
else
|
2014-06-18 21:49:12 +02:00
|
|
|
b = new_branch(arg);
|
2007-01-12 04:28:39 +01:00
|
|
|
read_next_command();
|
2008-05-16 00:35:56 +02:00
|
|
|
parse_from(b);
|
fast-import: fix handling of deleted tags
If our input stream includes a tag which is later deleted, we were not
properly deleting it. We did have a step which would delete it, but we
left a tag in the tag list noting that it needed to be updated, and the
updating of annotated tags occurred AFTER ref deletion. So, when we
record that a tag needs to be deleted, also remove it from the list of
annotated tags to update.
While this has likely been something that has not happened in practice,
it will come up more in order to support nested tags. For nested tags,
we either need to give temporary names to the intermediate tags and then
delete them, or else we need to use the final name for the intermediate
tags. If we use the final name for the intermediate tags, then in order
to keep the sanity check that someone doesn't try to update the same tag
twice, we need to delete the ref after creating the intermediate tag.
So, either way nested tags imply the need to delete temporary inner tag
references.
Helped-by: René Scharfe <l.s.r@web.de>
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-10-03 22:27:03 +02:00
|
|
|
if (b->delete && skip_prefix(b->name, "refs/tags/", &tag_name)) {
|
|
|
|
/*
|
|
|
|
* Elsewhere, we call dump_branches() before dump_tags(),
|
|
|
|
* and dump_branches() will handle ref deletions first, so
|
|
|
|
* in order to make sure the deletion actually takes effect,
|
|
|
|
* we need to remove the tag from our list of tags to update.
|
|
|
|
*
|
|
|
|
* NEEDSWORK: replace list of tags with hashmap for faster
|
|
|
|
* deletion?
|
|
|
|
*/
|
|
|
|
struct tag *t, *prev = NULL;
|
|
|
|
for (t = first_tag; t; t = t->next_tag) {
|
|
|
|
if (!strcmp(t->name, tag_name))
|
|
|
|
break;
|
|
|
|
prev = t;
|
|
|
|
}
|
|
|
|
if (t) {
|
|
|
|
if (prev)
|
|
|
|
prev->next_tag = t->next_tag;
|
|
|
|
else
|
|
|
|
first_tag = t->next_tag;
|
|
|
|
if (!t->next_tag)
|
|
|
|
last_tag = prev;
|
|
|
|
/* There is no mem_pool_free(t) function to call. */
|
|
|
|
}
|
|
|
|
}
|
2008-03-07 21:22:17 +01:00
|
|
|
if (command_buf.len > 0)
|
2007-08-01 08:22:53 +02:00
|
|
|
unread_command_buf = 1;
|
2006-08-27 12:20:49 +02:00
|
|
|
}
|
|
|
|
|
2010-11-28 20:45:01 +01:00
|
|
|
static void cat_blob_write(const char *buf, unsigned long size)
|
|
|
|
{
|
avoid "write_in_full(fd, buf, len) != len" pattern
The return value of write_in_full() is either "-1", or the
requested number of bytes[1]. If we make a partial write
before seeing an error, we still return -1, not a partial
value. This goes back to f6aa66cb95 (write_in_full: really
write in full or return error on disk full., 2007-01-11).
So checking anything except "was the return value negative"
is pointless. And there are a couple of reasons not to do
so:
1. It can do a funny signed/unsigned comparison. If your
"len" is signed (e.g., a size_t) then the compiler will
promote the "-1" to its unsigned variant.
This works out for "!= len" (unless you really were
trying to write the maximum size_t bytes), but is a
bug if you check "< len" (an example of which was fixed
recently in config.c).
We should avoid promoting the mental model that you
need to check the length at all, so that new sites are
not tempted to copy us.
2. Checking for a negative value is shorter to type,
especially when the length is an expression.
3. Linus says so. In d34cf19b89 (Clean up write_in_full()
users, 2007-01-11), right after the write_in_full()
semantics were changed, he wrote:
I really wish every "write_in_full()" user would just
check against "<0" now, but this fixes the nasty and
stupid ones.
Appeals to authority aside, this makes it clear that
writing it this way does not have an intentional
benefit. It's a historical curiosity that we never
bothered to clean up (and which was undoubtedly
cargo-culted into new sites).
So let's convert these obviously-correct cases (this
includes write_str_in_full(), which is just a wrapper for
write_in_full()).
[1] A careful reader may notice there is one way that
write_in_full() can return a different value. If we ask
write() to write N bytes and get a return value that is
_larger_ than N, we could return a larger total. But
besides the fact that this would imply a totally broken
version of write(), it would already invoke undefined
behavior. Our internal remaining counter is an unsigned
size_t, which means that subtracting too many byte will
wrap it around to a very large number. So we'll instantly
begin reading off the end of the buffer, trying to write
gigabytes (or petabytes) of data.
Signed-off-by: Jeff King <peff@peff.net>
Reviewed-by: Jonathan Nieder <jrnieder@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-09-13 19:16:03 +02:00
|
|
|
if (write_in_full(cat_blob_fd, buf, size) < 0)
|
2010-11-28 20:45:01 +01:00
|
|
|
die_errno("Write to frontend failed");
|
|
|
|
}
|
|
|
|
|
2017-05-07 00:09:56 +02:00
|
|
|
static void cat_blob(struct object_entry *oe, struct object_id *oid)
|
2010-11-28 20:45:01 +01:00
|
|
|
{
|
|
|
|
struct strbuf line = STRBUF_INIT;
|
|
|
|
unsigned long size;
|
|
|
|
enum object_type type = 0;
|
|
|
|
char *buf;
|
|
|
|
|
|
|
|
if (!oe || oe->pack_id == MAX_PACK_ID) {
|
sha1_file: convert read_sha1_file to struct object_id
Convert read_sha1_file to take a pointer to struct object_id and rename
it read_object_file. Do the same for read_sha1_file_extended.
Convert one use in grep.c to use the new function without any other code
change, since the pointer being passed is a void pointer that is already
initialized with a pointer to struct object_id. Update the declaration
and definitions of the modified functions, and apply the following
semantic patch to convert the remaining callers:
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1.hash, E2, E3)
+ read_object_file(&E1, E2, E3)
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1->hash, E2, E3)
+ read_object_file(E1, E2, E3)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1.hash, E2, E3, E4)
+ read_object_file_extended(&E1, E2, E3, E4)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1->hash, E2, E3, E4)
+ read_object_file_extended(E1, E2, E3, E4)
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-12 03:27:53 +01:00
|
|
|
buf = read_object_file(oid, &type, &size);
|
2010-11-28 20:45:01 +01:00
|
|
|
} else {
|
|
|
|
type = oe->type;
|
|
|
|
buf = gfi_unpack_entry(oe, &size);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Output based on batch_one_object() from cat-file.c.
|
|
|
|
*/
|
|
|
|
if (type <= 0) {
|
|
|
|
strbuf_reset(&line);
|
2017-05-07 00:09:56 +02:00
|
|
|
strbuf_addf(&line, "%s missing\n", oid_to_hex(oid));
|
2010-11-28 20:45:01 +01:00
|
|
|
cat_blob_write(line.buf, line.len);
|
|
|
|
strbuf_release(&line);
|
|
|
|
free(buf);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (!buf)
|
2017-05-07 00:09:56 +02:00
|
|
|
die("Can't read object %s", oid_to_hex(oid));
|
2010-11-28 20:45:01 +01:00
|
|
|
if (type != OBJ_BLOB)
|
|
|
|
die("Object %s is a %s but a blob was expected.",
|
2018-02-14 19:59:24 +01:00
|
|
|
oid_to_hex(oid), type_name(type));
|
2010-11-28 20:45:01 +01:00
|
|
|
strbuf_reset(&line);
|
2018-11-11 08:05:04 +01:00
|
|
|
strbuf_addf(&line, "%s %s %"PRIuMAX"\n", oid_to_hex(oid),
|
|
|
|
type_name(type), (uintmax_t)size);
|
2010-11-28 20:45:01 +01:00
|
|
|
cat_blob_write(line.buf, line.len);
|
|
|
|
strbuf_release(&line);
|
|
|
|
cat_blob_write(buf, size);
|
|
|
|
cat_blob_write("\n", 1);
|
fast-import: treat cat-blob as a delta base hint for next blob
Delta base for blobs is chosen as a previously saved blob. If we
treat cat-blob's blob as a delta base for the next blob, nothing
is likely to become worse.
For fast-import stream producer like svn-fe cat-blob is used like
following:
- svn-fe reads file delta in svn format
- to apply it, svn-fe asks cat-blob 'svn delta base'
- applies 'svn delta' to the response
- produces a blob command to store the result
Currently there is no way for svn-fe to give fast-import a hint on
object delta base. While what's requested in cat-blob is most of
the time a best delta base possible. Of course, it could be not a
good delta base, but we don't know any better one anyway.
So do treat cat-blob's result as a delta base for next blob. The
profit is nice: 2x to 7x reduction in pack size AND 1.2x to 3x
time speedup due to diff_delta being faster on good deltas. git gc
--aggressive can compress it even more, by 10% to 70%, utilizing
more cpu time, real time and 3 cpu cores.
Tested on 213M and 2.7G fast-import streams, resulting packs are 22M
and 113M, import time is 7s and 60s, both streams are produced by
svn-fe, sniffed and then used as raw input for fast-import.
For git-fast-export produced streams there is no change as it doesn't
use cat-blob and doesn't try to reorder blobs in some smart way to
make successive deltas small.
Signed-off-by: Dmitry Ivankov <divanorama@gmail.com>
Acked-by: David Barr <davidbarr@google.com>
Acked-by: Jonathan Nieder <jrnieder@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2011-08-20 21:04:12 +02:00
|
|
|
if (oe && oe->pack_id == pack_id) {
|
|
|
|
last_blob.offset = oe->idx.offset;
|
|
|
|
strbuf_attach(&last_blob.data, buf, size, size);
|
|
|
|
last_blob.depth = oe->depth;
|
|
|
|
} else
|
|
|
|
free(buf);
|
2010-11-28 20:45:01 +01:00
|
|
|
}
|
|
|
|
|
2015-07-01 17:05:58 +02:00
|
|
|
static void parse_get_mark(const char *p)
|
|
|
|
{
|
-Wuninitialized: remove some 'init-self' workarounds
The 'self-initialised' variables construct (ie <type> var = var;) has
been used to silence gcc '-W[maybe-]uninitialized' warnings. This has,
unfortunately, caused MSVC to issue 'uninitialized variable' warnings.
Also, using clang static analysis causes complaints about an 'Assigned
value is garbage or undefined'.
There are six such constructs in the current codebase. Only one of the
six causes gcc to issue a '-Wmaybe-uninitialized' warning (which will
be addressed elsewhere). The remaining five 'init-self' gcc workarounds
are noted below, along with the commit which introduced them:
1. builtin/rev-list.c: 'reaches' and 'all', see commit 457f08a030
("git-rev-list: add --bisect-vars option.", 2007-03-21).
2. merge-recursive.c:2064 'mrtree', see commit f120ae2a8e ("merge-
recursive.c: mrtree in merge() is not used before set", 2007-10-29).
3. fast-import.c:3023 'oe', see commit 85c62395b1 ("fast-import: let
importers retrieve blobs", 2010-11-28).
4. fast-import.c:3006 'oe', see commit 28c7b1f7b7 ("fast-import: add a
get-mark command", 2015-07-01).
Remove the 'self-initialised' variable constructs noted above.
Signed-off-by: Ramsay Jones <ramsay@ramsayjones.plus.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-19 18:54:35 +01:00
|
|
|
struct object_entry *oe;
|
2017-05-07 00:09:56 +02:00
|
|
|
char output[GIT_MAX_HEXSZ + 2];
|
2015-07-01 17:05:58 +02:00
|
|
|
|
|
|
|
/* get-mark SP <object> LF */
|
|
|
|
if (*p != ':')
|
|
|
|
die("Not a mark: %s", p);
|
|
|
|
|
2020-02-22 21:17:47 +01:00
|
|
|
oe = find_mark(marks, parse_mark_ref_eol(p));
|
2015-07-01 17:05:58 +02:00
|
|
|
if (!oe)
|
|
|
|
die("Unknown mark: %s", command_buf.buf);
|
|
|
|
|
2017-05-07 00:10:11 +02:00
|
|
|
xsnprintf(output, sizeof(output), "%s\n", oid_to_hex(&oe->idx.oid));
|
2019-02-19 01:05:05 +01:00
|
|
|
cat_blob_write(output, the_hash_algo->hexsz + 1);
|
2015-07-01 17:05:58 +02:00
|
|
|
}
|
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
static void parse_cat_blob(const char *p)
|
2010-11-28 20:45:01 +01:00
|
|
|
{
|
-Wuninitialized: remove some 'init-self' workarounds
The 'self-initialised' variables construct (ie <type> var = var;) has
been used to silence gcc '-W[maybe-]uninitialized' warnings. This has,
unfortunately, caused MSVC to issue 'uninitialized variable' warnings.
Also, using clang static analysis causes complaints about an 'Assigned
value is garbage or undefined'.
There are six such constructs in the current codebase. Only one of the
six causes gcc to issue a '-Wmaybe-uninitialized' warning (which will
be addressed elsewhere). The remaining five 'init-self' gcc workarounds
are noted below, along with the commit which introduced them:
1. builtin/rev-list.c: 'reaches' and 'all', see commit 457f08a030
("git-rev-list: add --bisect-vars option.", 2007-03-21).
2. merge-recursive.c:2064 'mrtree', see commit f120ae2a8e ("merge-
recursive.c: mrtree in merge() is not used before set", 2007-10-29).
3. fast-import.c:3023 'oe', see commit 85c62395b1 ("fast-import: let
importers retrieve blobs", 2010-11-28).
4. fast-import.c:3006 'oe', see commit 28c7b1f7b7 ("fast-import: add a
get-mark command", 2015-07-01).
Remove the 'self-initialised' variable constructs noted above.
Signed-off-by: Ramsay Jones <ramsay@ramsayjones.plus.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-19 18:54:35 +01:00
|
|
|
struct object_entry *oe;
|
2017-05-07 00:09:56 +02:00
|
|
|
struct object_id oid;
|
2010-11-28 20:45:01 +01:00
|
|
|
|
|
|
|
/* cat-blob SP <object> LF */
|
|
|
|
if (*p == ':') {
|
2020-02-22 21:17:47 +01:00
|
|
|
oe = find_mark(marks, parse_mark_ref_eol(p));
|
2010-11-28 20:45:01 +01:00
|
|
|
if (!oe)
|
|
|
|
die("Unknown mark: %s", command_buf.buf);
|
2017-05-07 00:10:11 +02:00
|
|
|
oidcpy(&oid, &oe->idx.oid);
|
2010-11-28 20:45:01 +01:00
|
|
|
} else {
|
fast-import: add options for rewriting submodules
When converting a repository using submodules from one hash algorithm to
another, it is necessary to rewrite the submodules from the old
algorithm to the new algorithm, since only references to submodules, not
their contents, are written to the fast-export stream. Without rewriting
the submodules, fast-import fails with an "Invalid dataref" error when
encountering a submodule in another algorithm.
Add a pair of options, --rewrite-submodules-from and
--rewrite-submodules-to, that take a list of marks produced by
fast-export and fast-import, respectively, when processing the
submodule. Use these marks to map the submodule commits from the old
algorithm to the new algorithm.
We read marks into two corresponding struct mark_set objects and then
perform a mapping from the old to the new using a hash table. This lets
us reuse the same mark parsing code that is used elsewhere and allows us
to efficiently read and match marks based on their ID, since mark files
need not be sorted.
Note that because we're using a khash table for the object IDs, and this
table copies values of struct object_id instead of taking references to
them, it's necessary to zero the struct object_id values that we use to
insert and look up in the table. Otherwise, we would end up with SHA-1
values that don't match because of whatever stack garbage might be left
in the unused area.
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-02-22 21:17:49 +01:00
|
|
|
if (parse_mapped_oid_hex(p, &oid, &p))
|
2012-04-08 00:59:20 +02:00
|
|
|
die("Invalid dataref: %s", command_buf.buf);
|
2017-05-07 00:09:56 +02:00
|
|
|
if (*p)
|
2010-11-28 20:45:01 +01:00
|
|
|
die("Garbage after SHA1: %s", command_buf.buf);
|
2017-05-07 00:09:56 +02:00
|
|
|
oe = find_object(&oid);
|
2010-11-28 20:45:01 +01:00
|
|
|
}
|
|
|
|
|
2017-05-07 00:09:56 +02:00
|
|
|
cat_blob(oe, &oid);
|
2010-11-28 20:45:01 +01:00
|
|
|
}
|
|
|
|
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
static struct object_entry *dereference(struct object_entry *oe,
|
2017-05-07 00:09:56 +02:00
|
|
|
struct object_id *oid)
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
{
|
|
|
|
unsigned long size;
|
2011-02-28 22:16:59 +01:00
|
|
|
char *buf = NULL;
|
2019-02-19 01:05:05 +01:00
|
|
|
const unsigned hexsz = the_hash_algo->hexsz;
|
|
|
|
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
if (!oe) {
|
2018-04-25 20:20:59 +02:00
|
|
|
enum object_type type = oid_object_info(the_repository, oid,
|
|
|
|
NULL);
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
if (type < 0)
|
2017-05-07 00:09:56 +02:00
|
|
|
die("object not found: %s", oid_to_hex(oid));
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
/* cache it! */
|
2017-05-07 00:09:56 +02:00
|
|
|
oe = insert_object(oid);
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
oe->type = type;
|
|
|
|
oe->pack_id = MAX_PACK_ID;
|
|
|
|
oe->idx.offset = 1;
|
|
|
|
}
|
|
|
|
switch (oe->type) {
|
|
|
|
case OBJ_TREE: /* easy case. */
|
|
|
|
return oe;
|
|
|
|
case OBJ_COMMIT:
|
|
|
|
case OBJ_TAG:
|
|
|
|
break;
|
|
|
|
default:
|
2013-09-04 21:04:30 +02:00
|
|
|
die("Not a tree-ish: %s", command_buf.buf);
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (oe->pack_id != MAX_PACK_ID) { /* in a pack being written */
|
|
|
|
buf = gfi_unpack_entry(oe, &size);
|
|
|
|
} else {
|
|
|
|
enum object_type unused;
|
sha1_file: convert read_sha1_file to struct object_id
Convert read_sha1_file to take a pointer to struct object_id and rename
it read_object_file. Do the same for read_sha1_file_extended.
Convert one use in grep.c to use the new function without any other code
change, since the pointer being passed is a void pointer that is already
initialized with a pointer to struct object_id. Update the declaration
and definitions of the modified functions, and apply the following
semantic patch to convert the remaining callers:
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1.hash, E2, E3)
+ read_object_file(&E1, E2, E3)
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1->hash, E2, E3)
+ read_object_file(E1, E2, E3)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1.hash, E2, E3, E4)
+ read_object_file_extended(&E1, E2, E3, E4)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1->hash, E2, E3, E4)
+ read_object_file_extended(E1, E2, E3, E4)
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-12 03:27:53 +01:00
|
|
|
buf = read_object_file(oid, &unused, &size);
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
}
|
|
|
|
if (!buf)
|
2017-05-07 00:09:56 +02:00
|
|
|
die("Can't load object %s", oid_to_hex(oid));
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
|
|
|
|
/* Peel one layer. */
|
|
|
|
switch (oe->type) {
|
|
|
|
case OBJ_TAG:
|
2019-02-19 01:05:05 +01:00
|
|
|
if (size < hexsz + strlen("object ") ||
|
2017-05-07 00:09:56 +02:00
|
|
|
get_oid_hex(buf + strlen("object "), oid))
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
die("Invalid SHA1 in tag: %s", command_buf.buf);
|
|
|
|
break;
|
|
|
|
case OBJ_COMMIT:
|
2019-02-19 01:05:05 +01:00
|
|
|
if (size < hexsz + strlen("tree ") ||
|
2017-05-07 00:09:56 +02:00
|
|
|
get_oid_hex(buf + strlen("tree "), oid))
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
die("Invalid SHA1 in commit: %s", command_buf.buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
free(buf);
|
2017-05-07 00:09:56 +02:00
|
|
|
return find_object(oid);
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
}
|
|
|
|
|
fast-import: add options for rewriting submodules
When converting a repository using submodules from one hash algorithm to
another, it is necessary to rewrite the submodules from the old
algorithm to the new algorithm, since only references to submodules, not
their contents, are written to the fast-export stream. Without rewriting
the submodules, fast-import fails with an "Invalid dataref" error when
encountering a submodule in another algorithm.
Add a pair of options, --rewrite-submodules-from and
--rewrite-submodules-to, that take a list of marks produced by
fast-export and fast-import, respectively, when processing the
submodule. Use these marks to map the submodule commits from the old
algorithm to the new algorithm.
We read marks into two corresponding struct mark_set objects and then
perform a mapping from the old to the new using a hash table. This lets
us reuse the same mark parsing code that is used elsewhere and allows us
to efficiently read and match marks based on their ID, since mark files
need not be sorted.
Note that because we're using a khash table for the object IDs, and this
table copies values of struct object_id instead of taking references to
them, it's necessary to zero the struct object_id values that we use to
insert and look up in the table. Otherwise, we would end up with SHA-1
values that don't match because of whatever stack garbage might be left
in the unused area.
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-02-22 21:17:49 +01:00
|
|
|
static void insert_mapped_mark(uintmax_t mark, void *object, void *cbp)
|
|
|
|
{
|
|
|
|
struct object_id *fromoid = object;
|
|
|
|
struct object_id *tooid = find_mark(cbp, mark);
|
|
|
|
int ret;
|
|
|
|
khiter_t it;
|
|
|
|
|
|
|
|
it = kh_put_oid_map(sub_oid_map, *fromoid, &ret);
|
|
|
|
/* We've already seen this object. */
|
|
|
|
if (ret == 0)
|
|
|
|
return;
|
|
|
|
kh_value(sub_oid_map, it) = tooid;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Populate sub_oid_map with one submodule's old->new object mapping:
 * each mark in 'from' is paired with the same-numbered mark in 'to'
 * via insert_mapped_mark().  Presumably both sets come from matching
 * fast-export/fast-import runs so the mark numbers correspond —
 * NOTE(review): confirm against the callers.
 */
static void build_mark_map_one(struct mark_set *from, struct mark_set *to)
{
	for_each_mark(from, 0, insert_mapped_mark, to);
}
|
|
|
|
|
|
|
|
static void build_mark_map(struct string_list *from, struct string_list *to)
|
|
|
|
{
|
|
|
|
struct string_list_item *fromp, *top;
|
|
|
|
|
|
|
|
sub_oid_map = kh_init_oid_map();
|
|
|
|
|
|
|
|
for_each_string_list_item(fromp, from) {
|
|
|
|
top = string_list_lookup(to, fromp->string);
|
|
|
|
if (!fromp->util) {
|
|
|
|
die(_("Missing from marks for submodule '%s'"), fromp->string);
|
|
|
|
} else if (!top || !top->util) {
|
|
|
|
die(_("Missing to marks for submodule '%s'"), fromp->string);
|
|
|
|
}
|
|
|
|
build_mark_map_one(fromp->util, top->util);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
/*
 * Parse the <dataref> at *p (used by the 'ls' command): either a
 * ":<mark>" reference or a hex object name, in the latter case mapped
 * through the submodule rewrite table by parse_mapped_oid_hex().
 * Advances *p past the dataref and, for the hex form, past the
 * required trailing space.  Peels tags/commits until a tree is
 * reached and returns its object_entry; dies on any parse failure or
 * unknown mark.
 */
static struct object_entry *parse_treeish_dataref(const char **p)
{
	struct object_id oid;
	struct object_entry *e;

	if (**p == ':') {	/* <mark> */
		e = find_mark(marks, parse_mark_ref_space(p));
		if (!e)
			die("Unknown mark: %s", command_buf.buf);
		oidcpy(&oid, &e->idx.oid);
	} else {	/* <sha1> */
		if (parse_mapped_oid_hex(*p, &oid, p))
			die("Invalid dataref: %s", command_buf.buf);
		e = find_object(&oid);
		/* parse_mapped_oid_hex left *p at the char after the hex. */
		if (*(*p)++ != ' ')
			die("Missing space after tree-ish: %s", command_buf.buf);
	}

	/* Dereference tags/commits until we hold an actual tree. */
	while (!e || e->type != OBJ_TREE)
		e = dereference(e, &oid);
	return e;
}
|
|
|
|
|
2019-02-19 01:05:06 +01:00
|
|
|
static void print_ls(int mode, const unsigned char *hash, const char *path)
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
{
|
|
|
|
static struct strbuf line = STRBUF_INIT;
|
|
|
|
|
|
|
|
/* See show_tree(). */
|
|
|
|
const char *type =
|
|
|
|
S_ISGITLINK(mode) ? commit_type :
|
|
|
|
S_ISDIR(mode) ? tree_type :
|
|
|
|
blob_type;
|
|
|
|
|
|
|
|
if (!mode) {
|
|
|
|
/* missing SP path LF */
|
|
|
|
strbuf_reset(&line);
|
|
|
|
strbuf_addstr(&line, "missing ");
|
|
|
|
quote_c_style(path, &line, NULL, 0);
|
|
|
|
strbuf_addch(&line, '\n');
|
|
|
|
} else {
|
|
|
|
/* mode SP type SP object_name TAB path LF */
|
|
|
|
strbuf_reset(&line);
|
|
|
|
strbuf_addf(&line, "%06o %s %s\t",
|
2019-02-19 01:05:06 +01:00
|
|
|
mode & ~NO_DELTA, type, hash_to_hex(hash));
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
quote_c_style(path, &line, NULL, 0);
|
|
|
|
strbuf_addch(&line, '\n');
|
|
|
|
}
|
|
|
|
cat_blob_write(line.buf, line.len);
|
|
|
|
}
|
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
static void parse_ls(const char *p, struct branch *b)
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
{
|
|
|
|
struct tree_entry *root = NULL;
|
Fix sparse warnings
Fix warnings from 'make check'.
- These files don't include 'builtin.h' causing sparse to complain that
cmd_* isn't declared:
builtin/clone.c:364, builtin/fetch-pack.c:797,
builtin/fmt-merge-msg.c:34, builtin/hash-object.c:78,
builtin/merge-index.c:69, builtin/merge-recursive.c:22
builtin/merge-tree.c:341, builtin/mktag.c:156, builtin/notes.c:426
builtin/notes.c:822, builtin/pack-redundant.c:596,
builtin/pack-refs.c:10, builtin/patch-id.c:60, builtin/patch-id.c:149,
builtin/remote.c:1512, builtin/remote-ext.c:240,
builtin/remote-fd.c:53, builtin/reset.c:236, builtin/send-pack.c:384,
builtin/unpack-file.c:25, builtin/var.c:75
- These files have symbols which should be marked static since they're
only file scope:
submodule.c:12, diff.c:631, replace_object.c:92, submodule.c:13,
submodule.c:14, trace.c:78, transport.c:195, transport-helper.c:79,
unpack-trees.c:19, url.c:3, url.c:18, url.c:104, url.c:117, url.c:123,
url.c:129, url.c:136, thread-utils.c:21, thread-utils.c:48
- These files redeclare symbols to be different types:
builtin/index-pack.c:210, parse-options.c:564, parse-options.c:571,
usage.c:49, usage.c:58, usage.c:63, usage.c:72
- These files use a literal integer 0 when they really should use a NULL
pointer:
daemon.c:663, fast-import.c:2942, imap-send.c:1072, notes-merge.c:362
While we're in the area, clean up some unused #includes in builtin files
(mostly exec_cmd.h).
Signed-off-by: Stephen Boyd <bebarino@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2011-03-22 08:51:05 +01:00
|
|
|
struct tree_entry leaf = {NULL};
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
|
2013-09-04 21:04:30 +02:00
|
|
|
/* ls SP (<tree-ish> SP)? <path> */
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
if (*p == '"') {
|
|
|
|
if (!b)
|
|
|
|
die("Not in a commit: %s", command_buf.buf);
|
|
|
|
root = &b->branch_tree;
|
|
|
|
} else {
|
|
|
|
struct object_entry *e = parse_treeish_dataref(&p);
|
|
|
|
root = new_tree_entry();
|
2017-05-07 00:10:11 +02:00
|
|
|
oidcpy(&root->versions[1].oid, &e->idx.oid);
|
2017-05-01 04:29:03 +02:00
|
|
|
if (!is_null_oid(&root->versions[1].oid))
|
2013-06-23 16:58:20 +02:00
|
|
|
root->versions[1].mode = S_IFDIR;
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
load_tree(root);
|
|
|
|
}
|
|
|
|
if (*p == '"') {
|
|
|
|
static struct strbuf uq = STRBUF_INIT;
|
|
|
|
const char *endp;
|
|
|
|
strbuf_reset(&uq);
|
|
|
|
if (unquote_c_style(&uq, p, &endp))
|
|
|
|
die("Invalid path: %s", command_buf.buf);
|
|
|
|
if (*endp)
|
|
|
|
die("Garbage after path in: %s", command_buf.buf);
|
|
|
|
p = uq.buf;
|
|
|
|
}
|
2013-06-23 16:58:21 +02:00
|
|
|
tree_content_get(root, p, &leaf, 1);
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
/*
|
|
|
|
* A directory in preparation would have a sha1 of zero
|
|
|
|
* until it is saved. Save, for simplicity.
|
|
|
|
*/
|
|
|
|
if (S_ISDIR(leaf.versions[1].mode))
|
|
|
|
store_tree(&leaf);
|
|
|
|
|
2017-05-01 04:29:03 +02:00
|
|
|
print_ls(leaf.versions[1].mode, leaf.versions[1].oid.hash, p);
|
2012-03-10 04:20:34 +01:00
|
|
|
if (leaf.tree)
|
|
|
|
release_tree_content_recursive(leaf.tree);
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
if (!b || root != &b->branch_tree)
|
|
|
|
release_tree_entry(root);
|
|
|
|
}
|
|
|
|
|
2010-11-22 09:16:02 +01:00
|
|
|
static void checkpoint(void)
|
2007-01-15 12:35:41 +01:00
|
|
|
{
|
2010-11-22 09:16:02 +01:00
|
|
|
checkpoint_requested = 0;
|
2007-02-07 08:42:44 +01:00
|
|
|
if (object_count) {
|
|
|
|
cycle_packfile();
|
|
|
|
}
|
2017-09-29 05:09:36 +02:00
|
|
|
dump_branches();
|
|
|
|
dump_tags();
|
|
|
|
dump_marks();
|
2010-11-22 09:16:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static void parse_checkpoint(void)
|
|
|
|
{
|
|
|
|
checkpoint_requested = 1;
|
2007-08-01 08:22:53 +02:00
|
|
|
skip_optional_lf();
|
2007-01-15 12:35:41 +01:00
|
|
|
}
|
|
|
|
|
2008-05-16 00:35:56 +02:00
|
|
|
static void parse_progress(void)
|
2007-08-01 16:23:08 +02:00
|
|
|
{
|
2007-09-06 13:20:05 +02:00
|
|
|
fwrite(command_buf.buf, 1, command_buf.len, stdout);
|
2007-08-01 16:23:08 +02:00
|
|
|
fputc('\n', stdout);
|
|
|
|
fflush(stdout);
|
|
|
|
skip_optional_lf();
|
|
|
|
}
|
|
|
|
|
2019-10-03 22:27:05 +02:00
|
|
|
static void parse_alias(void)
|
|
|
|
{
|
|
|
|
struct object_entry *e;
|
|
|
|
struct branch b;
|
|
|
|
|
|
|
|
skip_optional_lf();
|
|
|
|
read_next_command();
|
|
|
|
|
|
|
|
/* mark ... */
|
|
|
|
parse_mark();
|
|
|
|
if (!next_mark)
|
|
|
|
die(_("Expected 'mark' command, got %s"), command_buf.buf);
|
|
|
|
|
|
|
|
/* to ... */
|
|
|
|
memset(&b, 0, sizeof(b));
|
|
|
|
if (!parse_objectish_with_prefix(&b, "to "))
|
|
|
|
die(_("Expected 'to' command, got %s"), command_buf.buf);
|
|
|
|
e = find_object(&b.oid);
|
|
|
|
assert(e);
|
2020-02-22 21:17:45 +01:00
|
|
|
insert_mark(marks, next_mark, e);
|
2019-10-03 22:27:05 +02:00
|
|
|
}
|
|
|
|
|
2009-12-04 18:07:00 +01:00
|
|
|
static char* make_fast_import_path(const char *path)
|
2007-03-08 00:07:26 +01:00
|
|
|
{
|
2009-12-04 18:07:00 +01:00
|
|
|
if (!relative_marks_paths || is_absolute_path(path))
|
|
|
|
return xstrdup(path);
|
2017-04-20 23:09:09 +02:00
|
|
|
return git_pathdup("info/fast-import/%s", path);
|
2009-12-04 18:07:00 +01:00
|
|
|
}
|
|
|
|
|
2011-01-15 07:31:46 +01:00
|
|
|
static void option_import_marks(const char *marks,
|
|
|
|
int from_stream, int ignore_missing)
|
2007-03-08 00:07:26 +01:00
|
|
|
{
|
2009-12-04 18:06:59 +01:00
|
|
|
if (import_marks_file) {
|
|
|
|
if (from_stream)
|
|
|
|
die("Only one import-marks command allowed per stream");
|
|
|
|
|
|
|
|
/* read previous mark file */
|
|
|
|
if(!import_marks_file_from_stream)
|
|
|
|
read_marks();
|
2007-03-08 00:07:26 +01:00
|
|
|
}
|
2009-12-04 18:06:59 +01:00
|
|
|
|
2009-12-04 18:07:00 +01:00
|
|
|
import_marks_file = make_fast_import_path(marks);
|
2009-12-04 18:06:59 +01:00
|
|
|
import_marks_file_from_stream = from_stream;
|
2011-01-15 07:31:46 +01:00
|
|
|
import_marks_file_ignore_missing = ignore_missing;
|
2007-03-08 00:07:26 +01:00
|
|
|
}
|
|
|
|
|
2009-12-04 18:06:54 +01:00
|
|
|
static void option_date_format(const char *fmt)
|
|
|
|
{
|
|
|
|
if (!strcmp(fmt, "raw"))
|
|
|
|
whenspec = WHENSPEC_RAW;
|
2020-05-30 22:25:57 +02:00
|
|
|
else if (!strcmp(fmt, "raw-permissive"))
|
|
|
|
whenspec = WHENSPEC_RAW_PERMISSIVE;
|
2009-12-04 18:06:54 +01:00
|
|
|
else if (!strcmp(fmt, "rfc2822"))
|
|
|
|
whenspec = WHENSPEC_RFC2822;
|
|
|
|
else if (!strcmp(fmt, "now"))
|
|
|
|
whenspec = WHENSPEC_NOW;
|
|
|
|
else
|
|
|
|
die("unknown --date-format argument %s", fmt);
|
|
|
|
}
|
|
|
|
|
2010-11-28 20:42:46 +01:00
|
|
|
/*
 * Parse a non-negative integer argument for the named option.
 * Accepts any base strtoul does (0x.., 0.., decimal).  Dies on empty
 * input, trailing garbage, or a negative sign anywhere in the string
 * (strtoul would otherwise silently wrap negative values).
 */
static unsigned long ulong_arg(const char *option, const char *arg)
{
	char *end;
	unsigned long value = strtoul(arg, &end, 0);

	if (strchr(arg, '-') || end == arg || *end)
		die("%s: argument must be a non-negative integer", option);
	return value;
}
|
|
|
|
|
2009-12-04 18:06:54 +01:00
|
|
|
static void option_depth(const char *depth)
|
|
|
|
{
|
2010-11-28 20:42:46 +01:00
|
|
|
max_depth = ulong_arg("--depth", depth);
|
2009-12-04 18:06:54 +01:00
|
|
|
if (max_depth > MAX_DEPTH)
|
|
|
|
die("--depth cannot exceed %u", MAX_DEPTH);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void option_active_branches(const char *branches)
|
|
|
|
{
|
2010-11-28 20:42:46 +01:00
|
|
|
max_active_branches = ulong_arg("--active-branches", branches);
|
2009-12-04 18:06:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static void option_export_marks(const char *marks)
|
|
|
|
{
|
2009-12-04 18:07:00 +01:00
|
|
|
export_marks_file = make_fast_import_path(marks);
|
2009-12-04 18:06:54 +01:00
|
|
|
}
|
|
|
|
|
2010-11-28 20:45:01 +01:00
|
|
|
static void option_cat_blob_fd(const char *fd)
|
|
|
|
{
|
|
|
|
unsigned long n = ulong_arg("--cat-blob-fd", fd);
|
|
|
|
if (n > (unsigned long) INT_MAX)
|
|
|
|
die("--cat-blob-fd cannot exceed %d", INT_MAX);
|
|
|
|
cat_blob_fd = (int) n;
|
|
|
|
}
|
|
|
|
|
2009-12-04 18:06:54 +01:00
|
|
|
static void option_export_pack_edges(const char *edges)
|
|
|
|
{
|
|
|
|
if (pack_edges)
|
|
|
|
fclose(pack_edges);
|
2017-05-03 12:16:46 +02:00
|
|
|
pack_edges = xfopen(edges, "a");
|
2009-12-04 18:06:54 +01:00
|
|
|
}
|
|
|
|
|
fast-import: add options for rewriting submodules
When converting a repository using submodules from one hash algorithm to
another, it is necessary to rewrite the submodules from the old
algorithm to the new algorithm, since only references to submodules, not
their contents, are written to the fast-export stream. Without rewriting
the submodules, fast-import fails with an "Invalid dataref" error when
encountering a submodule in another algorithm.
Add a pair of options, --rewrite-submodules-from and
--rewrite-submodules-to, that take a list of marks produced by
fast-export and fast-import, respectively, when processing the
submodule. Use these marks to map the submodule commits from the old
algorithm to the new algorithm.
We read marks into two corresponding struct mark_set objects and then
perform a mapping from the old to the new using a hash table. This lets
us reuse the same mark parsing code that is used elsewhere and allows us
to efficiently read and match marks based on their ID, since mark files
need not be sorted.
Note that because we're using a khash table for the object IDs, and this
table copies values of struct object_id instead of taking references to
them, it's necessary to zero the struct object_id values that we use to
insert and look up in the table. Otherwise, we would end up with SHA-1
values that don't match because of whatever stack garbage might be left
in the unused area.
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-02-22 21:17:49 +01:00
|
|
|
static void option_rewrite_submodules(const char *arg, struct string_list *list)
|
|
|
|
{
|
|
|
|
struct mark_set *ms;
|
|
|
|
FILE *fp;
|
|
|
|
char *s = xstrdup(arg);
|
|
|
|
char *f = strchr(s, ':');
|
|
|
|
if (!f)
|
|
|
|
die(_("Expected format name:filename for submodule rewrite option"));
|
|
|
|
*f = '\0';
|
|
|
|
f++;
|
|
|
|
ms = xcalloc(1, sizeof(*ms));
|
|
|
|
string_list_insert(list, s)->util = ms;
|
|
|
|
|
|
|
|
fp = fopen(f, "r");
|
|
|
|
if (!fp)
|
|
|
|
die_errno("cannot read '%s'", f);
|
|
|
|
read_mark_file(ms, fp, insert_oid_entry);
|
|
|
|
fclose(fp);
|
|
|
|
}
|
|
|
|
|
2009-12-04 18:06:57 +01:00
|
|
|
static int parse_one_option(const char *option)
|
2009-12-04 18:06:54 +01:00
|
|
|
{
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
if (skip_prefix(option, "max-pack-size=", &option)) {
|
2010-02-04 20:10:44 +01:00
|
|
|
unsigned long v;
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
if (!git_parse_ulong(option, &v))
|
2010-02-04 20:10:44 +01:00
|
|
|
return 0;
|
|
|
|
if (v < 8192) {
|
|
|
|
warning("max-pack-size is now in bytes, assuming --max-pack-size=%lum", v);
|
|
|
|
v *= 1024 * 1024;
|
|
|
|
} else if (v < 1024 * 1024) {
|
|
|
|
warning("minimum max-pack-size is 1 MiB");
|
|
|
|
v = 1024 * 1024;
|
|
|
|
}
|
|
|
|
max_packsize = v;
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
} else if (skip_prefix(option, "big-file-threshold=", &option)) {
|
2010-02-04 03:27:08 +01:00
|
|
|
unsigned long v;
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
if (!git_parse_ulong(option, &v))
|
2010-02-04 03:27:08 +01:00
|
|
|
return 0;
|
|
|
|
big_file_threshold = v;
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
} else if (skip_prefix(option, "depth=", &option)) {
|
|
|
|
option_depth(option);
|
|
|
|
} else if (skip_prefix(option, "active-branches=", &option)) {
|
|
|
|
option_active_branches(option);
|
|
|
|
} else if (skip_prefix(option, "export-pack-edges=", &option)) {
|
|
|
|
option_export_pack_edges(option);
|
2019-08-29 17:25:45 +02:00
|
|
|
} else if (!strcmp(option, "quiet")) {
|
2009-12-04 18:06:54 +01:00
|
|
|
show_stats = 0;
|
2019-08-29 17:25:45 +02:00
|
|
|
} else if (!strcmp(option, "stats")) {
|
2009-12-04 18:06:54 +01:00
|
|
|
show_stats = 1;
|
fast-import: disallow "feature export-marks" by default
The fast-import stream command "feature export-marks=<path>" lets the
stream write marks to an arbitrary path. This may be surprising if you
are running fast-import against an untrusted input (which otherwise
cannot do anything except update Git objects and refs).
Let's disallow the use of this feature by default, and provide a
command-line option to re-enable it (you can always just use the
command-line --export-marks as well, but the in-stream version provides
an easy way for exporters to control the process).
This is a backwards-incompatible change, since the default is flipping
to the new, safer behavior. However, since the main users of the
in-stream versions would be import/export-based remote helpers, and
since we trust remote helpers already (which are already running
arbitrary code), we'll pass the new option by default when reading a
remote helper's stream. This should minimize the impact.
Note that the implementation isn't totally simple, as we have to work
around the fact that fast-import doesn't parse its command-line options
until after it has read any "feature" lines from the stream. This is how
it lets command-line options override in-stream. But in our case, it's
important to parse the new --allow-unsafe-features first.
There are three options for resolving this:
1. Do a separate "early" pass over the options. This is easy for us to
do because there are no command-line options that allow the
"unstuck" form (so there's no chance of us mistaking an argument
for an option), though it does introduce a risk of incorrect
parsing later (e.g,. if we convert to parse-options).
2. Move the option parsing phase back to the start of the program, but
teach the stream-reading code never to override an existing value.
This is tricky, because stream "feature" lines override each other
(meaning we'd have to start tracking the source for every option).
3. Accept that we might parse a "feature export-marks" line that is
forbidden, as long we don't _act_ on it until after we've parsed
the command line options.
This would, in fact, work with the current code, but only because
the previous patch fixed the export-marks parser to avoid touching
the filesystem.
So while it works, it does carry risk of somebody getting it wrong
in the future in a rather subtle and unsafe way.
I've gone with option (1) here as simple, safe, and unlikely to cause
regressions.
This fixes CVE-2019-1348.
Signed-off-by: Jeff King <peff@peff.net>
2019-08-29 20:37:26 +02:00
|
|
|
} else if (!strcmp(option, "allow-unsafe-features")) {
|
|
|
|
; /* already handled during early option parsing */
|
2009-12-04 18:06:54 +01:00
|
|
|
} else {
|
2009-12-04 18:06:57 +01:00
|
|
|
return 0;
|
2009-12-04 18:06:54 +01:00
|
|
|
}
|
2009-12-04 18:06:57 +01:00
|
|
|
|
|
|
|
return 1;
|
2009-12-04 18:06:54 +01:00
|
|
|
}
|
|
|
|
|
fast-import: disallow "feature export-marks" by default
The fast-import stream command "feature export-marks=<path>" lets the
stream write marks to an arbitrary path. This may be surprising if you
are running fast-import against an untrusted input (which otherwise
cannot do anything except update Git objects and refs).
Let's disallow the use of this feature by default, and provide a
command-line option to re-enable it (you can always just use the
command-line --export-marks as well, but the in-stream version provides
an easy way for exporters to control the process).
This is a backwards-incompatible change, since the default is flipping
to the new, safer behavior. However, since the main users of the
in-stream versions would be import/export-based remote helpers, and
since we trust remote helpers already (which are already running
arbitrary code), we'll pass the new option by default when reading a
remote helper's stream. This should minimize the impact.
Note that the implementation isn't totally simple, as we have to work
around the fact that fast-import doesn't parse its command-line options
until after it has read any "feature" lines from the stream. This is how
it lets command-line options override in-stream. But in our case, it's
important to parse the new --allow-unsafe-features first.
There are three options for resolving this:
1. Do a separate "early" pass over the options. This is easy for us to
do because there are no command-line options that allow the
"unstuck" form (so there's no chance of us mistaking an argument
for an option), though it does introduce a risk of incorrect
parsing later (e.g., if we convert to parse-options).
2. Move the option parsing phase back to the start of the program, but
teach the stream-reading code never to override an existing value.
This is tricky, because stream "feature" lines override each other
(meaning we'd have to start tracking the source for every option).
3. Accept that we might parse a "feature export-marks" line that is
forbidden, as long we don't _act_ on it until after we've parsed
the command line options.
This would, in fact, work with the current code, but only because
the previous patch fixed the export-marks parser to avoid touching
the filesystem.
So while it works, it does carry risk of somebody getting it wrong
in the future in a rather subtle and unsafe way.
I've gone with option (1) here as simple, safe, and unlikely to cause
regressions.
This fixes CVE-2019-1348.
Signed-off-by: Jeff King <peff@peff.net>
2019-08-29 20:37:26 +02:00
|
|
|
static void check_unsafe_feature(const char *feature, int from_stream)
|
|
|
|
{
|
|
|
|
if (from_stream && !allow_unsafe_features)
|
|
|
|
die(_("feature '%s' forbidden in input without --allow-unsafe-features"),
|
|
|
|
feature);
|
|
|
|
}
|
|
|
|
|
2009-12-04 18:06:59 +01:00
|
|
|
static int parse_one_feature(const char *feature, int from_stream)
|
2009-12-04 18:06:56 +01:00
|
|
|
{
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
const char *arg;
|
|
|
|
|
|
|
|
if (skip_prefix(feature, "date-format=", &arg)) {
|
|
|
|
option_date_format(arg);
|
|
|
|
} else if (skip_prefix(feature, "import-marks=", &arg)) {
|
2019-08-29 21:08:42 +02:00
|
|
|
check_unsafe_feature("import-marks", from_stream);
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
option_import_marks(arg, from_stream, 0);
|
|
|
|
} else if (skip_prefix(feature, "import-marks-if-exists=", &arg)) {
|
2019-08-29 21:08:42 +02:00
|
|
|
check_unsafe_feature("import-marks-if-exists", from_stream);
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
option_import_marks(arg, from_stream, 1);
|
|
|
|
} else if (skip_prefix(feature, "export-marks=", &arg)) {
|
fast-import: disallow "feature export-marks" by default
The fast-import stream command "feature export-marks=<path>" lets the
stream write marks to an arbitrary path. This may be surprising if you
are running fast-import against an untrusted input (which otherwise
cannot do anything except update Git objects and refs).
Let's disallow the use of this feature by default, and provide a
command-line option to re-enable it (you can always just use the
command-line --export-marks as well, but the in-stream version provides
an easy way for exporters to control the process).
This is a backwards-incompatible change, since the default is flipping
to the new, safer behavior. However, since the main users of the
in-stream versions would be import/export-based remote helpers, and
since we trust remote helpers already (which are already running
arbitrary code), we'll pass the new option by default when reading a
remote helper's stream. This should minimize the impact.
Note that the implementation isn't totally simple, as we have to work
around the fact that fast-import doesn't parse its command-line options
until after it has read any "feature" lines from the stream. This is how
it lets command-line options override in-stream. But in our case, it's
important to parse the new --allow-unsafe-features first.
There are three options for resolving this:
1. Do a separate "early" pass over the options. This is easy for us to
do because there are no command-line options that allow the
"unstuck" form (so there's no chance of us mistaking an argument
for an option), though it does introduce a risk of incorrect
parsing later (e.g,. if we convert to parse-options).
2. Move the option parsing phase back to the start of the program, but
teach the stream-reading code never to override an existing value.
This is tricky, because stream "feature" lines override each other
(meaning we'd have to start tracking the source for every option).
3. Accept that we might parse a "feature export-marks" line that is
forbidden, as long we don't _act_ on it until after we've parsed
the command line options.
This would, in fact, work with the current code, but only because
the previous patch fixed the export-marks parser to avoid touching
the filesystem.
So while it works, it does carry risk of somebody getting it wrong
in the future in a rather subtle and unsafe way.
I've gone with option (1) here as simple, safe, and unlikely to cause
regressions.
This fixes CVE-2019-1348.
Signed-off-by: Jeff King <peff@peff.net>
2019-08-29 20:37:26 +02:00
|
|
|
check_unsafe_feature(feature, from_stream);
|
use skip_prefix to avoid magic numbers
It's a common idiom to match a prefix and then skip past it
with a magic number, like:
if (starts_with(foo, "bar"))
foo += 3;
This is easy to get wrong, since you have to count the
prefix string yourself, and there's no compiler check if the
string changes. We can use skip_prefix to avoid the magic
numbers here.
Note that some of these conversions could be much shorter.
For example:
if (starts_with(arg, "--foo=")) {
bar = arg + 6;
continue;
}
could become:
if (skip_prefix(arg, "--foo=", &bar))
continue;
However, I have left it as:
if (skip_prefix(arg, "--foo=", &v)) {
bar = v;
continue;
}
to visually match nearby cases which need to actually
process the string. Like:
if (skip_prefix(arg, "--foo=", &v)) {
bar = atoi(v);
continue;
}
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-18 21:47:50 +02:00
|
|
|
option_export_marks(arg);
|
2019-10-03 22:27:05 +02:00
|
|
|
} else if (!strcmp(feature, "alias")) {
|
|
|
|
; /* Don't die - this feature is supported */
|
fast-import: add options for rewriting submodules
When converting a repository using submodules from one hash algorithm to
another, it is necessary to rewrite the submodules from the old
algorithm to the new algorithm, since only references to submodules, not
their contents, are written to the fast-export stream. Without rewriting
the submodules, fast-import fails with an "Invalid dataref" error when
encountering a submodule in another algorithm.
Add a pair of options, --rewrite-submodules-from and
--rewrite-submodules-to, that take a list of marks produced by
fast-export and fast-import, respectively, when processing the
submodule. Use these marks to map the submodule commits from the old
algorithm to the new algorithm.
We read marks into two corresponding struct mark_set objects and then
perform a mapping from the old to the new using a hash table. This lets
us reuse the same mark parsing code that is used elsewhere and allows us
to efficiently read and match marks based on their ID, since mark files
need not be sorted.
Note that because we're using a khash table for the object IDs, and this
table copies values of struct object_id instead of taking references to
them, it's necessary to zero the struct object_id values that we use to
insert and look up in the table. Otherwise, we would end up with SHA-1
values that don't match because of whatever stack garbage might be left
in the unused area.
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-02-22 21:17:49 +01:00
|
|
|
} else if (skip_prefix(feature, "rewrite-submodules-to=", &arg)) {
|
|
|
|
option_rewrite_submodules(arg, &sub_marks_to);
|
|
|
|
} else if (skip_prefix(feature, "rewrite-submodules-from=", &arg)) {
|
|
|
|
option_rewrite_submodules(arg, &sub_marks_from);
|
|
|
|
} else if (skip_prefix(feature, "rewrite-submodules-from=", &arg)) {
|
2015-07-01 17:05:58 +02:00
|
|
|
} else if (!strcmp(feature, "get-mark")) {
|
|
|
|
; /* Don't die - this feature is supported */
|
2010-11-28 20:45:01 +01:00
|
|
|
} else if (!strcmp(feature, "cat-blob")) {
|
|
|
|
; /* Don't die - this feature is supported */
|
2011-05-05 20:56:00 +02:00
|
|
|
} else if (!strcmp(feature, "relative-marks")) {
|
2009-12-04 18:07:00 +01:00
|
|
|
relative_marks_paths = 1;
|
2011-05-05 20:56:00 +02:00
|
|
|
} else if (!strcmp(feature, "no-relative-marks")) {
|
2009-12-04 18:07:00 +01:00
|
|
|
relative_marks_paths = 0;
|
2011-07-16 15:03:32 +02:00
|
|
|
} else if (!strcmp(feature, "done")) {
|
|
|
|
require_explicit_termination = 1;
|
2011-05-05 20:56:00 +02:00
|
|
|
} else if (!strcmp(feature, "force")) {
|
2009-12-04 18:06:56 +01:00
|
|
|
force_update = 1;
|
fast-import: add 'ls' command
Lazy fast-import frontend authors that want to rely on the backend to
keep track of the content of the imported trees _almost_ have what
they need in the 'cat-blob' command (v1.7.4-rc0~30^2~3, 2010-11-28).
But it is not quite enough, since
(1) cat-blob can be used to retrieve the content of files, but
not their mode, and
(2) using cat-blob requires the frontend to keep track of a name
(mark number or object id) for each blob to be retrieved
Introduce an 'ls' command to complement cat-blob and take care of the
remaining needs. The 'ls' command finds what is at a given path
within a given tree-ish (tag, commit, or tree):
'ls' SP <dataref> SP <path> LF
or in fast-import's active commit:
'ls' SP <path> LF
The response is a single line sent through the cat-blob channel,
imitating ls-tree output. So for example:
FE> ls :1 Documentation
gfi> 040000 tree 9e6c2b599341d28a2a375f8207507e0a2a627fe9 Documentation
FE> ls 9e6c2b599341d28a2a375f8207507e0a2a627fe9 git-fast-import.txt
gfi> 100644 blob 4f92954396e3f0f97e75b6838a5635b583708870 git-fast-import.txt
FE> ls :1 RelNotes
gfi> 120000 blob b942e499449d97aeb50c73ca2bdc1c6e6d528743 RelNotes
FE> cat-blob b942e499449d97aeb50c73ca2bdc1c6e6d528743
gfi> b942e499449d97aeb50c73ca2bdc1c6e6d528743 blob 32
gfi> Documentation/RelNotes/1.7.4.txt
The most interesting parts of the reply are the first word, which is
a 6-digit octal mode (regular file, executable, symlink, directory,
or submodule), and the part from the second space to the tab, which is
a <dataref> that can be used in later cat-blob, ls, and filemodify (M)
commands to refer to the content (blob, tree, or commit) at that path.
If there is nothing there, the response is "missing some/path".
The intent is for this command to be used to read files from the
active commit, so a frontend can apply patches to them, and to copy
files and directories from previous revisions.
For example, proposed updates to svn-fe use this command in place of
its internal representation of the repository directory structure.
This simplifies the frontend a great deal and means support for
resuming an import in a separate fast-import run (i.e., incremental
import) is basically free.
Signed-off-by: David Barr <david.barr@cordelta.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Junio C Hamano <gitster@pobox.com>
Improved-by: Sverre Rabbelier <srabbelier@gmail.com>
2010-12-02 11:40:20 +01:00
|
|
|
} else if (!strcmp(feature, "notes") || !strcmp(feature, "ls")) {
|
2011-02-09 23:43:57 +01:00
|
|
|
; /* do nothing; we have the feature */
|
2009-12-04 18:06:56 +01:00
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
static void parse_feature(const char *feature)
|
2009-12-04 18:06:56 +01:00
|
|
|
{
|
|
|
|
if (seen_data_command)
|
|
|
|
die("Got feature command '%s' after data command", feature);
|
|
|
|
|
2009-12-04 18:06:59 +01:00
|
|
|
if (parse_one_feature(feature, 1))
|
2009-12-04 18:06:56 +01:00
|
|
|
return;
|
|
|
|
|
|
|
|
die("This version of fast-import does not support feature %s.", feature);
|
|
|
|
}
|
|
|
|
|
2014-06-18 21:49:12 +02:00
|
|
|
static void parse_option(const char *option)
|
2009-12-04 18:06:57 +01:00
|
|
|
{
|
|
|
|
if (seen_data_command)
|
|
|
|
die("Got option command '%s' after data command", option);
|
|
|
|
|
|
|
|
if (parse_one_option(option))
|
|
|
|
return;
|
|
|
|
|
|
|
|
die("This version of fast-import does not support option: %s", option);
|
2007-03-08 00:07:26 +01:00
|
|
|
}
|
|
|
|
|
2014-08-13 14:22:56 +02:00
|
|
|
static void git_pack_config(void)
|
2008-01-21 05:36:54 +01:00
|
|
|
{
|
2014-08-13 14:22:56 +02:00
|
|
|
int indexversion_value;
|
2016-04-25 23:17:28 +02:00
|
|
|
int limit;
|
2014-08-13 14:22:56 +02:00
|
|
|
unsigned long packsizelimit_value;
|
|
|
|
|
|
|
|
if (!git_config_get_ulong("pack.depth", &max_depth)) {
|
2008-01-21 05:36:54 +01:00
|
|
|
if (max_depth > MAX_DEPTH)
|
|
|
|
max_depth = MAX_DEPTH;
|
|
|
|
}
|
2014-08-13 14:22:56 +02:00
|
|
|
if (!git_config_get_int("pack.indexversion", &indexversion_value)) {
|
|
|
|
pack_idx_opts.version = indexversion_value;
|
2011-02-26 00:43:25 +01:00
|
|
|
if (pack_idx_opts.version > 2)
|
2014-08-13 14:22:56 +02:00
|
|
|
git_die_config("pack.indexversion",
|
|
|
|
"bad pack.indexversion=%"PRIu32, pack_idx_opts.version);
|
2010-02-17 20:05:55 +01:00
|
|
|
}
|
2014-08-13 14:22:56 +02:00
|
|
|
if (!git_config_get_ulong("pack.packsizelimit", &packsizelimit_value))
|
|
|
|
max_packsize = packsizelimit_value;
|
|
|
|
|
2016-04-25 23:17:28 +02:00
|
|
|
if (!git_config_get_int("fastimport.unpacklimit", &limit))
|
|
|
|
unpack_limit = limit;
|
|
|
|
else if (!git_config_get_int("transfer.unpacklimit", &limit))
|
|
|
|
unpack_limit = limit;
|
|
|
|
|
2014-08-13 14:22:56 +02:00
|
|
|
git_config(git_default_config, NULL);
|
2008-01-21 05:36:54 +01:00
|
|
|
}
|
|
|
|
|
2006-08-23 08:00:31 +02:00
|
|
|
/* Usage string printed for "git fast-import -h" or unexpected arguments. */
static const char fast_import_usage[] =
"git fast-import [--date-format=<f>] [--max-pack-size=<n>] [--big-file-threshold=<n>] [--depth=<n>] [--active-branches=<n>] [--export-marks=<marks.file>]";
|
2006-08-23 08:00:31 +02:00
|
|
|
|
2009-12-04 18:06:57 +01:00
|
|
|
static void parse_argv(void)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 1; i < global_argc; i++) {
|
|
|
|
const char *a = global_argv[i];
|
|
|
|
|
|
|
|
if (*a != '-' || !strcmp(a, "--"))
|
|
|
|
break;
|
|
|
|
|
2014-06-18 21:46:15 +02:00
|
|
|
if (!skip_prefix(a, "--", &a))
|
|
|
|
die("unknown option %s", a);
|
|
|
|
|
|
|
|
if (parse_one_option(a))
|
2009-12-04 18:06:57 +01:00
|
|
|
continue;
|
|
|
|
|
2014-06-18 21:46:15 +02:00
|
|
|
if (parse_one_feature(a, 0))
|
2009-12-04 18:06:57 +01:00
|
|
|
continue;
|
|
|
|
|
2014-06-18 21:46:15 +02:00
|
|
|
if (skip_prefix(a, "cat-blob-fd=", &a)) {
|
|
|
|
option_cat_blob_fd(a);
|
2010-11-28 20:45:01 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2014-06-18 21:46:15 +02:00
|
|
|
die("unknown option --%s", a);
|
2009-12-04 18:06:57 +01:00
|
|
|
}
|
|
|
|
if (i != global_argc)
|
|
|
|
usage(fast_import_usage);
|
|
|
|
|
|
|
|
seen_data_command = 1;
|
|
|
|
if (import_marks_file)
|
|
|
|
read_marks();
|
fast-import: add options for rewriting submodules
When converting a repository using submodules from one hash algorithm to
another, it is necessary to rewrite the submodules from the old
algorithm to the new algorithm, since only references to submodules, not
their contents, are written to the fast-export stream. Without rewriting
the submodules, fast-import fails with an "Invalid dataref" error when
encountering a submodule in another algorithm.
Add a pair of options, --rewrite-submodules-from and
--rewrite-submodules-to, that take a list of marks produced by
fast-export and fast-import, respectively, when processing the
submodule. Use these marks to map the submodule commits from the old
algorithm to the new algorithm.
We read marks into two corresponding struct mark_set objects and then
perform a mapping from the old to the new using a hash table. This lets
us reuse the same mark parsing code that is used elsewhere and allows us
to efficiently read and match marks based on their ID, since mark files
need not be sorted.
Note that because we're using a khash table for the object IDs, and this
table copies values of struct object_id instead of taking references to
them, it's necessary to zero the struct object_id values that we use to
insert and look up in the table. Otherwise, we would end up with SHA-1
values that don't match because of whatever stack garbage might be left
in the unused area.
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-02-22 21:17:49 +01:00
|
|
|
build_mark_map(&sub_marks_from, &sub_marks_to);
|
2009-12-04 18:06:57 +01:00
|
|
|
}
|
|
|
|
|
add an extra level of indirection to main()
There are certain startup tasks that we expect every git
process to do. In some cases this is just to improve the
quality of the program (e.g., setting up gettext()). In
others it is a requirement for using certain functions in
libgit.a (e.g., system_path() expects that you have called
git_extract_argv0_path()).
Most commands are builtins and are covered by the git.c
version of main(). However, there are still a few external
commands that use their own main(). Each of these has to
remember to include the correct startup sequence, and we are
not always consistent.
Rather than just fix the inconsistencies, let's make this
harder to get wrong by providing a common main() that can
run this standard startup.
We basically have two options to do this:
- the compat/mingw.h file already does something like this by
adding a #define that replaces the definition of main with a
wrapper that calls mingw_startup().
The upside is that the code in each program doesn't need
to be changed at all; it's rewritten on the fly by the
preprocessor.
The downside is that it may make debugging of the startup
sequence a bit more confusing, as the preprocessor is
quietly inserting new code.
- the builtin functions are all of the form cmd_foo(),
and git.c's main() calls them.
This is much more explicit, which may make things more
obvious to somebody reading the code. It's also more
flexible (because of course we have to figure out _which_
cmd_foo() to call).
The downside is that each of the builtins must define
cmd_foo(), instead of just main().
This patch chooses the latter option, preferring the more
explicit approach, even though it is more invasive. We
introduce a new file common-main.c, with the "real" main. It
expects to call cmd_main() from whatever other objects it is
linked against.
We link common-main.o against anything that links against
libgit.a, since we know that such programs will need to do
this setup. Note that common-main.o can't actually go inside
libgit.a, as the linker would not pick up its main()
function automatically (it has no callers).
The rest of the patch is just adjusting all of the various
external programs (mostly in t/helper) to use cmd_main().
I've provided a global declaration for cmd_main(), which
means that all of the programs also need to match its
signature. In particular, many functions need to switch to
"const char **" instead of "char **" for argv. This effect
ripples out to a few other variables and functions, as well.
This makes the patch even more invasive, but the end result
is much better. We should be treating argv strings as const
anyway, and now all programs conform to the same signature
(which also matches the way builtins are defined).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-07-01 07:58:58 +02:00
|
|
|
int cmd_main(int argc, const char **argv)
|
2006-08-06 19:51:39 +02:00
|
|
|
{
|
2009-12-04 18:06:54 +01:00
|
|
|
unsigned int i;
|
2006-08-06 19:51:39 +02:00
|
|
|
|
2009-11-09 16:04:49 +01:00
|
|
|
if (argc == 2 && !strcmp(argv[1], "-h"))
|
|
|
|
usage(fast_import_usage);
|
|
|
|
|
2008-02-28 23:29:54 +01:00
|
|
|
setup_git_directory();
|
2011-02-26 00:43:25 +01:00
|
|
|
reset_pack_idx_option(&pack_idx_opts);
|
2014-08-13 14:22:56 +02:00
|
|
|
git_pack_config();
|
2008-01-21 05:36:54 +01:00
|
|
|
|
2007-03-07 23:09:21 +01:00
|
|
|
alloc_objects(object_entry_alloc);
|
2007-09-10 12:35:04 +02:00
|
|
|
strbuf_init(&command_buf, 0);
|
2007-03-07 23:09:21 +01:00
|
|
|
atom_table = xcalloc(atom_table_sz, sizeof(struct atom_str*));
|
|
|
|
branch_table = xcalloc(branch_table_sz, sizeof(struct branch*));
|
|
|
|
avail_tree_table = xcalloc(avail_tree_table_sz, sizeof(struct avail_tree_content*));
|
2018-04-11 20:37:54 +02:00
|
|
|
marks = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
|
2006-08-14 06:58:19 +02:00
|
|
|
|
fast-import: replace custom hash with hashmap.c
We use a custom hash in fast-import to store the set of objects we've
imported so far. It has a fixed set of 2^16 buckets and chains any
collisions with a linked list. As the number of objects grows larger
than that, the load factor increases and we degrade to O(n) lookups and
O(n^2) insertions.
We can scale better by using our hashmap.c implementation, which will
resize the bucket count as we grow. This does incur an extra memory cost
of 8 bytes per object, as hashmap stores the integer hash value for each
entry in its hashmap_entry struct (which we really don't care about
here, because we're just reusing the embedded object hash). But I think
the numbers below justify this (and our per-object memory cost is
already much higher).
I also looked at using khash, but it seemed to perform slightly worse
than hashmap at all sizes, and worse even than the existing code for
small sizes. It's also awkward to use here, because we want to look up a
"struct object_entry" from a "struct object_id", and it doesn't handle
mismatched keys as well. Making a mapping of object_id to object_entry
would be more natural, but that would require pulling the embedded oid
out of the object_entry or incurring an extra 32 bytes per object.
In a synthetic test creating as many cheap, tiny objects as possible
perl -e '
my $bits = shift;
my $nr = 2**$bits;
for (my $i = 0; $i < $nr; $i++) {
print "blob\n";
print "data 4\n";
print pack("N", $i);
}
' $bits | git fast-import
I got these results:
nr_objects master khash hashmap
2^20 0m4.317s 0m5.109s 0m3.890s
2^21 0m10.204s 0m9.702s 0m7.933s
2^22 0m27.159s 0m17.911s 0m16.751s
2^23 1m19.038s 0m35.080s 0m31.963s
2^24 4m18.766s 1m10.233s 1m6.793s
which points to hashmap as the winner. We didn't have any perf tests for
fast-export or fast-import, so I added one as a more real-world case.
It uses an export without blobs since that's significantly cheaper than
a full one, but still is an interesting case people might use (e.g., for
rewriting history). It will emphasize this change in some ways (as a
percentage we spend more time making objects and less shuffling blob
bytes around) and less in others (the total object count is lower).
Here are the results for linux.git:
Test HEAD^ HEAD
----------------------------------------------------------------------------
9300.1: export (no-blobs) 67.64(66.96+0.67) 67.81(67.06+0.75) +0.3%
9300.2: import (no-blobs) 284.04(283.34+0.69) 198.09(196.01+0.92) -30.3%
It only has ~5.2M commits and trees, so this is a larger effect than I
expected (the 2^23 case above only improved by 50s or so, but here we
gained almost 90s). This is probably due to actually performing more
object lookups in a real import with trees and commits, as opposed to
just dumping a bunch of blobs into a pack.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-06 21:49:40 +02:00
|
|
|
hashmap_init(&object_table, object_entry_hashcmp, NULL, 0);
|
|
|
|
|
fast-import: disallow "feature export-marks" by default
The fast-import stream command "feature export-marks=<path>" lets the
stream write marks to an arbitrary path. This may be surprising if you
are running fast-import against an untrusted input (which otherwise
cannot do anything except update Git objects and refs).
Let's disallow the use of this feature by default, and provide a
command-line option to re-enable it (you can always just use the
command-line --export-marks as well, but the in-stream version provides
an easy way for exporters to control the process).
This is a backwards-incompatible change, since the default is flipping
to the new, safer behavior. However, since the main users of the
in-stream versions would be import/export-based remote helpers, and
since we trust remote helpers already (which are already running
arbitrary code), we'll pass the new option by default when reading a
remote helper's stream. This should minimize the impact.
Note that the implementation isn't totally simple, as we have to work
around the fact that fast-import doesn't parse its command-line options
until after it has read any "feature" lines from the stream. This is how
it lets command-line options override in-stream. But in our case, it's
important to parse the new --allow-unsafe-features first.
There are three options for resolving this:
1. Do a separate "early" pass over the options. This is easy for us to
do because there are no command-line options that allow the
"unstuck" form (so there's no chance of us mistaking an argument
for an option), though it does introduce a risk of incorrect
parsing later (e.g,. if we convert to parse-options).
2. Move the option parsing phase back to the start of the program, but
teach the stream-reading code never to override an existing value.
This is tricky, because stream "feature" lines override each other
(meaning we'd have to start tracking the source for every option).
3. Accept that we might parse a "feature export-marks" line that is
forbidden, as long we don't _act_ on it until after we've parsed
the command line options.
This would, in fact, work with the current code, but only because
the previous patch fixed the export-marks parser to avoid touching
the filesystem.
So while it works, it does carry risk of somebody getting it wrong
in the future in a rather subtle and unsafe way.
I've gone with option (1) here as simple, safe, and unlikely to cause
regressions.
This fixes CVE-2019-1348.
Signed-off-by: Jeff King <peff@peff.net>
2019-08-29 20:37:26 +02:00
|
|
|
/*
|
|
|
|
* We don't parse most options until after we've seen the set of
|
|
|
|
* "feature" lines at the start of the stream (which allows the command
|
|
|
|
* line to override stream data). But we must do an early parse of any
|
|
|
|
* command-line options that impact how we interpret the feature lines.
|
|
|
|
*/
|
|
|
|
for (i = 1; i < argc; i++) {
|
|
|
|
const char *arg = argv[i];
|
|
|
|
if (*arg != '-' || !strcmp(arg, "--"))
|
|
|
|
break;
|
|
|
|
if (!strcmp(arg, "--allow-unsafe-features"))
|
|
|
|
allow_unsafe_features = 1;
|
|
|
|
}
|
|
|
|
|
2009-12-04 18:06:57 +01:00
|
|
|
global_argc = argc;
|
|
|
|
global_argv = argv;
|
2006-08-23 08:00:31 +02:00
|
|
|
|
2018-04-11 20:37:54 +02:00
|
|
|
rc_free = mem_pool_alloc(&fi_mem_pool, cmd_save * sizeof(*rc_free));
|
2007-08-03 10:47:04 +02:00
|
|
|
for (i = 0; i < (cmd_save - 1); i++)
|
|
|
|
rc_free[i].next = &rc_free[i + 1];
|
|
|
|
rc_free[cmd_save - 1].next = NULL;
|
|
|
|
|
2007-01-15 10:39:05 +01:00
|
|
|
start_packfile();
|
2007-08-03 08:00:37 +02:00
|
|
|
set_die_routine(die_nicely);
|
2010-11-22 09:16:02 +01:00
|
|
|
set_checkpoint_signal();
|
2007-09-17 11:19:04 +02:00
|
|
|
while (read_next_command() != EOF) {
|
2014-06-18 21:49:12 +02:00
|
|
|
const char *v;
|
2007-09-17 11:19:04 +02:00
|
|
|
if (!strcmp("blob", command_buf.buf))
|
2008-05-16 00:35:56 +02:00
|
|
|
parse_new_blob();
|
2014-06-18 21:49:12 +02:00
|
|
|
else if (skip_prefix(command_buf.buf, "commit ", &v))
|
|
|
|
parse_new_commit(v);
|
|
|
|
else if (skip_prefix(command_buf.buf, "tag ", &v))
|
|
|
|
parse_new_tag(v);
|
|
|
|
else if (skip_prefix(command_buf.buf, "reset ", &v))
|
|
|
|
parse_reset_branch(v);
|
fast-import: check most prominent commands first
This is not a very important change, and one that I expect to have no
performance impact whatsoever, but reading the code bothered me. The
parsing of command types in cmd_main() mostly runs in order of most
common to least common commands; sure, it's hard to say for sure what
the most common are without some type of study, but it seems fairly
clear to mark the original four ("blob", "commit", "tag", "reset") as
the most prominent. Indeed, the parsing for most other commands was
added later in the list. However, when "ls" was added, it was stuck
near the top of the list, with no rationale for that particular
location. Move it down to later to appease my Tourette's-like internal
twitching that its former location was causing.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-02-20 23:58:44 +01:00
|
|
|
else if (skip_prefix(command_buf.buf, "ls ", &v))
|
|
|
|
parse_ls(v, NULL);
|
2019-02-20 23:58:45 +01:00
|
|
|
else if (skip_prefix(command_buf.buf, "cat-blob ", &v))
|
|
|
|
parse_cat_blob(v);
|
2019-02-20 23:58:46 +01:00
|
|
|
else if (skip_prefix(command_buf.buf, "get-mark ", &v))
|
|
|
|
parse_get_mark(v);
|
2007-01-15 12:35:41 +01:00
|
|
|
else if (!strcmp("checkpoint", command_buf.buf))
|
2008-05-16 00:35:56 +02:00
|
|
|
parse_checkpoint();
|
2011-07-16 15:03:32 +02:00
|
|
|
else if (!strcmp("done", command_buf.buf))
|
|
|
|
break;
|
2019-10-03 22:27:05 +02:00
|
|
|
else if (!strcmp("alias", command_buf.buf))
|
|
|
|
parse_alias();
|
2013-11-30 21:55:40 +01:00
|
|
|
else if (starts_with(command_buf.buf, "progress "))
|
2008-05-16 00:35:56 +02:00
|
|
|
parse_progress();
|
2014-06-18 21:49:12 +02:00
|
|
|
else if (skip_prefix(command_buf.buf, "feature ", &v))
|
|
|
|
parse_feature(v);
|
|
|
|
else if (skip_prefix(command_buf.buf, "option git ", &v))
|
|
|
|
parse_option(v);
|
2013-11-30 21:55:40 +01:00
|
|
|
else if (starts_with(command_buf.buf, "option "))
|
2009-12-04 18:06:57 +01:00
|
|
|
/* ignore non-git options*/;
|
2006-08-15 02:16:28 +02:00
|
|
|
else
|
|
|
|
die("Unsupported command: %s", command_buf.buf);
|
2010-11-22 09:16:02 +01:00
|
|
|
|
|
|
|
if (checkpoint_requested)
|
|
|
|
checkpoint();
|
2006-08-05 08:04:21 +02:00
|
|
|
}
|
2009-12-04 18:06:57 +01:00
|
|
|
|
|
|
|
/* argv hasn't been parsed yet, do so */
|
|
|
|
if (!seen_data_command)
|
|
|
|
parse_argv();
|
|
|
|
|
2011-07-16 15:03:32 +02:00
|
|
|
if (require_explicit_termination && feof(stdin))
|
|
|
|
die("stream ends early");
|
|
|
|
|
2007-01-15 10:39:05 +01:00
|
|
|
end_packfile();
|
2006-08-15 02:16:28 +02:00
|
|
|
|
2006-08-14 06:58:19 +02:00
|
|
|
dump_branches();
|
2006-08-24 09:12:13 +02:00
|
|
|
dump_tags();
|
2007-01-16 07:15:31 +01:00
|
|
|
unkeep_all_packs();
|
2006-08-25 22:03:04 +02:00
|
|
|
dump_marks();
|
2006-08-06 19:51:39 +02:00
|
|
|
|
2007-02-12 01:45:56 +01:00
|
|
|
if (pack_edges)
|
|
|
|
fclose(pack_edges);
|
|
|
|
|
2007-02-07 08:19:31 +01:00
|
|
|
if (show_stats) {
|
|
|
|
uintmax_t total_count = 0, duplicate_count = 0;
|
|
|
|
for (i = 0; i < ARRAY_SIZE(object_count_by_type); i++)
|
|
|
|
total_count += object_count_by_type[i];
|
|
|
|
for (i = 0; i < ARRAY_SIZE(duplicate_count_by_type); i++)
|
|
|
|
duplicate_count += duplicate_count_by_type[i];
|
|
|
|
|
|
|
|
fprintf(stderr, "%s statistics:\n", argv[0]);
|
|
|
|
fprintf(stderr, "---------------------------------------------------------------------\n");
|
2007-02-21 02:34:56 +01:00
|
|
|
fprintf(stderr, "Alloc'd objects: %10" PRIuMAX "\n", alloc_count);
|
|
|
|
fprintf(stderr, "Total objects: %10" PRIuMAX " (%10" PRIuMAX " duplicates )\n", total_count, duplicate_count);
|
2011-08-20 21:04:11 +02:00
|
|
|
fprintf(stderr, " blobs : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_BLOB], duplicate_count_by_type[OBJ_BLOB], delta_count_by_type[OBJ_BLOB], delta_count_attempts_by_type[OBJ_BLOB]);
|
|
|
|
fprintf(stderr, " trees : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_TREE], duplicate_count_by_type[OBJ_TREE], delta_count_by_type[OBJ_TREE], delta_count_attempts_by_type[OBJ_TREE]);
|
|
|
|
fprintf(stderr, " commits: %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_COMMIT], duplicate_count_by_type[OBJ_COMMIT], delta_count_by_type[OBJ_COMMIT], delta_count_attempts_by_type[OBJ_COMMIT]);
|
|
|
|
fprintf(stderr, " tags : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_TAG], duplicate_count_by_type[OBJ_TAG], delta_count_by_type[OBJ_TAG], delta_count_attempts_by_type[OBJ_TAG]);
|
2007-02-07 08:19:31 +01:00
|
|
|
fprintf(stderr, "Total branches: %10lu (%10lu loads )\n", branch_count, branch_load_count);
|
2007-02-21 02:34:56 +01:00
|
|
|
fprintf(stderr, " marks: %10" PRIuMAX " (%10" PRIuMAX " unique )\n", (((uintmax_t)1) << marks->shift) * 1024, marks_set_count);
|
2007-02-07 08:19:31 +01:00
|
|
|
fprintf(stderr, " atoms: %10u\n", atom_cnt);
|
2018-04-11 20:37:54 +02:00
|
|
|
fprintf(stderr, "Memory total: %10" PRIuMAX " KiB\n", (tree_entry_allocd + fi_mem_pool.pool_alloc + alloc_count*sizeof(struct object_entry))/1024);
|
|
|
|
fprintf(stderr, " pools: %10lu KiB\n", (unsigned long)((tree_entry_allocd + fi_mem_pool.pool_alloc) /1024));
|
2007-02-21 02:34:56 +01:00
|
|
|
fprintf(stderr, " objects: %10" PRIuMAX " KiB\n", (alloc_count*sizeof(struct object_entry))/1024);
|
2007-02-07 08:19:31 +01:00
|
|
|
fprintf(stderr, "---------------------------------------------------------------------\n");
|
|
|
|
pack_report();
|
|
|
|
fprintf(stderr, "---------------------------------------------------------------------\n");
|
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
2006-08-05 08:04:21 +02:00
|
|
|
|
2007-02-06 22:08:06 +01:00
|
|
|
return failure ? 1 : 0;
|
2006-08-05 08:04:21 +02:00
|
|
|
}
|