/* git-commit-vandalism/object.c */

#include "cache.h"
#include "object.h"
#include "replace-object.h"
#include "object-store.h"
#include "blob.h"
#include "tree.h"
#include "commit.h"
#include "tag.h"
#include "alloc.h"
#include "object-store.h"
#include "packfile.h"
#include "commit-graph.h"
unsigned int get_max_object_index(void)
{
	return the_repository->parsed_objects->obj_hash_size;
}

struct object *get_indexed_object(unsigned int idx)
{
	return the_repository->parsed_objects->obj_hash[idx];
}

static const char *object_type_strings[] = {
	NULL,		/* OBJ_NONE = 0 */
	"commit",	/* OBJ_COMMIT = 1 */
	"tree",		/* OBJ_TREE = 2 */
	"blob",		/* OBJ_BLOB = 3 */
	"tag",		/* OBJ_TAG = 4 */
};
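
/*
 * The strings above map one-to-one onto the numeric object types, so
 * e.g. type_name(OBJ_TREE) yields "tree" and
 * type_from_string_gently("tree", -1, 1) yields OBJ_TREE (2).
 * OBJ_NONE has no string form; type_name() returns NULL for it.
 */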
const char *type_name(unsigned int type)
{
	if (type >= ARRAY_SIZE(object_type_strings))
		return NULL;
	return object_type_strings[type];
}

int type_from_string_gently(const char *str, ssize_t len, int gentle)
{
	int i;

	if (len < 0)
		len = strlen(str);

	for (i = 1; i < ARRAY_SIZE(object_type_strings); i++)
		if (!strncmp(str, object_type_strings[i], len) &&
		    object_type_strings[i][len] == '\0')
			return i;

	if (gentle)
		return -1;

	die(_("invalid object type \"%s\""), str);
}

/*
 * Return a numerical hash value between 0 and n-1 for the object with
 * the specified sha1. n must be a power of 2. Please note that the
 * return value is *not* consistent across computer architectures.
 */
static unsigned int hash_obj(const unsigned char *sha1, unsigned int n)
{
	return sha1hash(sha1) & (n - 1);
}
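
/*
 * Because n is a power of two, the "& (n - 1)" above is equivalent to
 * "% n".  sha1hash() effectively reads the leading bytes of the object
 * name as a native-endian unsigned int, which is why the result is
 * architecture dependent.
 */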

/*
 * Insert obj into the hash table hash, which has length size (which
 * must be a power of 2). On collisions, simply overflow to the next
 * empty bucket.
 */
static void insert_obj_hash(struct object *obj, struct object **hash, unsigned int size)
{
	unsigned int j = hash_obj(obj->oid.hash, size);

	while (hash[j]) {
		j++;
		if (j >= size)
			j = 0;
	}
	hash[j] = obj;
}
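
/*
 * Linear probing only terminates because the table is never allowed to
 * fill up: create_object() grows obj_hash once the number of objects
 * reaches roughly half of its capacity, so both insert_obj_hash() and
 * lookup_object() are guaranteed to hit an empty bucket eventually.
 */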

/*
 * Look up the record for the given sha1 in the hash map stored in
 * obj_hash. Return NULL if it was not found.
 */
struct object *lookup_object(struct repository *r, const unsigned char *sha1)
{
	unsigned int i, first;
	struct object *obj;

	if (!r->parsed_objects->obj_hash)
		return NULL;

	first = i = hash_obj(sha1, r->parsed_objects->obj_hash_size);
	while ((obj = r->parsed_objects->obj_hash[i]) != NULL) {
		if (hasheq(sha1, obj->oid.hash))
			break;
		i++;
		if (i == r->parsed_objects->obj_hash_size)
			i = 0;
	}
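	/*
	 * Swapping the found object into the bucket where the probe
	 * started is safe: "first" is the object's natural bucket, so it
	 * stays reachable, and the displaced entry only moves further
	 * along the same contiguous run of occupied buckets, so a probe
	 * starting at (or before) its own natural bucket still finds it.
	 */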
	if (obj && i != first) {
		/*
		 * Move object to where we started to look for it so
		 * that we do not need to walk the hash table the next
		 * time we look for it.
		 */
		SWAP(r->parsed_objects->obj_hash[i],
		     r->parsed_objects->obj_hash[first]);
	}
	return obj;
}

/*
 * Increase the size of the hash map stored in obj_hash to the next
 * power of 2 (but at least 32). Copy the existing values to the new
 * hash map.
 */
static void grow_object_hash(struct repository *r)
{
	int i;
	/*
	 * Note that this size must always be power-of-2 to match hash_obj
	 * above.
	 */
	int new_hash_size = r->parsed_objects->obj_hash_size < 32 ? 32 : 2 * r->parsed_objects->obj_hash_size;
	struct object **new_hash;

	new_hash = xcalloc(new_hash_size, sizeof(struct object *));
	for (i = 0; i < r->parsed_objects->obj_hash_size; i++) {
		struct object *obj = r->parsed_objects->obj_hash[i];

		if (!obj)
			continue;
		insert_obj_hash(obj, new_hash, new_hash_size);
	}
	free(r->parsed_objects->obj_hash);
	r->parsed_objects->obj_hash = new_hash;
	r->parsed_objects->obj_hash_size = new_hash_size;
}

void *create_object(struct repository *r, const unsigned char *sha1, void *o)
{
	struct object *obj = o;

	obj->parsed = 0;
	obj->flags = 0;
	hashcpy(obj->oid.hash, sha1);

	if (r->parsed_objects->obj_hash_size - 1 <= r->parsed_objects->nr_objs * 2)
		grow_object_hash(r);

	insert_obj_hash(obj, r->parsed_objects->obj_hash,
			r->parsed_objects->obj_hash_size);
	r->parsed_objects->nr_objs++;
	return obj;
}
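
/*
 * The lookup_* helpers (lookup_blob(), lookup_tree(), lookup_commit(),
 * lookup_tag()) are the usual callers of create_object() above and
 * object_as_type() below; the pattern is roughly (illustrative sketch
 * of lookup_blob()):
 *
 *	struct object *obj = lookup_object(r, oid->hash);
 *	if (!obj)
 *		return create_object(r, oid->hash, alloc_blob_node(r));
 *	return object_as_type(r, obj, OBJ_BLOB, 0);
 */
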
void *object_as_type(struct repository *r, struct object *obj, enum object_type type, int quiet)
{
	if (obj->type == type)
		return obj;
	else if (obj->type == OBJ_NONE) {
		if (type == OBJ_COMMIT)
			init_commit_node(r, (struct commit *) obj);
		else
			obj->type = type;
		return obj;
	}
	else {
		if (!quiet)
			error(_("object %s is a %s, not a %s"),
			      oid_to_hex(&obj->oid),
			      type_name(obj->type), type_name(type));
		return NULL;
	}
}

struct object *lookup_unknown_object(const unsigned char *sha1)
{
	struct object *obj = lookup_object(the_repository, sha1);

	if (!obj)
		obj = create_object(the_repository, sha1,
				    alloc_object_node(the_repository));
	return obj;
}
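
/*
 * Note on the eaten_p flag of parse_object_buffer(): when it is set to 1
 * on return, ownership of "buffer" has been transferred to the object
 * (tree buffers are kept, commit buffers may be cached), so the caller
 * must not free it.  parse_object() below follows exactly this protocol.
 */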
struct object *parse_object_buffer(struct repository *r, const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p)
{
	struct object *obj;

	*eaten_p = 0;

	obj = NULL;
	if (type == OBJ_BLOB) {
		struct blob *blob = lookup_blob(r, oid);
		if (blob) {
			if (parse_blob_buffer(blob, buffer, size))
				return NULL;
			obj = &blob->object;
		}
	} else if (type == OBJ_TREE) {
		struct tree *tree = lookup_tree(r, oid);
		if (tree) {
			obj = &tree->object;
			if (!tree->buffer)
				tree->object.parsed = 0;
			if (!tree->object.parsed) {
				if (parse_tree_buffer(tree, buffer, size))
					return NULL;
				*eaten_p = 1;
			}
		}
	} else if (type == OBJ_COMMIT) {
		struct commit *commit = lookup_commit(r, oid);
		if (commit) {
			if (parse_commit_buffer(r, commit, buffer, size, 1))
				return NULL;
			if (!get_cached_commit_buffer(r, commit, NULL)) {
				set_commit_buffer(r, commit, buffer, size);
				*eaten_p = 1;
			}
			obj = &commit->object;
		}
	} else if (type == OBJ_TAG) {
		struct tag *tag = lookup_tag(r, oid);
		if (tag) {
			if (parse_tag_buffer(r, tag, buffer, size))
				return NULL;
			obj = &tag->object;
		}
	} else {
		warning(_("object %s has unknown type id %d"), oid_to_hex(oid), type);
		obj = NULL;
	}
	return obj;
}

struct object *parse_object_or_die(const struct object_id *oid,
				   const char *name)
{
	struct object *o = parse_object(the_repository, oid);
	if (o)
		return o;

	die(_("unable to parse object: %s"), name ? name : oid_to_hex(oid));
}
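
/*
 * parse_object() first consults the in-memory object table, so calling
 * it repeatedly for the same object id does not re-read the object
 * database.  Blobs get one more shortcut: their contents are verified
 * with a NULL buffer, which allows check_object_signature() to stream
 * the data instead of holding the whole (possibly huge) blob in memory.
 */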
struct object *parse_object(struct repository *r, const struct object_id *oid)
{
	unsigned long size;
	enum object_type type;
	int eaten;
	const struct object_id *repl = lookup_replace_object(r, oid);
	void *buffer;
	struct object *obj;

	obj = lookup_object(r, oid->hash);
	if (obj && obj->parsed)
		return obj;

	if ((obj && obj->type == OBJ_BLOB && repo_has_object_file(r, oid)) ||
	    (!obj && repo_has_object_file(r, oid) &&
	     oid_object_info(r, oid, NULL) == OBJ_BLOB)) {
		if (check_object_signature(repl, NULL, 0, NULL) < 0) {
			error(_("hash mismatch %s"), oid_to_hex(oid));
			return NULL;
		}
		parse_blob_buffer(lookup_blob(r, oid), NULL, 0);
		return lookup_object(r, oid->hash);
	}

	buffer = repo_read_object_file(r, oid, &type, &size);
	if (buffer) {
		if (check_object_signature(repl, buffer, size, type_name(type)) < 0) {
			free(buffer);
			error(_("hash mismatch %s"), oid_to_hex(repl));
			return NULL;
		}

		obj = parse_object_buffer(r, oid, type, size,
					  buffer, &eaten);
		if (!eaten)
			free(buffer);
		return obj;
	}
	return NULL;
}
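
/*
 * object_list is a bare singly-linked list: object_list_insert() pushes
 * new entries at the head (so iteration sees the most recently inserted
 * items first) and object_list_contains() is a linear scan.  The
 * object_array code further down is the denser, name-carrying
 * alternative.
 */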
struct object_list *object_list_insert(struct object *item,
				       struct object_list **list_p)
{
	struct object_list *new_list = xmalloc(sizeof(struct object_list));
	new_list->item = item;
	new_list->next = *list_p;
	*list_p = new_list;
	return new_list;
}

int object_list_contains(struct object_list *list, struct object *obj)
{
	while (list) {
		if (list->item == obj)
			return 1;
		list = list->next;
	}
	return 0;
}
Add "named object array" concept We've had this notion of a "object_list" for a long time, which eventually grew a "name" member because some users (notably git-rev-list) wanted to name each object as it is generated. That object_list is great for some things, but it isn't all that wonderful for others, and the "name" member is generally not used by everybody. This patch splits the users of the object_list array up into two: the traditional list users, who want the list-like format, and who don't actually use or want the name. And another class of users that really used the list as an extensible array, and generally wanted to name the objects. The patch is fairly straightforward, but it's also biggish. Most of it really just cleans things up: switching the revision parsing and listing over to the array makes things like the builtin-diff usage much simpler (we now see exactly how many members the array has, and we don't get the objects reversed from the order they were on the command line). One of the main reasons for doing this at all is that the malloc overhead of the simple object list was actually pretty high, and the array is just a lot denser. So this patch brings down memory usage by git-rev-list by just under 3% (on top of all the other memory use optimizations) on the mozilla archive. It does add more lines than it removes, and more importantly, it adds a whole new infrastructure for maintaining lists of objects, but on the other hand, the new dynamic array code is pretty obvious. The change to builtin-diff-tree.c shows a fairly good example of why an array interface is sometimes more natural, and just much simpler for everybody. Signed-off-by: Linus Torvalds <torvalds@osdl.org> Signed-off-by: Junio C Hamano <junkio@cox.net>
2006-06-20 02:42:35 +02:00
object_array_entry: fix memory handling of the name field Previously, the memory management of the object_array_entry::name field was inconsistent and undocumented. object_array_entries are ultimately created by a single function, add_object_array_with_mode(), which has an argument "const char *name". This function used to simply set the name field to reference the string pointed to by the name parameter, and nobody on the object_array side ever freed the memory. Thus, it assumed that the memory for the name field would be managed by the caller, and that the lifetime of that string would be at least as long as the lifetime of the object_array_entry. But callers were inconsistent: * Some passed pointers to constant strings or argv entries, which was OK. * Some passed pointers to newly-allocated memory, but didn't arrange for the memory ever to be freed. * Some passed the return value of sha1_to_hex(), which is a pointer to a statically-allocated buffer that can be overwritten at any time. * Some passed pointers to refnames that they received from a for_each_ref()-type iteration, but the lifetimes of such refnames is not guaranteed by the refs API. Bring consistency to this mess by changing object_array to make its own copy for the object_array_entry::name field and free this memory when an object_array_entry is deleted from the array. Many callers were passing the empty string as the name parameter, so as a performance optimization, treat the empty string specially. Instead of making a copy, store a pointer to a statically-allocated empty string to object_array_entry::name. When deleting such an entry, skip the free(). Change the callers that were already passing copies to add_object_array_with_mode() to either skip the copy, or (if the memory needed to be allocated anyway) freeing the memory itself. A part of this commit effectively reverts 70d26c6e76 read_revisions_from_stdin: make copies for handle_revision_arg because the copying introduced by that commit (which is still necessary) is now done at a deeper level. Signed-off-by: Michael Haggerty <mhagger@alum.mit.edu> Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-05-25 11:08:14 +02:00

/*
 * A zero-length string to which object_array_entry::name can be
 * initialized without requiring a malloc/free.
 */
static char object_array_slopbuf[1];
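
/*
 * add_object_array_with_path() copies both "name" and "path" (the empty
 * string is special-cased to the slopbuf above to avoid an allocation),
 * so the caller keeps ownership of the strings it passes in;
 * object_array_release_entry() frees the copies again.  Pass
 * S_IFINVALID as the mode when there is no meaningful mode.
 */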
void add_object_array_with_path(struct object *obj, const char *name,
				struct object_array *array,
				unsigned mode, const char *path)
Add "named object array" concept We've had this notion of a "object_list" for a long time, which eventually grew a "name" member because some users (notably git-rev-list) wanted to name each object as it is generated. That object_list is great for some things, but it isn't all that wonderful for others, and the "name" member is generally not used by everybody. This patch splits the users of the object_list array up into two: the traditional list users, who want the list-like format, and who don't actually use or want the name. And another class of users that really used the list as an extensible array, and generally wanted to name the objects. The patch is fairly straightforward, but it's also biggish. Most of it really just cleans things up: switching the revision parsing and listing over to the array makes things like the builtin-diff usage much simpler (we now see exactly how many members the array has, and we don't get the objects reversed from the order they were on the command line). One of the main reasons for doing this at all is that the malloc overhead of the simple object list was actually pretty high, and the array is just a lot denser. So this patch brings down memory usage by git-rev-list by just under 3% (on top of all the other memory use optimizations) on the mozilla archive. It does add more lines than it removes, and more importantly, it adds a whole new infrastructure for maintaining lists of objects, but on the other hand, the new dynamic array code is pretty obvious. The change to builtin-diff-tree.c shows a fairly good example of why an array interface is sometimes more natural, and just much simpler for everybody. Signed-off-by: Linus Torvalds <torvalds@osdl.org> Signed-off-by: Junio C Hamano <junkio@cox.net>
2006-06-20 02:42:35 +02:00
{
	unsigned nr = array->nr;
	unsigned alloc = array->alloc;
	struct object_array_entry *objects = array->objects;
	struct object_array_entry *entry;
Add "named object array" concept We've had this notion of a "object_list" for a long time, which eventually grew a "name" member because some users (notably git-rev-list) wanted to name each object as it is generated. That object_list is great for some things, but it isn't all that wonderful for others, and the "name" member is generally not used by everybody. This patch splits the users of the object_list array up into two: the traditional list users, who want the list-like format, and who don't actually use or want the name. And another class of users that really used the list as an extensible array, and generally wanted to name the objects. The patch is fairly straightforward, but it's also biggish. Most of it really just cleans things up: switching the revision parsing and listing over to the array makes things like the builtin-diff usage much simpler (we now see exactly how many members the array has, and we don't get the objects reversed from the order they were on the command line). One of the main reasons for doing this at all is that the malloc overhead of the simple object list was actually pretty high, and the array is just a lot denser. So this patch brings down memory usage by git-rev-list by just under 3% (on top of all the other memory use optimizations) on the mozilla archive. It does add more lines than it removes, and more importantly, it adds a whole new infrastructure for maintaining lists of objects, but on the other hand, the new dynamic array code is pretty obvious. The change to builtin-diff-tree.c shows a fairly good example of why an array interface is sometimes more natural, and just much simpler for everybody. Signed-off-by: Linus Torvalds <torvalds@osdl.org> Signed-off-by: Junio C Hamano <junkio@cox.net>
2006-06-20 02:42:35 +02:00

	if (nr >= alloc) {
		alloc = (alloc + 32) * 2;
		REALLOC_ARRAY(objects, alloc);
Add "named object array" concept We've had this notion of a "object_list" for a long time, which eventually grew a "name" member because some users (notably git-rev-list) wanted to name each object as it is generated. That object_list is great for some things, but it isn't all that wonderful for others, and the "name" member is generally not used by everybody. This patch splits the users of the object_list array up into two: the traditional list users, who want the list-like format, and who don't actually use or want the name. And another class of users that really used the list as an extensible array, and generally wanted to name the objects. The patch is fairly straightforward, but it's also biggish. Most of it really just cleans things up: switching the revision parsing and listing over to the array makes things like the builtin-diff usage much simpler (we now see exactly how many members the array has, and we don't get the objects reversed from the order they were on the command line). One of the main reasons for doing this at all is that the malloc overhead of the simple object list was actually pretty high, and the array is just a lot denser. So this patch brings down memory usage by git-rev-list by just under 3% (on top of all the other memory use optimizations) on the mozilla archive. It does add more lines than it removes, and more importantly, it adds a whole new infrastructure for maintaining lists of objects, but on the other hand, the new dynamic array code is pretty obvious. The change to builtin-diff-tree.c shows a fairly good example of why an array interface is sometimes more natural, and just much simpler for everybody. Signed-off-by: Linus Torvalds <torvalds@osdl.org> Signed-off-by: Junio C Hamano <junkio@cox.net>
2006-06-20 02:42:35 +02:00
		array->alloc = alloc;
		array->objects = objects;
	}
	entry = &objects[nr];
	entry->item = obj;
	if (!name)
		entry->name = NULL;
	else if (!*name)
		/* Use our own empty string instead of allocating one: */
		entry->name = object_array_slopbuf;
	else
		entry->name = xstrdup(name);
	entry->mode = mode;
	if (path)
		entry->path = xstrdup(path);
	else
		entry->path = NULL;
Add "named object array" concept We've had this notion of a "object_list" for a long time, which eventually grew a "name" member because some users (notably git-rev-list) wanted to name each object as it is generated. That object_list is great for some things, but it isn't all that wonderful for others, and the "name" member is generally not used by everybody. This patch splits the users of the object_list array up into two: the traditional list users, who want the list-like format, and who don't actually use or want the name. And another class of users that really used the list as an extensible array, and generally wanted to name the objects. The patch is fairly straightforward, but it's also biggish. Most of it really just cleans things up: switching the revision parsing and listing over to the array makes things like the builtin-diff usage much simpler (we now see exactly how many members the array has, and we don't get the objects reversed from the order they were on the command line). One of the main reasons for doing this at all is that the malloc overhead of the simple object list was actually pretty high, and the array is just a lot denser. So this patch brings down memory usage by git-rev-list by just under 3% (on top of all the other memory use optimizations) on the mozilla archive. It does add more lines than it removes, and more importantly, it adds a whole new infrastructure for maintaining lists of objects, but on the other hand, the new dynamic array code is pretty obvious. The change to builtin-diff-tree.c shows a fairly good example of why an array interface is sometimes more natural, and just much simpler for everybody. Signed-off-by: Linus Torvalds <torvalds@osdl.org> Signed-off-by: Junio C Hamano <junkio@cox.net>
2006-06-20 02:42:35 +02:00
	array->nr = ++nr;
}

void add_object_array(struct object *obj, const char *name, struct object_array *array)
{
	add_object_array_with_path(obj, name, array, S_IFINVALID, NULL);
}

/*
 * Free all memory associated with an entry; the result is
 * in an unspecified state and should not be examined.
 */
static void object_array_release_entry(struct object_array_entry *ent)
{
	if (ent->name != object_array_slopbuf)
		free(ent->name);
	free(ent->path);
}
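
/*
 * object_array_pop() returns NULL on an empty array, which allows the
 * usual draining loop (sketch; "foo" is a placeholder array):
 *
 *	struct object *o;
 *	while ((o = object_array_pop(&foo)) != NULL) {
 *		... work on o ...
 *	}
 *
 * although checking foo.nr before popping works just as well.
 */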
struct object *object_array_pop(struct object_array *array)
{
	struct object *ret;

	if (!array->nr)
		return NULL;

	ret = array->objects[array->nr - 1].item;
	object_array_release_entry(&array->objects[array->nr - 1]);
	array->nr--;
	return ret;
}

void object_array_filter(struct object_array *array,
			 object_array_each_func_t want, void *cb_data)
{
	unsigned nr = array->nr, src, dst;
	struct object_array_entry *objects = array->objects;

	for (src = dst = 0; src < nr; src++) {
		if (want(&objects[src], cb_data)) {
			if (src != dst)
				objects[dst] = objects[src];
			dst++;
		} else {
			object_array_release_entry(&objects[src]);
		}
	}
	array->nr = dst;
}

void object_array_clear(struct object_array *array)
{
	int i;
	for (i = 0; i < array->nr; i++)
		object_array_release_entry(&array->objects[i]);
	FREE_AND_NULL(array->objects);
	array->nr = array->alloc = 0;
}

/*
 * Return true iff array already contains an entry with name.
 */
static int contains_name(struct object_array *array, const char *name)
{
	unsigned nr = array->nr, i;
	struct object_array_entry *object = array->objects;

	for (i = 0; i < nr; i++, object++)
		if (!strcmp(object->name, name))
			return 1;
	return 0;
}
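
/*
 * object_array_remove_duplicates() keeps the first entry for each name
 * and releases the rest; since contains_name() does a linear scan over
 * the already-kept prefix, the whole pass is O(n^2) in the array size.
 */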
void object_array_remove_duplicates(struct object_array *array)
{
	unsigned nr = array->nr, src;
	struct object_array_entry *objects = array->objects;

	array->nr = 0;
	for (src = 0; src < nr; src++) {
		if (!contains_name(array, objects[src].name)) {
			if (src != array->nr)
				objects[array->nr] = objects[src];
			array->nr++;
		} else {
			object_array_release_entry(&objects[src]);
		}
	}
}

void clear_object_flags(unsigned flags)
{
	int i;

	for (i=0; i < the_repository->parsed_objects->obj_hash_size; i++) {
		struct object *obj = the_repository->parsed_objects->obj_hash[i];
		if (obj)
			obj->flags &= ~flags;
	}
}

void clear_commit_marks_all(unsigned int flags)
{
	int i;

	for (i = 0; i < the_repository->parsed_objects->obj_hash_size; i++) {
		struct object *obj = the_repository->parsed_objects->obj_hash[i];

		if (obj && obj->type == OBJ_COMMIT)
			obj->flags &= ~flags;
	}
}

struct parsed_object_pool *parsed_object_pool_new(void)
{
	struct parsed_object_pool *o = xmalloc(sizeof(*o));
	memset(o, 0, sizeof(*o));

	o->blob_state = allocate_alloc_state();
	o->tree_state = allocate_alloc_state();
	o->commit_state = allocate_alloc_state();
	o->tag_state = allocate_alloc_state();
	o->object_state = allocate_alloc_state();

	o->is_shallow = -1;
	o->shallow_stat = xcalloc(1, sizeof(*o->shallow_stat));

	o->buffer_slab = allocate_commit_buffer_slab();

	return o;
}

struct raw_object_store *raw_object_store_new(void)
{
	struct raw_object_store *o = xmalloc(sizeof(*o));

	memset(o, 0, sizeof(*o));
	INIT_LIST_HEAD(&o->packed_git_mru);
	return o;
}
sha1-file: use an object_directory for the main object dir Our handling of alternate object directories is needlessly different from the main object directory. As a result, many places in the code basically look like this: do_something(r->objects->objdir); for (odb = r->objects->alt_odb_list; odb; odb = odb->next) do_something(odb->path); That gets annoying when do_something() is non-trivial, and we've resorted to gross hacks like creating fake alternates (see find_short_object_filename()). Instead, let's give each raw_object_store a unified list of object_directory structs. The first will be the main store, and everything after is an alternate. Very few callers even care about the distinction, and can just loop over the whole list (and those who care can just treat the first element differently). A few observations: - we don't need r->objects->objectdir anymore, and can just mechanically convert that to r->objects->odb->path - object_directory's path field needs to become a real pointer rather than a FLEX_ARRAY, in order to fill it with expand_base_dir() - we'll call prepare_alt_odb() earlier in many functions (i.e., outside of the loop). This may result in us calling it even when our function would be satisfied looking only at the main odb. But this doesn't matter in practice. It's not a very expensive operation in the first place, and in the majority of cases it will be a noop. We call it already (and cache its results) in prepare_packed_git(), and we'll generally check packs before loose objects. So essentially every program is going to call it immediately once per program. Arguably we should just prepare_alt_odb() immediately upon setting up the repository's object directory, which would save us sprinkling calls throughout the code base (and forgetting to do so has been a source of subtle bugs in the past). But I've stopped short of that here, since there are already a lot of other moving parts in this patch. - Most call sites just get shorter. The check_and_freshen() functions are an exception, because they have entry points to handle local and nonlocal directories separately. Signed-off-by: Jeff King <peff@peff.net> Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-11-12 15:50:39 +01:00
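To make the resulting shape concrete, here is a minimal sketch of the caller pattern the unified list allows; for_each_odb_path() and do_something() are hypothetical placeholders, not functions in this file:

static void for_each_odb_path(struct repository *r,
			      void (*do_something)(const char *path))
{
	struct object_directory *odb;

	/* make sure alternates have been appended to the list */
	prepare_alt_odb(r);
	for (odb = r->objects->odb; odb; odb = odb->next)
		do_something(odb->path);
}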
static void free_object_directory(struct object_directory *odb)
{
free(odb->path);
odb_clear_loose_cache(odb);
free(odb);
}
static void free_object_directories(struct raw_object_store *o)
{
while (o->odb) {
struct object_directory *next;
next = o->odb->next;
free_object_directory(o->odb);
o->odb = next;
}
}
void raw_object_store_clear(struct raw_object_store *o)
{
FREE_AND_NULL(o->alternate_db);
oidmap_free(o->replace_map, 1);
FREE_AND_NULL(o->replace_map);
free_commit_graph(o->commit_graph);
o->commit_graph = NULL;
o->commit_graph_attempted = 0;
free_object_directories(o);
o->odb_tail = NULL;
o->loaded_alternates = 0;
INIT_LIST_HEAD(&o->packed_git_mru);
close_all_packs(o);
o->packed_git = NULL;
}
void parsed_object_pool_clear(struct parsed_object_pool *o)
{
/*
* As objects are allocated in slabs (see alloc.c), we do
* not need to free each object, but each slab instead.
*
* Before doing so, we need to free any additional memory
* the objects may hold.
*/
unsigned i;
for (i = 0; i < o->obj_hash_size; i++) {
struct object *obj = o->obj_hash[i];
if (!obj)
continue;
if (obj->type == OBJ_TREE)
free_tree_buffer((struct tree*)obj);
else if (obj->type == OBJ_COMMIT)
release_commit_memory(o, (struct commit*)obj);
else if (obj->type == OBJ_TAG)
release_tag_memory((struct tag*)obj);
}
FREE_AND_NULL(o->obj_hash);
o->obj_hash_size = 0;
free_commit_buffer_slab(o->buffer_slab);
o->buffer_slab = NULL;
clear_alloc_state(o->blob_state);
clear_alloc_state(o->tree_state);
clear_alloc_state(o->commit_state);
clear_alloc_state(o->tag_state);
clear_alloc_state(o->object_state);
stat_validity_clear(o->shallow_stat);
FREE_AND_NULL(o->blob_state);
FREE_AND_NULL(o->tree_state);
FREE_AND_NULL(o->commit_state);
FREE_AND_NULL(o->tag_state);
FREE_AND_NULL(o->object_state);
FREE_AND_NULL(o->shallow_stat);
}
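For context, a minimal sketch of how a teardown path might pair the *_new() constructors above with these *_clear() functions; teardown_repository() is a hypothetical stand-in for a caller such as repo_clear():

static void teardown_repository(struct repository *repo)
{
	/* release packs, loose-object caches and object directories */
	raw_object_store_clear(repo->objects);
	FREE_AND_NULL(repo->objects);

	/* release parsed commits, trees, tags and their allocation slabs */
	parsed_object_pool_clear(repo->parsed_objects);
	FREE_AND_NULL(repo->parsed_objects);
}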