git-commit-vandalism/split-index.c

#include "cache.h"
#include "alloc.h"
#include "split-index.h"
#include "ewah/ewok.h"
struct split_index *init_split_index(struct index_state *istate)
{
if (!istate->split_index) {
if (istate->sparse_index)
die(_("cannot use split index with a sparse index"));
CALLOC_ARRAY(istate->split_index, 1);
istate->split_index->refcount = 1;
}
return istate->split_index;
}
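/*
 * Read the "link" extension of a split index. As parsed below, the
 * payload starts with the object name of the base (shared) index
 * (the_hash_algo->rawsz raw bytes), optionally followed by two
 * EWAH-serialized bitmaps: the delete bitmap and the replace bitmap,
 * which record how this index diverges from the base entries.
 */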
int read_link_extension(struct index_state *istate,
const void *data_, unsigned long sz)
{
const unsigned char *data = data_;
struct split_index *si;
int ret;
if (sz < the_hash_algo->rawsz)
return error("corrupt link extension (too short)");
si = init_split_index(istate);
oidread(&si->base_oid, data);
data += the_hash_algo->rawsz;
sz -= the_hash_algo->rawsz;
if (!sz)
return 0;
si->delete_bitmap = ewah_new();
ret = ewah_read_mmap(si->delete_bitmap, data, sz);
if (ret < 0)
return error("corrupt delete bitmap in link extension");
data += ret;
sz -= ret;
si->replace_bitmap = ewah_new();
ret = ewah_read_mmap(si->replace_bitmap, data, sz);
if (ret < 0)
return error("corrupt replace bitmap in link extension");
if (ret != sz)
return error("garbage at the end of link extension");
return 0;
}
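/*
 * Write the "link" extension: the base index object name, followed by
 * the delete and replace bitmaps when they have been prepared (see
 * prepare_to_write_split_index() below).
 */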
int write_link_extension(struct strbuf *sb,
struct index_state *istate)
{
struct split_index *si = istate->split_index;
strbuf_add(sb, si->base_oid.hash, the_hash_algo->rawsz);
if (!si->delete_bitmap && !si->replace_bitmap)
return 0;
ewah_serialize_strbuf(si->delete_bitmap, sb);
ewah_serialize_strbuf(si->replace_bitmap, sb);
return 0;
}
static void mark_base_index_entries(struct index_state *base)
{
int i;
/*
* To keep track of the shared entries between
* istate->base->cache[] and istate->cache[], base entry
* position is stored in each base entry. All positions start
* from 1 instead of 0, which is reserved to say "this is a new
* entry".
*/
for (i = 0; i < base->cache_nr; i++)
base->cache[i]->index = i + 1;
}
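/*
 * Turn the entries currently held in istate->cache[] into the base
 * (shared) index, so that istate itself only needs to record changes
 * relative to that base from now on.
 */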
void move_cache_to_base_index(struct index_state *istate)
{
struct split_index *si = istate->split_index;
int i;
/*
* If there was a previous base index, then transfer ownership of allocated
* entries to the parent index.
*/
if (si->base &&
si->base->ce_mem_pool) {
if (!istate->ce_mem_pool) {
istate->ce_mem_pool = xmalloc(sizeof(struct mem_pool));
mem_pool_init(istate->ce_mem_pool, 0);
}
mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
}
ALLOC_ARRAY(si->base, 1);
index_state_init(si->base, istate->repo);
si->base->version = istate->version;
/* zero timestamp disables racy test in ce_write_index() */
si->base->timestamp = istate->timestamp;
ALLOC_GROW(si->base->cache, istate->cache_nr, si->base->cache_alloc);
si->base->cache_nr = istate->cache_nr;
/*
* The mem_pool needs to move with the allocated entries.
*/
si->base->ce_mem_pool = istate->ce_mem_pool;
istate->ce_mem_pool = NULL;
COPY_ARRAY(si->base->cache, istate->cache, istate->cache_nr);
mark_base_index_entries(si->base);
for (i = 0; i < si->base->cache_nr; i++)
si->base->cache[i]->ce_flags &= ~CE_UPDATE_IN_BASE;
}
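/*
 * ewah_each_bit() callback used by merge_base_index(): each set bit in
 * the delete bitmap marks a base entry that must be removed from the
 * merged index.
 */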
static void mark_entry_for_delete(size_t pos, void *data)
{
struct index_state *istate = data;
if (pos >= istate->cache_nr)
die("position for delete %d exceeds base index size %d",
(int)pos, istate->cache_nr);
istate->cache[pos]->ce_flags |= CE_REMOVE;
istate->split_index->nr_deletions++;
}
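/*
 * ewah_each_bit() callback used by merge_base_index(): each set bit in
 * the replace bitmap pairs the next saved (nameless) entry with the
 * base entry at that position and copies its data over the base entry,
 * keeping the base entry's name.
 */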
static void replace_entry(size_t pos, void *data)
{
struct index_state *istate = data;
struct split_index *si = istate->split_index;
struct cache_entry *dst, *src;
if (pos >= istate->cache_nr)
die("position for replacement %d exceeds base index size %d",
(int)pos, istate->cache_nr);
if (si->nr_replacements >= si->saved_cache_nr)
die("too many replacements (%d vs %d)",
si->nr_replacements, si->saved_cache_nr);
dst = istate->cache[pos];
if (dst->ce_flags & CE_REMOVE)
die("entry %d is marked as both replaced and deleted",
(int)pos);
src = si->saved_cache[si->nr_replacements];
if (ce_namelen(src))
die("corrupt link extension, entry %d should have "
"zero length name", (int)pos);
src->index = pos + 1;
src->ce_flags |= CE_UPDATE_IN_BASE;
src->ce_namelen = dst->ce_namelen;
copy_cache_entry(dst, src);
discard_cache_entry(src);
si->nr_replacements++;
}
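/*
 * Rebuild the full in-memory index after reading a split index: start
 * from the base entries, apply the replace and delete bitmaps, then
 * add the entries stored in the split index file itself.
 */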
void merge_base_index(struct index_state *istate)
{
struct split_index *si = istate->split_index;
unsigned int i;
mark_base_index_entries(si->base);
si->saved_cache = istate->cache;
si->saved_cache_nr = istate->cache_nr;
istate->cache_nr = si->base->cache_nr;
istate->cache = NULL;
istate->cache_alloc = 0;
ALLOC_GROW(istate->cache, istate->cache_nr, istate->cache_alloc);
COPY_ARRAY(istate->cache, si->base->cache, istate->cache_nr);
si->nr_deletions = 0;
si->nr_replacements = 0;
ewah_each_bit(si->replace_bitmap, replace_entry, istate);
ewah_each_bit(si->delete_bitmap, mark_entry_for_delete, istate);
if (si->nr_deletions)
remove_marked_cache_entries(istate, 0);
for (i = si->nr_replacements; i < si->saved_cache_nr; i++) {
if (!ce_namelen(si->saved_cache[i]))
die("corrupt link extension, entry %d should "
"have non-zero length name", i);
add_index_entry(istate, si->saved_cache[i],
ADD_CACHE_OK_TO_ADD |
ADD_CACHE_KEEP_CACHE_TREE |
/*
* we may have to replay what
* merge-recursive.c:update_stages()
* does, which has this flag on
*/
ADD_CACHE_SKIP_DFCHECK);
si->saved_cache[i] = NULL;
}
ewah_free(si->delete_bitmap);
ewah_free(si->replace_bitmap);
FREE_AND_NULL(si->saved_cache);
si->delete_bitmap = NULL;
si->replace_bitmap = NULL;
si->saved_cache_nr = 0;
}
/*
* Compare most of the fields in two cache entries, i.e. all except the
* hashmap_entry and the name.
*/
static int compare_ce_content(struct cache_entry *a, struct cache_entry *b)
{
const unsigned int ondisk_flags = CE_STAGEMASK | CE_VALID |
CE_EXTENDED_FLAGS;
unsigned int ce_flags = a->ce_flags;
unsigned int base_flags = b->ce_flags;
int ret;
/* only on-disk flags matter */
a->ce_flags &= ondisk_flags;
b->ce_flags &= ondisk_flags;
ret = memcmp(&a->ce_stat_data, &b->ce_stat_data,
offsetof(struct cache_entry, name) -
offsetof(struct cache_entry, oid)) ||
!oideq(&a->oid, &b->oid);
a->ce_flags = ce_flags;
b->ce_flags = base_flags;
return ret;
}
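/*
 * Decide, just before writing, which entries belong in the split index
 * and which base entries are to be deleted or replaced, and record the
 * result in the delete and replace bitmaps.
 */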
void prepare_to_write_split_index(struct index_state *istate)
{
struct split_index *si = init_split_index(istate);
struct cache_entry **entries = NULL, *ce;
int i, nr_entries = 0, nr_alloc = 0;
si->delete_bitmap = ewah_new();
si->replace_bitmap = ewah_new();
if (si->base) {
/* Go through istate->cache[] and mark CE_MATCHED to
* entry with positive index. We'll go through
* base->cache[] later to delete all entries in base
* that are not marked with either CE_MATCHED or
* CE_UPDATE_IN_BASE. If istate->cache[i] is a
* duplicate, deduplicate it.
*/
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *base;
ce = istate->cache[i];
if (!ce->index) {
/*
* During simple update index operations this
* is a cache entry that is not present in
* the shared index. It will be added to the
* split index.
*
* However, it might also represent a file
* that already has a cache entry in the
* shared index, but a new index has just
* been constructed by unpack_trees(), and
* this entry now refers to different content
* than what was recorded in the original
* index, e.g. during 'read-tree -m HEAD^' or
* 'checkout HEAD^'. In this case the
* original entry in the shared index will be
* marked as deleted, and this entry will be
* added to the split index.
*/
continue;
}
if (ce->index > si->base->cache_nr) {
BUG("ce refers to a shared ce at %d, which is beyond the shared index size %d",
ce->index, si->base->cache_nr);
}
ce->ce_flags |= CE_MATCHED; /* or "shared" */
base = si->base->cache[ce->index - 1];
if (ce == base) {
/* The entry is present in the shared index. */
if (ce->ce_flags & CE_UPDATE_IN_BASE) {
/*
* Already marked for inclusion in
* the split index, either because
* the corresponding file was
* modified and the cached stat data
* was refreshed, or because there
* is already a replacement entry in
* the split index.
* Nothing more to do here.
*/
} else if (!ce_uptodate(ce) &&
is_racy_timestamp(istate, ce)) {
/*
* A racily clean cache entry stored
* only in the shared index: it must
* be added to the split index, so
* the subsequent do_write_index()
* can smudge its stat data.
*/
ce->ce_flags |= CE_UPDATE_IN_BASE;
} else {
/*
* The entry is only present in the
* shared index and it was not
* refreshed.
* Just leave it there.
*/
}
continue;
}
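/*
 * Sanity check: a copied entry must refer to the same path as the
 * shared entry it points at.  If the names differ, sever the link
 * and handle it as a brand new entry; the shared entry it pointed
 * at will then be marked as deleted below.
 */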
if (ce->ce_namelen != base->ce_namelen ||
strcmp(ce->name, base->name)) {
ce->index = 0;
continue;
}
/*
* This is the copy of a cache entry that is present
* in the shared index, created by unpack_trees()
* while it constructed a new index.
*/
if (ce->ce_flags & CE_UPDATE_IN_BASE) {
/*
* Already marked for inclusion in the split
* index, either because the corresponding
* file was modified and the cached stat data
* was refreshed, or because the original
* entry already had a replacement entry in
* the split index.
* Nothing to do.
*/
} else if (!ce_uptodate(ce) &&
is_racy_timestamp(istate, ce)) {
/*
* A copy of a racily clean cache entry from
* the shared index. It must be added to
* the split index, so the subsequent
* do_write_index() can smudge its stat data.
*/
ce->ce_flags |= CE_UPDATE_IN_BASE;
} else {
/*
* Thoroughly compare the cached data to see
* whether it should be marked for inclusion
* in the split index.
*
* This comparison might be unnecessary, as
* code paths modifying the cached data do
* set CE_UPDATE_IN_BASE as well.
*/
if (compare_ce_content(ce, base))
ce->ce_flags |= CE_UPDATE_IN_BASE;
}
discard_cache_entry(base);
si->base->cache[ce->index - 1] = ce;
}
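/*
 * Walk the shared index: entries that were removed or never matched
 * above are recorded in the delete bitmap; entries marked
 * CE_UPDATE_IN_BASE are recorded in the replace bitmap and queued for
 * writing to the split index with CE_STRIP_NAME set, because a
 * replacement entry is identified by its position in the shared index
 * rather than by its name.
 */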
for (i = 0; i < si->base->cache_nr; i++) {
ce = si->base->cache[i];
if ((ce->ce_flags & CE_REMOVE) ||
!(ce->ce_flags & CE_MATCHED))
ewah_set(si->delete_bitmap, i);
else if (ce->ce_flags & CE_UPDATE_IN_BASE) {
ewah_set(si->replace_bitmap, i);
ce->ce_flags |= CE_STRIP_NAME;
ALLOC_GROW(entries, nr_entries+1, nr_alloc);
entries[nr_entries++] = ce;
}
if (is_null_oid(&ce->oid))
istate->drop_cache_tree = 1;
}
}
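/*
 * Entries that are not backed by the shared index (or all entries,
 * when there is no shared index yet) are written to the split index
 * verbatim, unless they are being removed.  CE_MATCHED was only a
 * temporary marker, so clear it again.
 */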
for (i = 0; i < istate->cache_nr; i++) {
ce = istate->cache[i];
if ((!si->base || !ce->index) && !(ce->ce_flags & CE_REMOVE)) {
assert(!(ce->ce_flags & CE_STRIP_NAME));
ALLOC_GROW(entries, nr_entries+1, nr_alloc);
entries[nr_entries++] = ce;
}
ce->ce_flags &= ~CE_MATCHED;
}
/*
* take cache[] out temporarily, put entries[] in its place
* for writing
*/
si->saved_cache = istate->cache;
si->saved_cache_nr = istate->cache_nr;
istate->cache = entries;
istate->cache_nr = nr_entries;
}
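/*
 * Called after the split index has been written: free the bitmaps and put
 * back the full cache[] that prepare_to_write_split_index() temporarily
 * swapped out.
 */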
void finish_writing_split_index(struct index_state *istate)
{
struct split_index *si = init_split_index(istate);
ewah_free(si->delete_bitmap);
ewah_free(si->replace_bitmap);
si->delete_bitmap = NULL;
si->replace_bitmap = NULL;
free(istate->cache);
istate->cache = si->saved_cache;
istate->cache_nr = si->saved_cache_nr;
}
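/*
 * Drop this index's reference to its split_index; the split_index itself,
 * including the shared base index, is freed only when the last reference
 * is gone.
 */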
void discard_split_index(struct index_state *istate)
{
struct split_index *si = istate->split_index;
if (!si)
return;
istate->split_index = NULL;
si->refcount--;
if (si->refcount)
return;
if (si->base) {
discard_index(si->base);
free(si->base);
}
free(si);
}
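/*
 * If the entry is the one stored in the shared base index, keep it alive
 * but mark it CE_REMOVE so that it is dropped from the base when the split
 * index is written; an entry that is not shared can be discarded right away.
 */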
void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce)
{
if (ce->index &&
istate->split_index &&
istate->split_index->base &&
ce->index <= istate->split_index->base->cache_nr &&
ce == istate->split_index->base->cache[ce->index - 1])
ce->ce_flags |= CE_REMOVE;
else
discard_cache_entry(ce);
}
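/*
 * Let the new entry take over the old entry's slot in the shared base
 * index: it inherits the old entry's position, and whatever currently
 * occupies that slot (if it is not the old entry itself) is discarded.
 */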
void replace_index_entry_in_base(struct index_state *istate,
struct cache_entry *old_entry,
struct cache_entry *new_entry)
{
if (old_entry->index &&
istate->split_index &&
istate->split_index->base &&
old_entry->index <= istate->split_index->base->cache_nr) {
new_entry->index = old_entry->index;
if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
discard_cache_entry(istate->split_index->base->cache[new_entry->index - 1]);
istate->split_index->base->cache[new_entry->index - 1] = new_entry;
}
}
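/*
 * Turn on the split index (e.g. for 'git update-index --split-index') and
 * flag the in-core index as changed so that the next write actually splits
 * it into a shared index and a split index.
 */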
void add_split_index(struct index_state *istate)
{
if (!istate->split_index) {
init_split_index(istate);
istate->cache_changed |= SPLIT_INDEX_ORDERED;
}
}
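/*
 * Turn off the split index (e.g. for 'git update-index --no-split-index'):
 * fold the shared entries back into the main index and mark it changed so
 * that it is rewritten as a single index file.
 */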
void remove_split_index(struct index_state *istate)
{
if (istate->split_index) {
if (istate->split_index->base) {
/*
* When removing the split index, we need to move
* ownership of the mem_pool associated with the
* base index to the main index. There may be cache entries
* allocated from the base's memory pool that are shared with
* the_index.cache[].
*/
mem_pool_combine(istate->ce_mem_pool,
istate->split_index->base->ce_mem_pool);
/*
* The split index no longer owns the mem_pool backing
* its cache array. As we are discarding this index,
* mark the index as having no cache entries, so it
* will not attempt to clean up the cache entries or
* validate them.
*/
istate->split_index->base->cache_nr = 0;
}
/*
* We can discard the split index because its
* memory pool has been incorporated into the
* memory pool associated with the_index.
*/
discard_split_index(istate);
istate->cache_changed |= SOMETHING_CHANGED;
}
}