git-commit-vandalism/preload-index.c
Junio C Hamano 7e794d0a3f Merge branch 'nd/unpack-trees-with-cache-tree'
The unpack_trees() API used in checking out a branch and merging
walks one or more trees along with the index.  When the cache-tree
in the index tells us that we are walking a tree whose flattened
contents are already known (i.e. they match a contiguous span in
the index), the walk can skip opening tree objects recursively and
listing their entries, and instead scan that span linearly, which
is much more efficient.  This topic implements that optimization.
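
To illustrate the shape of the optimization (a minimal sketch; the
struct and helper names below are hypothetical stand-ins, not git's
actual cache-tree API): a valid cache-tree node knows how many
consecutive index entries its subtree flattens to, so the walker can
consume that span linearly instead of opening the tree object and
recursing.

	/*
	 * Illustrative sketch only; git's real logic lives in
	 * unpack-trees.c and cache-tree.c.
	 */
	struct ct_node {
		int entry_count; /* index entries this subtree covers; -1 if invalid */
	};

	static int walk_tree_recursively(int pos); /* hypothetical slow path */

	static int walk_one_tree(const struct ct_node *ct, int pos)
	{
		if (ct->entry_count >= 0)
			/* fast path: index[pos .. pos + entry_count) is this tree */
			return pos + ct->entry_count;
		/* slow path: open the tree object and list its entries */
		return walk_tree_recursively(pos);
	}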

* nd/unpack-trees-with-cache-tree:
  Document update for nd/unpack-trees-with-cache-tree
  cache-tree: verify valid cache-tree in the test suite
  unpack-trees: add missing cache invalidation
  unpack-trees: reuse (still valid) cache-tree from src_index
  unpack-trees: reduce malloc in cache-tree walk
  unpack-trees: optimize walking same trees with cache-tree
  unpack-trees: add performance tracing
  trace.h: support nested performance tracing
2018-09-17 13:53:53 -07:00

/*
 * Copyright (C) 2008 Linus Torvalds
 */
#include "cache.h"
#include "pathspec.h"
#include "dir.h"
#include "fsmonitor.h"

#ifdef NO_PTHREADS
static void preload_index(struct index_state *index,
			  const struct pathspec *pathspec)
{
	; /* nothing */
}
#else

#include <pthread.h>

/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism to 20 threads, and we want
 * to have at least 500 lstat's per thread for it to
 * be worth starting a thread.
 */
#define MAX_PARALLEL (20)
#define THREAD_COST (500)
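
/*
 * Worked example (illustrative numbers, not from the source): an
 * index with 10,000 entries yields 10000 / 500 = 20 threads, exactly
 * the MAX_PARALLEL cap; an index with 800 entries yields only one,
 * so preload_index() returns early and stat data is gathered on
 * demand during the normal index refresh instead.
 */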

struct thread_data {
	pthread_t pthread;
	struct index_state *index;
	struct pathspec pathspec;
	int offset, nr;	/* the span of index entries this thread covers */
};

static void *preload_thread(void *_data)
{
	int nr;
	struct thread_data *p = _data;
	struct index_state *index = p->index;
	struct cache_entry **cep = index->cache + p->offset;
	struct cache_def cache = CACHE_DEF_INIT;

	/* Clamp this thread's span to the end of the index */
	nr = p->nr;
	if (nr + p->offset > index->cache_nr)
		nr = index->cache_nr - p->offset;
	do {
		struct cache_entry *ce = *cep++;
		struct stat st;

		/* Skip entries that cannot, or need not, be refreshed */
		if (ce_stage(ce))
			continue;
		if (S_ISGITLINK(ce->ce_mode))
			continue;
		if (ce_uptodate(ce))
			continue;
		if (ce_skip_worktree(ce))
			continue;
		if (ce->ce_flags & CE_FSMONITOR_VALID)
			continue;
		if (!ce_path_match(index, ce, &p->pathspec, NULL))
			continue;
		if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce)))
			continue;
		/* lstat() the path; if it still matches, mark the entry clean */
		if (lstat(ce->name, &st))
			continue;
		if (ie_match_stat(index, ce, &st, CE_MATCH_RACY_IS_DIRTY|CE_MATCH_IGNORE_FSMONITOR))
			continue;
		ce_mark_uptodate(ce);
		mark_fsmonitor_valid(ce);
	} while (--nr > 0);
	cache_def_clear(&cache);
	return NULL;
}

static void preload_index(struct index_state *index,
			  const struct pathspec *pathspec)
{
	int threads, i, work, offset;
	struct thread_data data[MAX_PARALLEL];

	if (!core_preload_index)
		return;

	/* One thread per THREAD_COST entries, capped at MAX_PARALLEL */
	threads = index->cache_nr / THREAD_COST;
	if ((index->cache_nr > 1) && (threads < 2) && getenv("GIT_FORCE_PRELOAD_TEST"))
		threads = 2;
	if (threads < 2)
		return;
	trace_performance_enter();
	if (threads > MAX_PARALLEL)
		threads = MAX_PARALLEL;
	/* Hand each thread a contiguous, roughly equal span of the index */
	offset = 0;
	work = DIV_ROUND_UP(index->cache_nr, threads);
	memset(&data, 0, sizeof(data));
	for (i = 0; i < threads; i++) {
		struct thread_data *p = data+i;
		p->index = index;
		if (pathspec)
			copy_pathspec(&p->pathspec, pathspec);
		p->offset = offset;
		p->nr = work;
		offset += work;
		if (pthread_create(&p->pthread, NULL, preload_thread, p))
			die("unable to create threaded lstat");
	}
	for (i = 0; i < threads; i++) {
		struct thread_data *p = data+i;
		if (pthread_join(p->pthread, NULL))
			die("unable to join threaded lstat");
	}
	trace_performance_leave("preload index");
}
#endif

int read_index_preload(struct index_state *index,
		       const struct pathspec *pathspec)
{
	int retval = read_index(index);

	preload_index(index, pathspec);
	return retval;
}