Merge branch 'nd/trace-index-ops'

* nd/trace-index-ops:
  trace: measure where the time is spent in the index-heavy operations

commit 090dbea684
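Every hunk in this series instruments a function the same way: record a nanosecond timestamp at the top of the function with getnanotime() and report the elapsed time with a labeled trace_performance_since() call on the way out (both helpers come from Git's trace.h). Below is a minimal standalone sketch of that pattern, not Git code: nanotime(), report_since() and index_heavy_operation() are hypothetical stand-ins, with clock_gettime(CLOCK_MONOTONIC) playing the role of getnanotime().

/* Minimal sketch of the measurement pattern applied throughout this commit.
 * nanotime() and report_since() are hypothetical stand-ins for Git's
 * getnanotime() and trace_performance_since() from trace.h. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t nanotime(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void report_since(uint64_t start, const char *label)
{
	uint64_t elapsed = nanotime() - start;
	/* Git's output looks roughly like "performance: <seconds> s: <label>";
	 * this is a simplified stand-in. */
	fprintf(stderr, "performance: %.9f s: %s\n", elapsed / 1e9, label);
}

static void index_heavy_operation(void)
{
	uint64_t start = nanotime();	/* probe placed at function entry */

	/* ... the work being measured goes here ... */

	report_since(start, "example operation");	/* probe on the exit path */
}

int main(void)
{
	index_heavy_operation();
	return 0;
}

Keeping each probe down to one timestamp and one labeled report is what lets the same two lines be dropped into run_diff_files(), read_directory(), lazy_init_name_hash(), preload_index(), refresh_index(), read_index_from() and do_write_index() without restructuring any of them.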
diff-lib.c

@@ -92,6 +92,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
 	int diff_unmerged_stage = revs->max_count;
 	unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED)
 			      ? CE_MATCH_RACY_IS_DIRTY : 0);
+	uint64_t start = getnanotime();
 
 	diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/");
 
@@ -246,6 +247,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
 	}
 	diffcore_std(&revs->diffopt);
 	diff_flush(&revs->diffopt);
+	trace_performance_since(start, "diff-files");
 	return 0;
 }
 
@@ -512,6 +514,7 @@ static int diff_cache(struct rev_info *revs,
 int run_diff_index(struct rev_info *revs, int cached)
 {
 	struct object_array_entry *ent;
+	uint64_t start = getnanotime();
 
 	ent = revs->pending.objects;
 	if (diff_cache(revs, &ent->item->oid, ent->name, cached))
@@ -521,6 +524,7 @@ int run_diff_index(struct rev_info *revs, int cached)
 	diffcore_fix_diff_index(&revs->diffopt);
 	diffcore_std(&revs->diffopt);
 	diff_flush(&revs->diffopt);
+	trace_performance_since(start, "diff-index");
 	return 0;
 }
 
dir.c

@@ -2245,6 +2245,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
 		  const char *path, int len, const struct pathspec *pathspec)
 {
 	struct untracked_cache_dir *untracked;
+	uint64_t start = getnanotime();
 
 	if (has_symlink_leading_path(path, len))
 		return dir->nr;
@@ -2283,6 +2284,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
 		dir->nr = i;
 	}
 
+	trace_performance_since(start, "read directory %.*s", len, path);
 	if (dir->untracked) {
 		static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS);
 		trace_printf_key(&trace_untracked_stats,
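One detail worth noting in the read_directory() probe above: the label uses the "%.*s" precision-from-argument idiom, printing exactly len bytes of path, because the function receives the directory as a pointer-plus-length pair. A tiny standalone illustration of that idiom (the values are made up for the example):

#include <stdio.h>

int main(void)
{
	const char *path = "subdir/deeper/";
	int len = 6;	/* print only the leading "subdir" part of the buffer */

	/* "%.*s" reads the maximum number of characters to print from the argument list */
	printf("read directory %.*s\n", len, path);	/* prints: read directory subdir */
	return 0;
}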
name-hash.c

@@ -578,6 +578,8 @@ static void threaded_lazy_init_name_hash(
 
 static void lazy_init_name_hash(struct index_state *istate)
 {
+	uint64_t start = getnanotime();
+
 	if (istate->name_hash_initialized)
 		return;
 	hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
@@ -600,6 +602,7 @@ static void lazy_init_name_hash(struct index_state *istate)
 	}
 
 	istate->name_hash_initialized = 1;
+	trace_performance_since(start, "initialize name hash");
 }
 
 /*
preload-index.c

@@ -78,6 +78,7 @@ static void preload_index(struct index_state *index,
 {
 	int threads, i, work, offset;
 	struct thread_data data[MAX_PARALLEL];
+	uint64_t start = getnanotime();
 
 	if (!core_preload_index)
 		return;
@@ -108,6 +109,7 @@ static void preload_index(struct index_state *index,
 		if (pthread_join(p->pthread, NULL))
 			die("unable to join threaded lstat");
 	}
+	trace_performance_since(start, "preload index");
 }
 #endif
 
read-cache.c

@@ -1371,6 +1371,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
 	const char *typechange_fmt;
 	const char *added_fmt;
 	const char *unmerged_fmt;
+	uint64_t start = getnanotime();
 
 	modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
 	deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
@@ -1441,6 +1442,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
 
 		replace_index_entry(istate, i, new);
 	}
+	trace_performance_since(start, "refresh index");
 	return has_errors;
 }
 
@@ -1871,6 +1873,7 @@ static void freshen_shared_index(const char *shared_index, int warn)
 int read_index_from(struct index_state *istate, const char *path,
 		    const char *gitdir)
 {
+	uint64_t start = getnanotime();
 	struct split_index *split_index;
 	int ret;
 	char *base_sha1_hex;
@@ -1881,6 +1884,7 @@ int read_index_from(struct index_state *istate, const char *path,
 		return istate->cache_nr;
 
 	ret = do_read_index(istate, path, 0);
+	trace_performance_since(start, "read cache %s", path);
 
 	split_index = istate->split_index;
 	if (!split_index || is_null_sha1(split_index->base_sha1)) {
@@ -1904,6 +1908,7 @@ int read_index_from(struct index_state *istate, const char *path,
 	freshen_shared_index(base_path, 0);
 	merge_base_index(istate);
 	post_read_index_from(istate);
+	trace_performance_since(start, "read cache %s", base_path);
 	free(base_path);
 	return ret;
 }
@@ -2233,6 +2238,7 @@ void update_index_if_able(struct index_state *istate, struct lock_file *lockfile
 static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
 			  int strip_extensions)
 {
+	uint64_t start = getnanotime();
 	int newfd = tempfile->fd;
 	git_SHA_CTX c;
 	struct cache_header hdr;
@@ -2373,6 +2379,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
 		return -1;
 	istate->timestamp.sec = (unsigned int)st.st_mtime;
 	istate->timestamp.nsec = ST_MTIME_NSEC(st);
+	trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
 	return 0;
 }
 
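These probes are silent by default; they only emit output when performance tracing is enabled. Setting GIT_TRACE_PERFORMANCE to 1 or true (stderr), to an open file descriptor, or to an absolute path, e.g. GIT_TRACE_PERFORMANCE=1 git status, makes each labeled measurement appear on the chosen trace target, so the time spent in diff-files, diff-index, reading directories, initializing the name hash, preloading, refreshing, reading and writing the index can be compared for a single command.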