Merge branch 'mh/mmap-packed-refs'
Operations that do not touch the (majority of) packed refs have been
optimized by making accesses to the packed-refs file lazy; we no longer
pre-parse everything, and an access to a single ref in packed-refs does
not touch the majority of irrelevant refs, either.

* mh/mmap-packed-refs: (21 commits)
  packed-backend.c: rename a bunch of things and update comments
  mmapped_ref_iterator: inline into `packed_ref_iterator`
  ref_cache: remove support for storing peeled values
  packed_ref_store: get rid of the `ref_cache` entirely
  ref_store: implement `refs_peel_ref()` generically
  packed_read_raw_ref(): read the reference from the mmapped buffer
  packed_ref_iterator_begin(): iterate using `mmapped_ref_iterator`
  read_packed_refs(): ensure that references are ordered when read
  packed_ref_cache: keep the `packed-refs` file mmapped if possible
  packed-backend.c: reorder some definitions
  mmapped_ref_iterator_advance(): no peeled value for broken refs
  mmapped_ref_iterator: add iterator over a packed-refs file
  packed_ref_cache: remember the file-wide peeling state
  read_packed_refs(): read references with minimal copying
  read_packed_refs(): make parsing of the header line more robust
  read_packed_refs(): only check for a header at the top of the file
  read_packed_refs(): use mmap to read the `packed-refs` file
  die_unterminated_line(), die_invalid_line(): new functions
  packed_ref_cache: add a backlink to the associated `packed_ref_store`
  prefix_ref_iterator: break when we leave the prefix
  ...
commit 1a2e1a76ec

 Makefile | 6
 refs.c   | 22
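The series' central change — mmapping `packed-refs` and consulting it lazily — is easiest to see in miniature. Below is a self-contained sketch, not Git's code: the function names and the simplified record format (sorted lines of "<40-hex-oid> <refname>\n", every record newline-terminated) are invented for illustration. It shows the access pattern the new packed backend enables: map the file read-only, then bisect the sorted records so that a single-ref lookup touches only O(log n) of the buffer instead of parsing every entry.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/*
 * Bisect a sorted, line-oriented buffer of "<40-hex-oid> <refname>\n"
 * records for a single refname. Assumes well-formed input (every
 * record ends in '\n'). Returns the start of the record, or NULL.
 */
static const char *find_ref(const char *buf, size_t len, const char *refname)
{
        size_t lo = 0, hi = len;   /* both always sit on record boundaries */

        while (lo < hi) {
                size_t mid = lo + (hi - lo) / 2;
                const char *name, *eol;
                size_t n;
                int cmp;

                while (mid > lo && buf[mid - 1] != '\n')
                        mid--;                  /* rewind to record start */
                name = buf + mid + 41;          /* skip oid and space */
                eol = memchr(name, '\n', len - (mid + 41));
                n = eol - name;
                cmp = strncmp(refname, name, n);
                if (!cmp && !refname[n])
                        return buf + mid;       /* exact match */
                if (cmp < 0)
                        hi = mid;               /* wanted ref sorts earlier */
                else
                        lo = (eol - buf) + 1;   /* sorts later: skip record */
        }
        return NULL;
}

int main(int argc, char **argv)
{
        struct stat st;
        const char *rec;
        char *map;
        int fd;

        if (argc != 3)
                return fprintf(stderr, "usage: %s <file> <refname>\n", argv[0]), 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || fstat(fd, &st) < 0)
                return perror(argv[1]), 1;
        map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        if (map == MAP_FAILED)
                return perror("mmap"), 1;
        rec = find_ref(map, st.st_size, argv[2]);
        if (rec)
                printf("%.40s %s\n", rec, argv[2]);   /* print the oid */
        munmap(map, st.st_size);
        close(fd);
        return rec ? 0 : 1;
}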
diff --git a/Makefile b/Makefile
@@ -205,6 +205,9 @@ all::
 #
 # Define NO_MMAP if you want to avoid mmap.
 #
+# Define MMAP_PREVENTS_DELETE if a file that is currently mmapped cannot be
+# deleted or cannot be replaced using rename().
+#
 # Define NO_SYS_POLL_H if you don't have sys/poll.h.
 #
 # Define NO_POLL if you do not have or don't want to use poll().
@@ -1391,6 +1394,9 @@ else
 	COMPAT_OBJS += compat/win32mmap.o
 endif
 endif
+ifdef MMAP_PREVENTS_DELETE
+	BASIC_CFLAGS += -DMMAP_PREVENTS_DELETE
+endif
 ifdef OBJECT_CREATION_USES_RENAMES
 	COMPAT_CFLAGS += -DOBJECT_CREATION_MODE=1
 endif
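A note on how such a knob is typically consumed: MMAP_PREVENTS_DELETE becomes a preprocessor symbol (via BASIC_CFLAGS above), and code that would otherwise hold a long-lived mapping compiles in a copy-and-unmap fallback on the affected platforms, since a live mapping there blocks rename() and unlink() of the file. The helper below is a hypothetical sketch of that pattern — `snapshot_buffer` is an invented name, not Git's actual function:

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

/*
 * Illustrative only: return a buffer that stays valid even after the
 * underlying file is renamed away or deleted.
 */
static char *snapshot_buffer(char *map, size_t size)
{
#ifdef MMAP_PREVENTS_DELETE
        /* copy and release the mapping so the file can be replaced */
        char *copy = malloc(size + 1);

        if (!copy)
                return NULL;
        memcpy(copy, map, size);
        copy[size] = '\0';
        munmap(map, size);
        return copy;
#else
        return map;   /* safe to keep the mapping; caller munmap()s later */
#endif
}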
diff --git a/config.mak.uname b/config.mak.uname
@@ -184,6 +184,7 @@ ifeq ($(uname_O),Cygwin)
 	UNRELIABLE_FSTAT = UnfortunatelyYes
 	SPARSE_FLAGS = -isystem /usr/include/w32api -Wno-one-bit-signed-bitfield
 	OBJECT_CREATION_USES_RENAMES = UnfortunatelyNeedsTo
+	MMAP_PREVENTS_DELETE = UnfortunatelyYes
 	COMPAT_OBJS += compat/cygwin.o
 	FREAD_READS_DIRECTORIES = UnfortunatelyYes
 endif
@@ -353,6 +354,7 @@ ifeq ($(uname_S),Windows)
 	NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
 	NO_NSEC = YesPlease
 	USE_WIN32_MMAP = YesPlease
+	MMAP_PREVENTS_DELETE = UnfortunatelyYes
 	# USE_NED_ALLOCATOR = YesPlease
 	UNRELIABLE_FSTAT = UnfortunatelyYes
 	OBJECT_CREATION_USES_RENAMES = UnfortunatelyNeedsTo
@@ -501,6 +503,7 @@ ifneq (,$(findstring MINGW,$(uname_S)))
 	NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
 	NO_NSEC = YesPlease
 	USE_WIN32_MMAP = YesPlease
+	MMAP_PREVENTS_DELETE = UnfortunatelyYes
 	USE_NED_ALLOCATOR = YesPlease
 	UNRELIABLE_FSTAT = UnfortunatelyYes
 	OBJECT_CREATION_USES_RENAMES = UnfortunatelyNeedsTo
diff --git a/refs.c b/refs.c
@@ -1285,6 +1285,10 @@ struct ref_iterator *refs_ref_iterator_begin(
 	if (trim)
 		iter = prefix_ref_iterator_begin(iter, "", trim);
 
+	/* Sanity check for subclasses: */
+	if (!iter->ordered)
+		BUG("reference iterator is not ordered");
+
 	return iter;
 }
 
@@ -1686,7 +1690,23 @@ int refs_pack_refs(struct ref_store *refs, unsigned int flags)
 int refs_peel_ref(struct ref_store *refs, const char *refname,
 		  unsigned char *sha1)
 {
-	return refs->be->peel_ref(refs, refname, sha1);
+	int flag;
+	unsigned char base[20];
+
+	if (current_ref_iter && current_ref_iter->refname == refname) {
+		struct object_id peeled;
+
+		if (ref_iterator_peel(current_ref_iter, &peeled))
+			return -1;
+		hashcpy(sha1, peeled.hash);
+		return 0;
+	}
+
+	if (refs_read_ref_full(refs, refname,
+			       RESOLVE_REF_READING, base, &flag))
+		return -1;
+
+	return peel_object(base, sha1);
 }
 
 int peel_ref(const char *refname, unsigned char *sha1)
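The hunk above is the "implement `refs_peel_ref()` generically" step: peeling logic that used to live in each backend now sits once in refs.c, built on the backends' primitive read operation, and the per-backend `peel_ref` vtable entries are deleted later in this diff. A toy model of that vtable refactoring, with invented names and a dummy backend, assuming nothing beyond standard C:

#include <stdio.h>
#include <string.h>

struct store;

/* backends now provide only primitive reads; the peel slot is gone */
struct backend {
        int (*read_ref)(struct store *s, const char *name,
                        char *out, size_t n);
};

struct store {
        const struct backend *be;
};

/* generic peeling built once on the primitive, like refs_peel_ref() */
static int generic_peel(struct store *s, const char *name,
                        char *out, size_t n)
{
        char base[64];

        if (s->be->read_ref(s, name, base, sizeof(base)))
                return -1;
        /* real code would call peel_object(base, ...); we just echo */
        snprintf(out, n, "peeled(%s)", base);
        return 0;
}

static int dummy_read(struct store *s, const char *name, char *out, size_t n)
{
        (void)s;
        snprintf(out, n, "oid-of-%s", name);
        return 0;
}

int main(void)
{
        const struct backend be = { dummy_read };
        struct store s = { &be };
        char buf[128];

        if (!generic_peel(&s, "refs/tags/v1.0", buf, sizeof(buf)))
                puts(buf);   /* prints: peeled(oid-of-refs/tags/v1.0) */
        return 0;
}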
diff --git a/refs/files-backend.c b/refs/files-backend.c
@@ -641,43 +641,6 @@ out:
 	return ret;
 }
 
-static int files_peel_ref(struct ref_store *ref_store,
-			  const char *refname, unsigned char *sha1)
-{
-	struct files_ref_store *refs =
-		files_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
-			       "peel_ref");
-	int flag;
-	unsigned char base[20];
-
-	if (current_ref_iter && current_ref_iter->refname == refname) {
-		struct object_id peeled;
-
-		if (ref_iterator_peel(current_ref_iter, &peeled))
-			return -1;
-		hashcpy(sha1, peeled.hash);
-		return 0;
-	}
-
-	if (refs_read_ref_full(ref_store, refname,
-			       RESOLVE_REF_READING, base, &flag))
-		return -1;
-
-	/*
-	 * If the reference is packed, read its ref_entry from the
-	 * cache in the hope that we already know its peeled value.
-	 * We only try this optimization on packed references because
-	 * (a) forcing the filling of the loose reference cache could
-	 * be expensive and (b) loose references anyway usually do not
-	 * have REF_KNOWS_PEELED.
-	 */
-	if (flag & REF_ISPACKED &&
-	    !refs_peel_ref(refs->packed_ref_store, refname, sha1))
-		return 0;
-
-	return peel_object(base, sha1);
-}
-
 struct files_ref_iterator {
 	struct ref_iterator base;
 
@@ -748,7 +711,7 @@ static struct ref_iterator *files_ref_iterator_begin(
 		const char *prefix, unsigned int flags)
 {
 	struct files_ref_store *refs;
-	struct ref_iterator *loose_iter, *packed_iter;
+	struct ref_iterator *loose_iter, *packed_iter, *overlay_iter;
 	struct files_ref_iterator *iter;
 	struct ref_iterator *ref_iterator;
 	unsigned int required_flags = REF_STORE_READ;
@@ -758,10 +721,6 @@ static struct ref_iterator *files_ref_iterator_begin(
 
 	refs = files_downcast(ref_store, required_flags, "ref_iterator_begin");
 
-	iter = xcalloc(1, sizeof(*iter));
-	ref_iterator = &iter->base;
-	base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);
-
 	/*
 	 * We must make sure that all loose refs are read before
 	 * accessing the packed-refs file; this avoids a race
@@ -797,7 +756,13 @@ static struct ref_iterator *files_ref_iterator_begin(
 			refs->packed_ref_store, prefix, 0,
 			DO_FOR_EACH_INCLUDE_BROKEN);
 
-	iter->iter0 = overlay_ref_iterator_begin(loose_iter, packed_iter);
+	overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter);
+
+	iter = xcalloc(1, sizeof(*iter));
+	ref_iterator = &iter->base;
+	base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable,
+			       overlay_iter->ordered);
+	iter->iter0 = overlay_iter;
 	iter->flags = flags;
 
 	return ref_iterator;
@@ -2094,7 +2059,7 @@ static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
 	struct ref_iterator *ref_iterator = &iter->base;
 	struct strbuf sb = STRBUF_INIT;
 
-	base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
+	base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0);
 	strbuf_addf(&sb, "%s/logs", gitdir);
 	iter->dir_iterator = dir_iterator_begin(sb.buf);
 	iter->ref_store = ref_store;
@@ -2138,6 +2103,7 @@ static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_st
 		return reflog_iterator_begin(ref_store, refs->gitcommondir);
 	} else {
 		return merge_ref_iterator_begin(
+				0,
 				reflog_iterator_begin(ref_store, refs->gitdir),
 				reflog_iterator_begin(ref_store, refs->gitcommondir),
 				reflog_iterator_select, refs);
@@ -3089,7 +3055,6 @@ struct ref_storage_be refs_be_files = {
 	files_initial_transaction_commit,
 
 	files_pack_refs,
-	files_peel_ref,
 	files_create_symref,
 	files_delete_refs,
 	files_rename_ref,
diff --git a/refs/iterator.c b/refs/iterator.c
@@ -25,9 +25,11 @@ int ref_iterator_abort(struct ref_iterator *ref_iterator)
 }
 
 void base_ref_iterator_init(struct ref_iterator *iter,
-			    struct ref_iterator_vtable *vtable)
+			    struct ref_iterator_vtable *vtable,
+			    int ordered)
 {
 	iter->vtable = vtable;
+	iter->ordered = !!ordered;
 	iter->refname = NULL;
 	iter->oid = NULL;
 	iter->flags = 0;
@@ -72,7 +74,7 @@ struct ref_iterator *empty_ref_iterator_begin(void)
 	struct empty_ref_iterator *iter = xcalloc(1, sizeof(*iter));
 	struct ref_iterator *ref_iterator = &iter->base;
 
-	base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable);
+	base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable, 1);
 	return ref_iterator;
 }
 
@@ -205,6 +207,7 @@ static struct ref_iterator_vtable merge_ref_iterator_vtable = {
 };
 
 struct ref_iterator *merge_ref_iterator_begin(
+		int ordered,
 		struct ref_iterator *iter0, struct ref_iterator *iter1,
 		ref_iterator_select_fn *select, void *cb_data)
 {
@@ -219,7 +222,7 @@ struct ref_iterator *merge_ref_iterator_begin(
 	 * references through only if they exist in both iterators.
 	 */
 
-	base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable);
+	base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable, ordered);
 	iter->iter0 = iter0;
 	iter->iter1 = iter1;
 	iter->select = select;
@@ -268,9 +271,11 @@ struct ref_iterator *overlay_ref_iterator_begin(
 	} else if (is_empty_ref_iterator(back)) {
 		ref_iterator_abort(back);
 		return front;
+	} else if (!front->ordered || !back->ordered) {
+		BUG("overlay_ref_iterator requires ordered inputs");
 	}
 
-	return merge_ref_iterator_begin(front, back,
+	return merge_ref_iterator_begin(1, front, back,
 					overlay_iterator_select, NULL);
 }
 
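The new BUG() guard exists because overlaying only works as a single linear pass when both inputs arrive sorted by refname: equal names mean "front shadows back", and a strict inequality tells the merge which side to advance. A small standalone illustration of that discipline — toy arrays stand in for ref iterators; this is not Git's code:

#include <stdio.h>
#include <string.h>

/* merge two name-sorted lists; entries in `front` shadow `back` */
static void overlay(const char **front, int nf, const char **back, int nb)
{
        int i = 0, j = 0;

        while (i < nf || j < nb) {
                int cmp = (i >= nf) ? +1 : (j >= nb) ? -1
                        : strcmp(front[i], back[j]);

                if (cmp < 0)
                        printf("%s (front)\n", front[i++]);
                else if (cmp > 0)
                        printf("%s (back)\n", back[j++]);
                else {                  /* same name: front wins */
                        printf("%s (front shadows back)\n", front[i++]);
                        j++;
                }
        }
}

int main(void)
{
        const char *loose[]  = { "refs/heads/main", "refs/tags/v2" };
        const char *packed[] = { "refs/heads/main", "refs/tags/v1" };

        overlay(loose, 2, packed, 2);   /* loose refs win over packed */
        return 0;
}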
@@ -282,6 +287,20 @@ struct prefix_ref_iterator {
 	int trim;
 };
 
+/* Return -1, 0, 1 if refname is before, inside, or after the prefix. */
+static int compare_prefix(const char *refname, const char *prefix)
+{
+	while (*prefix) {
+		if (*refname != *prefix)
+			return ((unsigned char)*refname < (unsigned char)*prefix) ? -1 : +1;
+
+		refname++;
+		prefix++;
+	}
+
+	return 0;
+}
+
 static int prefix_ref_iterator_advance(struct ref_iterator *ref_iterator)
 {
 	struct prefix_ref_iterator *iter =
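compare_prefix() returns a three-way answer rather than a boolean so the caller can distinguish "not yet in range" (keep skipping) from "past the range" (stop, if the input is ordered). A quick self-contained check of those semantics — the function body is copied verbatim from the hunk above; only the test driver is added:

#include <assert.h>

static int compare_prefix(const char *refname, const char *prefix)
{
        while (*prefix) {
                if (*refname != *prefix)
                        return ((unsigned char)*refname < (unsigned char)*prefix) ? -1 : +1;

                refname++;
                prefix++;
        }

        return 0;
}

int main(void)
{
        assert(compare_prefix("refs/heads/main", "refs/tags/") == -1); /* before */
        assert(compare_prefix("refs/tags/v1.0", "refs/tags/") == 0);   /* inside */
        assert(compare_prefix("refs/x", "refs/tags/") == +1);          /* after */
        return 0;
}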
@@ -289,9 +308,25 @@ static int prefix_ref_iterator_advance(struct ref_iterator *ref_iterator)
 	int ok;
 
 	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
-		if (!starts_with(iter->iter0->refname, iter->prefix))
+		int cmp = compare_prefix(iter->iter0->refname, iter->prefix);
+
+		if (cmp < 0)
 			continue;
 
+		if (cmp > 0) {
+			/*
+			 * If the source iterator is ordered, then we
+			 * can stop the iteration as soon as we see a
+			 * refname that comes after the prefix:
+			 */
+			if (iter->iter0->ordered) {
+				ok = ref_iterator_abort(iter->iter0);
+				break;
+			} else {
+				continue;
+			}
+		}
+
 		if (iter->trim) {
 			/*
 			 * It is nonsense to trim off characters that
@@ -361,7 +396,7 @@ struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
 	iter = xcalloc(1, sizeof(*iter));
 	ref_iterator = &iter->base;
 
-	base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable);
+	base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable, iter0->ordered);
 
 	iter->iter0 = iter0;
 	iter->prefix = xstrdup(prefix);
[File diff suppressed because it is too large]
diff --git a/refs/ref-cache.c b/refs/ref-cache.c
@@ -38,7 +38,6 @@ struct ref_entry *create_ref_entry(const char *refname,
 
 	FLEX_ALLOC_STR(ref, name, refname);
 	oidcpy(&ref->u.value.oid, oid);
-	oidclr(&ref->u.value.peeled);
 	ref->flag = flag;
 	return ref;
 }
@@ -491,49 +490,10 @@ static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
 	}
 }
 
-enum peel_status peel_entry(struct ref_entry *entry, int repeel)
-{
-	enum peel_status status;
-
-	if (entry->flag & REF_KNOWS_PEELED) {
-		if (repeel) {
-			entry->flag &= ~REF_KNOWS_PEELED;
-			oidclr(&entry->u.value.peeled);
-		} else {
-			return is_null_oid(&entry->u.value.peeled) ?
-				PEEL_NON_TAG : PEEL_PEELED;
-		}
-	}
-	if (entry->flag & REF_ISBROKEN)
-		return PEEL_BROKEN;
-	if (entry->flag & REF_ISSYMREF)
-		return PEEL_IS_SYMREF;
-
-	status = peel_object(entry->u.value.oid.hash, entry->u.value.peeled.hash);
-	if (status == PEEL_PEELED || status == PEEL_NON_TAG)
-		entry->flag |= REF_KNOWS_PEELED;
-	return status;
-}
-
 static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
 				   struct object_id *peeled)
 {
-	struct cache_ref_iterator *iter =
-		(struct cache_ref_iterator *)ref_iterator;
-	struct cache_ref_iterator_level *level;
-	struct ref_entry *entry;
-
-	level = &iter->levels[iter->levels_nr - 1];
-
-	if (level->index == -1)
-		die("BUG: peel called before advance for cache iterator");
-
-	entry = level->dir->entries[level->index];
-
-	if (peel_entry(entry, 0))
-		return -1;
-	oidcpy(peeled, &entry->u.value.peeled);
-	return 0;
+	return peel_object(ref_iterator->oid->hash, peeled->hash);
 }
 
 static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
@@ -574,7 +534,7 @@ struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
 
 	iter = xcalloc(1, sizeof(*iter));
 	ref_iterator = &iter->base;
-	base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable);
+	base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable, 1);
 	ALLOC_GROW(iter->levels, 10, iter->levels_alloc);
 
 	iter->levels_nr = 1;
diff --git a/refs/ref-cache.h b/refs/ref-cache.h
@@ -38,14 +38,6 @@ struct ref_value {
 	 * referred to by the last reference in the symlink chain.
 	 */
 	struct object_id oid;
-
-	/*
-	 * If REF_KNOWS_PEELED, then this field holds the peeled value
-	 * of this reference, or null if the reference is known not to
-	 * be peelable. See the documentation for peel_ref() for an
-	 * exact definition of "peelable".
-	 */
-	struct object_id peeled;
 };
 
 /*
@@ -97,21 +89,14 @@ struct ref_dir {
  * public values; see refs.h.
  */
 
-/*
- * The field ref_entry->u.value.peeled of this value entry contains
- * the correct peeled value for the reference, which might be
- * null_sha1 if the reference is not a tag or if it is broken.
- */
-#define REF_KNOWS_PEELED 0x10
-
 /* ref_entry represents a directory of references */
-#define REF_DIR 0x20
+#define REF_DIR 0x10
 
 /*
  * Entry has not yet been read from disk (used only for REF_DIR
  * entries representing loose references)
  */
-#define REF_INCOMPLETE 0x40
+#define REF_INCOMPLETE 0x20
 
 /*
  * A ref_entry represents either a reference or a "subdirectory" of
@@ -245,23 +230,11 @@ struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname);
  * Start iterating over references in `cache`. If `prefix` is
  * specified, only include references whose names start with that
  * prefix. If `prime_dir` is true, then fill any incomplete
- * directories before beginning the iteration.
+ * directories before beginning the iteration. The output is ordered
+ * by refname.
  */
 struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
 					      const char *prefix,
 					      int prime_dir);
 
-/*
- * Peel the entry (if possible) and return its new peel_status. If
- * repeel is true, re-peel the entry even if there is an old peeled
- * value that is already stored in it.
- *
- * It is OK to call this function with a packed reference entry that
- * might be stale and might even refer to an object that has since
- * been garbage-collected. In such a case, if the entry has
- * REF_KNOWS_PEELED then leave the status unchanged and return
- * PEEL_PEELED or PEEL_NON_TAG; otherwise, return PEEL_INVALID.
- */
-enum peel_status peel_entry(struct ref_entry *entry, int repeel);
-
 #endif /* REFS_REF_CACHE_H */
diff --git a/refs/refs-internal.h b/refs/refs-internal.h
@@ -329,6 +329,13 @@ int refs_rename_ref_available(struct ref_store *refs,
  */
 struct ref_iterator {
 	struct ref_iterator_vtable *vtable;
+
+	/*
+	 * Does this `ref_iterator` iterate over references in order
+	 * by refname?
+	 */
+	unsigned int ordered : 1;
+
 	const char *refname;
 	const struct object_id *oid;
 	unsigned int flags;
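The `ordered` bit lives in the base struct, so every iterator subclass inherits it through the usual embed-the-base-as-first-member idiom, and generic code can test it without knowing the concrete type. A toy model of that layout — invented names, not Git's definitions:

#include <stdlib.h>

/* base part shared by all iterators; `ordered` rides along for free */
struct iter_base {
        unsigned int ordered : 1;
        const char *refname;
};

/* subclass embeds the base as its FIRST member, so &sub->base and
 * (struct array_iter *)base convert both ways */
struct array_iter {
        struct iter_base base;
        const char **names;
        size_t i, n;
};

static struct iter_base *array_iter_new(const char **names, size_t n,
                                        int ordered)
{
        struct array_iter *it = calloc(1, sizeof(*it));

        it->base.ordered = !!ordered;   /* normalize to 0/1 for the bitfield */
        it->names = names;
        it->n = n;
        return &it->base;
}

int main(void)
{
        const char *names[] = { "refs/heads/a", "refs/heads/b" };
        struct iter_base *it = array_iter_new(names, 2, 1);
        struct array_iter *sub = (struct array_iter *)it;   /* downcast */

        return (it->ordered && sub->n == 2) ? 0 : 1;
}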
@@ -374,7 +381,7 @@ int is_empty_ref_iterator(struct ref_iterator *ref_iterator);
  * which the refname begins with prefix. If trim is non-zero, then
  * trim that many characters off the beginning of each refname. flags
  * can be DO_FOR_EACH_INCLUDE_BROKEN to include broken references in
- * the iteration.
+ * the iteration. The output is ordered by refname.
  */
 struct ref_iterator *refs_ref_iterator_begin(
 		struct ref_store *refs,
@@ -400,9 +407,11 @@ typedef enum iterator_selection ref_iterator_select_fn(
  * Iterate over the entries from iter0 and iter1, with the values
  * interleaved as directed by the select function. The iterator takes
  * ownership of iter0 and iter1 and frees them when the iteration is
- * over.
+ * over. A derived class should set `ordered` to 1 or 0 based on
+ * whether it generates its output in order by reference name.
  */
 struct ref_iterator *merge_ref_iterator_begin(
+		int ordered,
 		struct ref_iterator *iter0, struct ref_iterator *iter1,
 		ref_iterator_select_fn *select, void *cb_data);
 
@@ -431,6 +440,8 @@ struct ref_iterator *overlay_ref_iterator_begin(
  * As an convenience to callers, if prefix is the empty string and
  * trim is zero, this function returns iter0 directly, without
  * wrapping it.
+ *
+ * The resulting ref_iterator is ordered if iter0 is.
  */
 struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
 					       const char *prefix,
@@ -441,11 +452,14 @@ struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
 /*
  * Base class constructor for ref_iterators. Initialize the
  * ref_iterator part of iter, setting its vtable pointer as specified.
+ * `ordered` should be set to 1 if the iterator will iterate over
+ * references in order by refname; otherwise it should be set to 0.
  * This is meant to be called only by the initializers of derived
  * classes.
  */
 void base_ref_iterator_init(struct ref_iterator *iter,
-			    struct ref_iterator_vtable *vtable);
+			    struct ref_iterator_vtable *vtable,
+			    int ordered);
 
 /*
  * Base class destructor for ref_iterators. Destroy the ref_iterator
@@ -548,8 +562,6 @@ typedef int ref_transaction_commit_fn(struct ref_store *refs,
 				      struct strbuf *err);
 
 typedef int pack_refs_fn(struct ref_store *ref_store, unsigned int flags);
-typedef int peel_ref_fn(struct ref_store *ref_store,
-			const char *refname, unsigned char *sha1);
 typedef int create_symref_fn(struct ref_store *ref_store,
 			     const char *ref_target,
 			     const char *refs_heads_master,
@@ -567,7 +579,8 @@ typedef int copy_ref_fn(struct ref_store *ref_store,
  * Iterate over the references in `ref_store` whose names start with
  * `prefix`. `prefix` is matched as a literal string, without regard
  * for path separators. If prefix is NULL or the empty string, iterate
- * over all references in `ref_store`.
+ * over all references in `ref_store`. The output is ordered by
+ * refname.
  */
 typedef struct ref_iterator *ref_iterator_begin_fn(
 	struct ref_store *ref_store,
@@ -656,7 +669,6 @@ struct ref_storage_be {
 	ref_transaction_commit_fn *initial_transaction_commit;
 
 	pack_refs_fn *pack_refs;
-	peel_ref_fn *peel_ref;
 	create_symref_fn *create_symref;
 	delete_refs_fn *delete_refs;
 	rename_ref_fn *rename_ref;