Merge branch 'mh/ref-iterators'

The API to iterate over all the refs (i.e. for_each_ref(), etc.) has
been revamped.

* mh/ref-iterators:
  for_each_reflog(): reimplement using iterators
  dir_iterator: new API for iterating over a directory tree
  for_each_reflog(): don't abort for bad references
  do_for_each_ref(): reimplement using reference iteration
  refs: introduce an iterator interface
  ref_resolves_to_object(): new function
  entry_resolves_to_object(): rename function from ref_resolves_to_object()
  get_ref_cache(): only create an instance if there is a submodule
  remote rm: handle symbolic refs correctly
  delete_refs(): add a flags argument
  refs: use name "prefix" consistently
  do_for_each_ref(): move docstring to the header file
  refs: remove unnecessary "extern" keywords

commit 87492cb24d
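
The common thread of these topics is a switch from callback-based iteration to explicit iterator objects that are advanced one entry at a time and report ITER_OK, ITER_DONE or ITER_ERROR. As a rough, non-authoritative sketch of that style, using only names that appear in the diffs below (files_ref_iterator_begin(), ref_iterator_advance(), the iterator's refname/oid fields) plus git's existing oid_to_hex() and die(), which are not part of this page, iteration inside the refs code now looks roughly like this:

    struct ref_iterator *iter = files_ref_iterator_begin(NULL, "refs/heads/", 0);
    int ok;

    /* Pull entries one at a time instead of handing a callback down. */
    while ((ok = ref_iterator_advance(iter)) == ITER_OK)
        printf("%s %s\n", oid_to_hex(iter->oid), iter->refname);

    if (ok != ITER_DONE)
        die("failed to iterate over references");

The iterator interface is internal to the refs layer; external callers keep using for_each_ref() and friends, which (as the refs.c hunk below shows) become thin wrappers that feed such an iterator into do_for_each_ref_iterator().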
Makefile (2 lines changed)

@@ -718,6 +718,7 @@ LIB_OBJS += diff-lib.o
 LIB_OBJS += diff-no-index.o
 LIB_OBJS += diff.o
 LIB_OBJS += dir.o
+LIB_OBJS += dir-iterator.o
 LIB_OBJS += editor.o
 LIB_OBJS += entry.o
 LIB_OBJS += environment.o
@@ -782,6 +783,7 @@ LIB_OBJS += read-cache.o
 LIB_OBJS += reflog-walk.o
 LIB_OBJS += refs.o
 LIB_OBJS += refs/files-backend.o
+LIB_OBJS += refs/iterator.o
 LIB_OBJS += ref-filter.o
 LIB_OBJS += remote.o
 LIB_OBJS += replace_object.o
builtin/fetch.c

@@ -921,7 +921,7 @@ static int prune_refs(struct refspec *refs, int ref_count, struct ref *ref_map,
     for (ref = stale_refs; ref; ref = ref->next)
         string_list_append(&refnames, ref->name);

-    result = delete_refs(&refnames);
+    result = delete_refs(&refnames, 0);
     string_list_clear(&refnames, 0);
 }

builtin/remote.c

@@ -539,10 +539,6 @@ static int add_branch_for_removal(const char *refname,
         return 0;
     }

-    /* make sure that symrefs are deleted */
-    if (flags & REF_ISSYMREF)
-        return unlink(git_path("%s", refname));
-
     string_list_append(branches->branches, refname);

     return 0;
@@ -788,7 +784,7 @@ static int rm(int argc, const char **argv)
     strbuf_release(&buf);

     if (!result)
-        result = delete_refs(&branches);
+        result = delete_refs(&branches, REF_NODEREF);
     string_list_clear(&branches, 0);

     if (skipped.nr) {
@@ -1304,7 +1300,7 @@ static int prune_remote(const char *remote, int dry_run)
     string_list_sort(&refs_to_prune);

     if (!dry_run)
-        result |= delete_refs(&refs_to_prune);
+        result |= delete_refs(&refs_to_prune, 0);

     for_each_string_list_item(item, &states.stale) {
         const char *refname = item->util;
dir-iterator.c (new file, 202 lines)

@@ -0,0 +1,202 @@
#include "cache.h"
#include "dir.h"
#include "iterator.h"
#include "dir-iterator.h"

struct dir_iterator_level {
    int initialized;

    DIR *dir;

    /*
     * The length of the directory part of path at this level
     * (including a trailing '/'):
     */
    size_t prefix_len;

    /*
     * The last action that has been taken with the current entry
     * (needed for directories, which have to be included in the
     * iteration and also iterated into):
     */
    enum {
        DIR_STATE_ITER,
        DIR_STATE_RECURSE
    } dir_state;
};

/*
 * The full data structure used to manage the internal directory
 * iteration state. It includes members that are not part of the
 * public interface.
 */
struct dir_iterator_int {
    struct dir_iterator base;

    /*
     * The number of levels currently on the stack. This is always
     * at least 1, because when it becomes zero the iteration is
     * ended and this struct is freed.
     */
    size_t levels_nr;

    /* The number of levels that have been allocated on the stack */
    size_t levels_alloc;

    /*
     * A stack of levels. levels[0] is the uppermost directory
     * that will be included in this iteration.
     */
    struct dir_iterator_level *levels;
};

int dir_iterator_advance(struct dir_iterator *dir_iterator)
{
    struct dir_iterator_int *iter =
        (struct dir_iterator_int *)dir_iterator;

    while (1) {
        struct dir_iterator_level *level =
            &iter->levels[iter->levels_nr - 1];
        struct dirent *de;

        if (!level->initialized) {
            /*
             * Note: dir_iterator_begin() ensures that
             * path is not the empty string.
             */
            if (!is_dir_sep(iter->base.path.buf[iter->base.path.len - 1]))
                strbuf_addch(&iter->base.path, '/');
            level->prefix_len = iter->base.path.len;

            level->dir = opendir(iter->base.path.buf);
            if (!level->dir && errno != ENOENT) {
                warning("error opening directory %s: %s",
                    iter->base.path.buf, strerror(errno));
                /* Popping the level is handled below */
            }

            level->initialized = 1;
        } else if (S_ISDIR(iter->base.st.st_mode)) {
            if (level->dir_state == DIR_STATE_ITER) {
                /*
                 * The directory was just iterated
                 * over; now prepare to iterate into
                 * it.
                 */
                level->dir_state = DIR_STATE_RECURSE;
                ALLOC_GROW(iter->levels, iter->levels_nr + 1,
                       iter->levels_alloc);
                level = &iter->levels[iter->levels_nr++];
                level->initialized = 0;
                continue;
            } else {
                /*
                 * The directory has already been
                 * iterated over and iterated into;
                 * we're done with it.
                 */
            }
        }

        if (!level->dir) {
            /*
             * This level is exhausted (or wasn't opened
             * successfully); pop up a level.
             */
            if (--iter->levels_nr == 0)
                return dir_iterator_abort(dir_iterator);

            continue;
        }

        /*
         * Loop until we find an entry that we can give back
         * to the caller:
         */
        while (1) {
            strbuf_setlen(&iter->base.path, level->prefix_len);
            errno = 0;
            de = readdir(level->dir);

            if (!de) {
                /* This level is exhausted; pop up a level. */
                if (errno) {
                    warning("error reading directory %s: %s",
                        iter->base.path.buf, strerror(errno));
                } else if (closedir(level->dir))
                    warning("error closing directory %s: %s",
                        iter->base.path.buf, strerror(errno));

                level->dir = NULL;
                if (--iter->levels_nr == 0)
                    return dir_iterator_abort(dir_iterator);
                break;
            }

            if (is_dot_or_dotdot(de->d_name))
                continue;

            strbuf_addstr(&iter->base.path, de->d_name);
            if (lstat(iter->base.path.buf, &iter->base.st) < 0) {
                if (errno != ENOENT)
                    warning("error reading path '%s': %s",
                        iter->base.path.buf,
                        strerror(errno));
                continue;
            }

            /*
             * We have to set these each time because
             * the path strbuf might have been realloc()ed.
             */
            iter->base.relative_path =
                iter->base.path.buf + iter->levels[0].prefix_len;
            iter->base.basename =
                iter->base.path.buf + level->prefix_len;
            level->dir_state = DIR_STATE_ITER;

            return ITER_OK;
        }
    }
}

int dir_iterator_abort(struct dir_iterator *dir_iterator)
{
    struct dir_iterator_int *iter = (struct dir_iterator_int *)dir_iterator;

    for (; iter->levels_nr; iter->levels_nr--) {
        struct dir_iterator_level *level =
            &iter->levels[iter->levels_nr - 1];

        if (level->dir && closedir(level->dir)) {
            strbuf_setlen(&iter->base.path, level->prefix_len);
            warning("error closing directory %s: %s",
                iter->base.path.buf, strerror(errno));
        }
    }

    free(iter->levels);
    strbuf_release(&iter->base.path);
    free(iter);
    return ITER_DONE;
}

struct dir_iterator *dir_iterator_begin(const char *path)
{
    struct dir_iterator_int *iter = xcalloc(1, sizeof(*iter));
    struct dir_iterator *dir_iterator = &iter->base;

    if (!path || !*path)
        die("BUG: empty path passed to dir_iterator_begin()");

    strbuf_init(&iter->base.path, PATH_MAX);
    strbuf_addstr(&iter->base.path, path);

    ALLOC_GROW(iter->levels, 10, iter->levels_alloc);

    iter->levels_nr = 1;
    iter->levels[0].initialized = 0;

    return dir_iterator;
}
dir-iterator.h (new file, 87 lines)

@@ -0,0 +1,87 @@
#ifndef DIR_ITERATOR_H
#define DIR_ITERATOR_H

/*
 * Iterate over a directory tree.
 *
 * Iterate over a directory tree, recursively, including paths of all
 * types and hidden paths. Skip "." and ".." entries and don't follow
 * symlinks except for the original path.
 *
 * Every time dir_iterator_advance() is called, update the members of
 * the dir_iterator structure to reflect the next path in the
 * iteration. The order that paths are iterated over within a
 * directory is undefined, but directory paths are always iterated
 * over before the subdirectory contents.
 *
 * A typical iteration looks like this:
 *
 *     int ok;
 *     struct iterator *iter = dir_iterator_begin(path);
 *
 *     while ((ok = dir_iterator_advance(iter)) == ITER_OK) {
 *             if (want_to_stop_iteration()) {
 *                     ok = dir_iterator_abort(iter);
 *                     break;
 *             }
 *
 *             // Access information about the current path:
 *             if (S_ISDIR(iter->st.st_mode))
 *                     printf("%s is a directory\n", iter->relative_path);
 *     }
 *
 *     if (ok != ITER_DONE)
 *             handle_error();
 *
 * Callers are allowed to modify iter->path while they are working,
 * but they must restore it to its original contents before calling
 * dir_iterator_advance() again.
 */

struct dir_iterator {
    /* The current path: */
    struct strbuf path;

    /*
     * The current path relative to the starting path. This part
     * of the path always uses "/" characters to separate path
     * components:
     */
    const char *relative_path;

    /* The current basename: */
    const char *basename;

    /* The result of calling lstat() on path: */
    struct stat st;
};

/*
 * Start a directory iteration over path. Return a dir_iterator that
 * holds the internal state of the iteration.
 *
 * The iteration includes all paths under path, not including path
 * itself and not including "." or ".." entries.
 *
 * path is the starting directory. An internal copy will be made.
 */
struct dir_iterator *dir_iterator_begin(const char *path);

/*
 * Advance the iterator to the first or next item and return ITER_OK.
 * If the iteration is exhausted, free the dir_iterator and any
 * resources associated with it and return ITER_DONE. On error, free
 * dir_iterator and associated resources and return ITER_ERROR. It is
 * a bug to use iterator or call this function again after it has
 * returned ITER_DONE or ITER_ERROR.
 */
int dir_iterator_advance(struct dir_iterator *iterator);

/*
 * End the iteration before it has been exhausted. Free the
 * dir_iterator and any associated resources and return ITER_DONE. On
 * error, free the dir_iterator and return ITER_ERROR.
 */
int dir_iterator_abort(struct dir_iterator *iterator);

#endif
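
To make the calling sequence documented in this header concrete, here is a minimal, hedged sketch of a directory walker built on the API above. It assumes it is compiled inside git's tree (cache.h supplies die(), the strbuf type behind iter->path, and the compat headers for printf); error handling follows the contract stated in the comments, and no explicit cleanup is needed because the iterator frees itself on ITER_DONE and ITER_ERROR:

    #include "cache.h"
    #include "iterator.h"
    #include "dir-iterator.h"

    /* Print every path below "root", marking directories with a trailing slash. */
    static void list_tree(const char *root)
    {
        struct dir_iterator *iter = dir_iterator_begin(root);
        int ok;

        while ((ok = dir_iterator_advance(iter)) == ITER_OK) {
            if (S_ISDIR(iter->st.st_mode))
                printf("%s/\n", iter->relative_path);
            else
                printf("%s\n", iter->relative_path);
        }

        if (ok != ITER_DONE)
            die("error while iterating over '%s'", root);
    }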
iterator.h (new file, 81 lines)

@@ -0,0 +1,81 @@
#ifndef ITERATOR_H
#define ITERATOR_H

/*
 * Generic constants related to iterators.
 */

/*
 * The attempt to advance the iterator was successful; the iterator
 * reflects the new current entry.
 */
#define ITER_OK 0

/*
 * The iterator is exhausted and has been freed.
 */
#define ITER_DONE -1

/*
 * The iterator experienced an error. The iteration has been aborted
 * and the iterator has been freed.
 */
#define ITER_ERROR -2

/*
 * Return values for selector functions for merge iterators. The
 * numerical values of these constants are important and must be
 * compatible with ITER_DONE and ITER_ERROR.
 */
enum iterator_selection {
    /* End the iteration without an error: */
    ITER_SELECT_DONE = ITER_DONE,

    /* Report an error and abort the iteration: */
    ITER_SELECT_ERROR = ITER_ERROR,

    /*
     * The next group of constants are masks that are useful
     * mainly internally.
     */

    /* The LSB selects whether iter0/iter1 is the "current" iterator: */
    ITER_CURRENT_SELECTION_MASK = 0x01,

    /* iter0 is the "current" iterator this round: */
    ITER_CURRENT_SELECTION_0 = 0x00,

    /* iter1 is the "current" iterator this round: */
    ITER_CURRENT_SELECTION_1 = 0x01,

    /* Yield the value from the current iterator? */
    ITER_YIELD_CURRENT = 0x02,

    /* Discard the value from the secondary iterator? */
    ITER_SKIP_SECONDARY = 0x04,

    /*
     * The constants that a selector function should usually
     * return.
     */

    /* Yield the value from iter0: */
    ITER_SELECT_0 = ITER_CURRENT_SELECTION_0 | ITER_YIELD_CURRENT,

    /* Yield the value from iter0 and discard the one from iter1: */
    ITER_SELECT_0_SKIP_1 = ITER_SELECT_0 | ITER_SKIP_SECONDARY,

    /* Discard the value from iter0 without yielding anything this round: */
    ITER_SKIP_0 = ITER_CURRENT_SELECTION_1 | ITER_SKIP_SECONDARY,

    /* Yield the value from iter1: */
    ITER_SELECT_1 = ITER_CURRENT_SELECTION_1 | ITER_YIELD_CURRENT,

    /* Yield the value from iter1 and discard the one from iter0: */
    ITER_SELECT_1_SKIP_0 = ITER_SELECT_1 | ITER_SKIP_SECONDARY,

    /* Discard the value from iter1 without yielding anything this round: */
    ITER_SKIP_1 = ITER_CURRENT_SELECTION_0 | ITER_SKIP_SECONDARY
};

#endif /* ITERATOR_H */
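
iterator.h talks about "selector functions" for merge iterators but does not show one. The sketch below is a guess at what such a selector could look like for two reference iterators; the signature (two candidate iterators plus a cb_data cookie) is an assumption, since the actual typedef lives in the refs-internal headers and is not part of this page. Only the return values and their documented meanings come from the header above:

    /*
     * Hypothetical selector: emit refs from both iterators in refname order;
     * when both carry the same name, yield iter0's entry and drop iter1's.
     */
    static enum iterator_selection select_by_refname(struct ref_iterator *iter0,
                                                     struct ref_iterator *iter1,
                                                     void *cb_data)
    {
        int cmp;

        if (!iter0)
            return iter1 ? ITER_SELECT_1 : ITER_SELECT_DONE;
        if (!iter1)
            return ITER_SELECT_0;

        cmp = strcmp(iter0->refname, iter1->refname);
        if (cmp < 0)
            return ITER_SELECT_0;
        else if (cmp > 0)
            return ITER_SELECT_1;
        else
            return ITER_SELECT_0_SKIP_1;
    }

This is roughly the policy needed when overlaying loose refs over packed refs, which appears to be what overlay_ref_iterator_begin() in the refs/files-backend.c hunks further below is used for.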
refs.c (20 lines changed)

@@ -1120,6 +1120,26 @@ int head_ref(each_ref_fn fn, void *cb_data)
     return head_ref_submodule(NULL, fn, cb_data);
 }

+/*
+ * Call fn for each reference in the specified submodule for which the
+ * refname begins with prefix. If trim is non-zero, then trim that
+ * many characters off the beginning of each refname before passing
+ * the refname to fn. flags can be DO_FOR_EACH_INCLUDE_BROKEN to
+ * include broken references in the iteration. If fn ever returns a
+ * non-zero value, stop the iteration and return that value;
+ * otherwise, return 0.
+ */
+static int do_for_each_ref(const char *submodule, const char *prefix,
+                           each_ref_fn fn, int trim, int flags, void *cb_data)
+{
+    struct ref_iterator *iter;
+
+    iter = files_ref_iterator_begin(submodule, prefix, flags);
+    iter = prefix_ref_iterator_begin(iter, prefix, trim);
+
+    return do_for_each_ref_iterator(iter, fn, cb_data);
+}
+
 int for_each_ref(each_ref_fn fn, void *cb_data)
 {
     return do_for_each_ref(NULL, "", fn, 0, 0, cb_data);
refs.h (139 lines changed)

@@ -52,19 +52,19 @@
 #define RESOLVE_REF_NO_RECURSE 0x02
 #define RESOLVE_REF_ALLOW_BAD_NAME 0x04

-extern const char *resolve_ref_unsafe(const char *refname, int resolve_flags,
+const char *resolve_ref_unsafe(const char *refname, int resolve_flags,
                                unsigned char *sha1, int *flags);

-extern char *resolve_refdup(const char *refname, int resolve_flags,
+char *resolve_refdup(const char *refname, int resolve_flags,
                      unsigned char *sha1, int *flags);

-extern int read_ref_full(const char *refname, int resolve_flags,
+int read_ref_full(const char *refname, int resolve_flags,
                   unsigned char *sha1, int *flags);
-extern int read_ref(const char *refname, unsigned char *sha1);
+int read_ref(const char *refname, unsigned char *sha1);

-extern int ref_exists(const char *refname);
+int ref_exists(const char *refname);

-extern int is_branch(const char *refname);
+int is_branch(const char *refname);

 /*
  * If refname is a non-symbolic reference that refers to a tag object,
@@ -74,24 +74,25 @@ extern int is_branch(const char *refname);
  * Symbolic references are considered unpeelable, even if they
  * ultimately resolve to a peelable tag.
  */
-extern int peel_ref(const char *refname, unsigned char *sha1);
+int peel_ref(const char *refname, unsigned char *sha1);

 /**
  * Resolve refname in the nested "gitlink" repository that is located
  * at path. If the resolution is successful, return 0 and set sha1 to
  * the name of the object; otherwise, return a non-zero value.
  */
-extern int resolve_gitlink_ref(const char *path, const char *refname, unsigned char *sha1);
+int resolve_gitlink_ref(const char *path, const char *refname,
                         unsigned char *sha1);

 /*
  * Return true iff abbrev_name is a possible abbreviation for
  * full_name according to the rules defined by ref_rev_parse_rules in
  * refs.c.
  */
-extern int refname_match(const char *abbrev_name, const char *full_name);
+int refname_match(const char *abbrev_name, const char *full_name);

-extern int dwim_ref(const char *str, int len, unsigned char *sha1, char **ref);
-extern int dwim_log(const char *str, int len, unsigned char *sha1, char **ref);
+int dwim_ref(const char *str, int len, unsigned char *sha1, char **ref);
+int dwim_log(const char *str, int len, unsigned char *sha1, char **ref);

 /*
  * A ref_transaction represents a collection of ref updates
@@ -140,7 +141,9 @@ extern int dwim_log(const char *str, int len, unsigned char *sha1, char **ref);
 struct ref_transaction;

 /*
- * Bit values set in the flags argument passed to each_ref_fn():
+ * Bit values set in the flags argument passed to each_ref_fn() and
+ * stored in ref_iterator::flags. Other bits are for internal use
+ * only:
  */

 /* Reference is a symbolic reference. */
@@ -182,38 +185,45 @@ typedef int each_ref_fn(const char *refname,
  * modifies the reference also returns a nonzero value to immediately
  * stop the iteration.
  */
-extern int head_ref(each_ref_fn fn, void *cb_data);
-extern int for_each_ref(each_ref_fn fn, void *cb_data);
-extern int for_each_ref_in(const char *prefix, each_ref_fn fn, void *cb_data);
-extern int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data, unsigned int broken);
-extern int for_each_tag_ref(each_ref_fn fn, void *cb_data);
-extern int for_each_branch_ref(each_ref_fn fn, void *cb_data);
-extern int for_each_remote_ref(each_ref_fn fn, void *cb_data);
-extern int for_each_replace_ref(each_ref_fn fn, void *cb_data);
-extern int for_each_glob_ref(each_ref_fn fn, const char *pattern, void *cb_data);
-extern int for_each_glob_ref_in(each_ref_fn fn, const char *pattern, const char *prefix, void *cb_data);
+int head_ref(each_ref_fn fn, void *cb_data);
+int for_each_ref(each_ref_fn fn, void *cb_data);
+int for_each_ref_in(const char *prefix, each_ref_fn fn, void *cb_data);
+int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data,
                         unsigned int broken);
+int for_each_tag_ref(each_ref_fn fn, void *cb_data);
+int for_each_branch_ref(each_ref_fn fn, void *cb_data);
+int for_each_remote_ref(each_ref_fn fn, void *cb_data);
+int for_each_replace_ref(each_ref_fn fn, void *cb_data);
+int for_each_glob_ref(each_ref_fn fn, const char *pattern, void *cb_data);
+int for_each_glob_ref_in(each_ref_fn fn, const char *pattern,
                          const char *prefix, void *cb_data);

-extern int head_ref_submodule(const char *submodule, each_ref_fn fn, void *cb_data);
-extern int for_each_ref_submodule(const char *submodule, each_ref_fn fn, void *cb_data);
-extern int for_each_ref_in_submodule(const char *submodule, const char *prefix,
+int head_ref_submodule(const char *submodule, each_ref_fn fn, void *cb_data);
+int for_each_ref_submodule(const char *submodule,
                            each_ref_fn fn, void *cb_data);
+int for_each_ref_in_submodule(const char *submodule, const char *prefix,
                               each_ref_fn fn, void *cb_data);
-extern int for_each_tag_ref_submodule(const char *submodule, each_ref_fn fn, void *cb_data);
-extern int for_each_branch_ref_submodule(const char *submodule, each_ref_fn fn, void *cb_data);
-extern int for_each_remote_ref_submodule(const char *submodule, each_ref_fn fn, void *cb_data);
+int for_each_tag_ref_submodule(const char *submodule,
                                each_ref_fn fn, void *cb_data);
+int for_each_branch_ref_submodule(const char *submodule,
                                   each_ref_fn fn, void *cb_data);
+int for_each_remote_ref_submodule(const char *submodule,
                                   each_ref_fn fn, void *cb_data);

-extern int head_ref_namespaced(each_ref_fn fn, void *cb_data);
-extern int for_each_namespaced_ref(each_ref_fn fn, void *cb_data);
+int head_ref_namespaced(each_ref_fn fn, void *cb_data);
+int for_each_namespaced_ref(each_ref_fn fn, void *cb_data);

 /* can be used to learn about broken ref and symref */
-extern int for_each_rawref(each_ref_fn fn, void *cb_data);
+int for_each_rawref(each_ref_fn fn, void *cb_data);

 static inline const char *has_glob_specials(const char *pattern)
 {
     return strpbrk(pattern, "?*[");
 }

-extern void warn_dangling_symref(FILE *fp, const char *msg_fmt, const char *refname);
-extern void warn_dangling_symrefs(FILE *fp, const char *msg_fmt, const struct string_list *refnames);
+void warn_dangling_symref(FILE *fp, const char *msg_fmt, const char *refname);
+void warn_dangling_symrefs(FILE *fp, const char *msg_fmt,
                            const struct string_list *refnames);

 /*
  * Flags for controlling behaviour of pack_refs()
@@ -245,13 +255,13 @@ int pack_refs(unsigned int flags);
 int safe_create_reflog(const char *refname, int force_create, struct strbuf *err);

 /** Reads log for the value of ref during at_time. **/
-extern int read_ref_at(const char *refname, unsigned int flags,
+int read_ref_at(const char *refname, unsigned int flags,
                 unsigned long at_time, int cnt,
                 unsigned char *sha1, char **msg,
                 unsigned long *cutoff_time, int *cutoff_tz, int *cutoff_cnt);

 /** Check if a particular reflog exists */
-extern int reflog_exists(const char *refname);
+int reflog_exists(const char *refname);

 /*
  * Delete the specified reference. If old_sha1 is non-NULL, then
@@ -260,21 +270,26 @@ extern int reflog_exists(const char *refname);
  * exists, regardless of its old value. It is an error for old_sha1 to
  * be NULL_SHA1. flags is passed through to ref_transaction_delete().
  */
-extern int delete_ref(const char *refname, const unsigned char *old_sha1,
+int delete_ref(const char *refname, const unsigned char *old_sha1,
                unsigned int flags);

 /*
  * Delete the specified references. If there are any problems, emit
  * errors but attempt to keep going (i.e., the deletes are not done in
- * an all-or-nothing transaction).
+ * an all-or-nothing transaction). flags is passed through to
+ * ref_transaction_delete().
  */
-extern int delete_refs(struct string_list *refnames);
+int delete_refs(struct string_list *refnames, unsigned int flags);

 /** Delete a reflog */
-extern int delete_reflog(const char *refname);
+int delete_reflog(const char *refname);

 /* iterate over reflog entries */
-typedef int each_reflog_ent_fn(unsigned char *osha1, unsigned char *nsha1, const char *, unsigned long, int, const char *, void *);
+typedef int each_reflog_ent_fn(
+        unsigned char *old_sha1, unsigned char *new_sha1,
+        const char *committer, unsigned long timestamp,
+        int tz, const char *msg, void *cb_data);

 int for_each_reflog_ent(const char *refname, each_reflog_ent_fn fn, void *cb_data);
 int for_each_reflog_ent_reverse(const char *refname, each_reflog_ent_fn fn, void *cb_data);

@@ -282,7 +297,7 @@ int for_each_reflog_ent_reverse(const char *refname, each_reflog_ent_fn fn, void
  * Calls the specified function for each reflog file until it returns nonzero,
  * and returns the value
  */
-extern int for_each_reflog(each_ref_fn, void *);
+int for_each_reflog(each_ref_fn fn, void *cb_data);

 #define REFNAME_ALLOW_ONELEVEL 1
 #define REFNAME_REFSPEC_PATTERN 2
@@ -295,16 +310,16 @@ extern int for_each_reflog(each_ref_fn, void *);
  * allow a single "*" wildcard character in the refspec. No leading or
  * repeated slashes are accepted.
  */
-extern int check_refname_format(const char *refname, int flags);
+int check_refname_format(const char *refname, int flags);

-extern const char *prettify_refname(const char *refname);
+const char *prettify_refname(const char *refname);

-extern char *shorten_unambiguous_ref(const char *refname, int strict);
+char *shorten_unambiguous_ref(const char *refname, int strict);

 /** rename ref, return 0 on success **/
-extern int rename_ref(const char *oldref, const char *newref, const char *logmsg);
+int rename_ref(const char *oldref, const char *newref, const char *logmsg);

-extern int create_symref(const char *refname, const char *target, const char *logmsg);
+int create_symref(const char *refname, const char *target, const char *logmsg);

 /*
  * Update HEAD of the specified gitdir.
@@ -313,7 +328,7 @@ extern int create_symref(const char *refname, const char *target, const char *lo
  * $GIT_DIR points to.
  * Return 0 if successful, non-zero otherwise.
  * */
-extern int set_worktree_head_symref(const char *gitdir, const char *target);
+int set_worktree_head_symref(const char *gitdir, const char *target);

 enum action_on_err {
     UPDATE_REFS_MSG_ON_ERR,
@@ -463,7 +478,7 @@ int update_ref(const char *msg, const char *refname,
                const unsigned char *new_sha1, const unsigned char *old_sha1,
                unsigned int flags, enum action_on_err onerr);

-extern int parse_hide_refs_config(const char *var, const char *value, const char *);
+int parse_hide_refs_config(const char *var, const char *value, const char *);

 /*
  * Check whether a ref is hidden. If no namespace is set, both the first and
@@ -473,7 +488,7 @@ extern int parse_hide_refs_config(const char *var, const char *value, const char
  * the ref is outside that namespace, the first parameter is NULL. The second
  * parameter always points to the full ref name.
  */
-extern int ref_is_hidden(const char *, const char *);
+int ref_is_hidden(const char *, const char *);

 enum ref_type {
     REF_TYPE_PER_WORKTREE,
@@ -522,11 +537,11 @@ typedef void reflog_expiry_cleanup_fn(void *cb_data);
  * enum expire_reflog_flags. The three function pointers are described
  * above. On success, return zero.
  */
-extern int reflog_expire(const char *refname, const unsigned char *sha1,
+int reflog_expire(const char *refname, const unsigned char *sha1,
                   unsigned int flags,
                   reflog_expiry_prepare_fn prepare_fn,
                   reflog_expiry_should_prune_fn should_prune_fn,
                   reflog_expiry_cleanup_fn cleanup_fn,
                   void *policy_cb_data);

 #endif /* REFS_H */
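
For a sense of how the public API above is consumed, here is a hedged sketch of a for_each_branch_ref() caller. The hunk header above only shows the first parameter of each_ref_fn, so the rest of the callback signature used here (object id, flag bits such as REF_ISSYMREF, and the cb_data cookie) should be read as an assumption rather than a quotation of refs.h:

    /* Count non-symbolic branches; callback signature assumed as noted above. */
    static int count_branch(const char *refname, const struct object_id *oid,
                            int flags, void *cb_data)
    {
        int *count = cb_data;

        if (!(flags & REF_ISSYMREF))
            (*count)++;
        return 0;   /* zero means: keep iterating */
    }

    /* usage: */
    int n = 0;
    for_each_branch_ref(count_branch, &n);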
@ -1,6 +1,8 @@
|
|||||||
#include "../cache.h"
|
#include "../cache.h"
|
||||||
#include "../refs.h"
|
#include "../refs.h"
|
||||||
#include "refs-internal.h"
|
#include "refs-internal.h"
|
||||||
|
#include "../iterator.h"
|
||||||
|
#include "../dir-iterator.h"
|
||||||
#include "../lockfile.h"
|
#include "../lockfile.h"
|
||||||
#include "../object.h"
|
#include "../object.h"
|
||||||
#include "../dir.h"
|
#include "../dir.h"
|
||||||
@ -513,68 +515,36 @@ static void sort_ref_dir(struct ref_dir *dir)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Return true iff the reference described by entry can be resolved to
|
* Return true if refname, which has the specified oid and flags, can
|
||||||
* an object in the database. Emit a warning if the referred-to
|
* be resolved to an object in the database. If the referred-to object
|
||||||
* object does not exist.
|
* does not exist, emit a warning and return false.
|
||||||
*/
|
*/
|
||||||
static int ref_resolves_to_object(struct ref_entry *entry)
|
static int ref_resolves_to_object(const char *refname,
|
||||||
|
const struct object_id *oid,
|
||||||
|
unsigned int flags)
|
||||||
{
|
{
|
||||||
if (entry->flag & REF_ISBROKEN)
|
if (flags & REF_ISBROKEN)
|
||||||
return 0;
|
return 0;
|
||||||
if (!has_sha1_file(entry->u.value.oid.hash)) {
|
if (!has_sha1_file(oid->hash)) {
|
||||||
error("%s does not point to a valid object!", entry->name);
|
error("%s does not point to a valid object!", refname);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* current_ref is a performance hack: when iterating over references
|
* Return true if the reference described by entry can be resolved to
|
||||||
* using the for_each_ref*() functions, current_ref is set to the
|
* an object in the database; otherwise, emit a warning and return
|
||||||
* current reference's entry before calling the callback function. If
|
* false.
|
||||||
* the callback function calls peel_ref(), then peel_ref() first
|
|
||||||
* checks whether the reference to be peeled is the current reference
|
|
||||||
* (it usually is) and if so, returns that reference's peeled version
|
|
||||||
* if it is available. This avoids a refname lookup in a common case.
|
|
||||||
*/
|
*/
|
||||||
static struct ref_entry *current_ref;
|
static int entry_resolves_to_object(struct ref_entry *entry)
|
||||||
|
{
|
||||||
|
return ref_resolves_to_object(entry->name,
|
||||||
|
&entry->u.value.oid, entry->flag);
|
||||||
|
}
|
||||||
|
|
||||||
typedef int each_ref_entry_fn(struct ref_entry *entry, void *cb_data);
|
typedef int each_ref_entry_fn(struct ref_entry *entry, void *cb_data);
|
||||||
|
|
||||||
struct ref_entry_cb {
|
|
||||||
const char *base;
|
|
||||||
int trim;
|
|
||||||
int flags;
|
|
||||||
each_ref_fn *fn;
|
|
||||||
void *cb_data;
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Handle one reference in a do_for_each_ref*()-style iteration,
|
|
||||||
* calling an each_ref_fn for each entry.
|
|
||||||
*/
|
|
||||||
static int do_one_ref(struct ref_entry *entry, void *cb_data)
|
|
||||||
{
|
|
||||||
struct ref_entry_cb *data = cb_data;
|
|
||||||
struct ref_entry *old_current_ref;
|
|
||||||
int retval;
|
|
||||||
|
|
||||||
if (!starts_with(entry->name, data->base))
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
if (!(data->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
|
|
||||||
!ref_resolves_to_object(entry))
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
/* Store the old value, in case this is a recursive call: */
|
|
||||||
old_current_ref = current_ref;
|
|
||||||
current_ref = entry;
|
|
||||||
retval = data->fn(entry->name + data->trim, &entry->u.value.oid,
|
|
||||||
entry->flag, data->cb_data);
|
|
||||||
current_ref = old_current_ref;
|
|
||||||
return retval;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Call fn for each reference in dir that has index in the range
|
* Call fn for each reference in dir that has index in the range
|
||||||
* offset <= index < dir->nr. Recurse into subdirectories that are in
|
* offset <= index < dir->nr. Recurse into subdirectories that are in
|
||||||
@ -603,78 +573,6 @@ static int do_for_each_entry_in_dir(struct ref_dir *dir, int offset,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Call fn for each reference in the union of dir1 and dir2, in order
|
|
||||||
* by refname. Recurse into subdirectories. If a value entry appears
|
|
||||||
* in both dir1 and dir2, then only process the version that is in
|
|
||||||
* dir2. The input dirs must already be sorted, but subdirs will be
|
|
||||||
* sorted as needed. fn is called for all references, including
|
|
||||||
* broken ones.
|
|
||||||
*/
|
|
||||||
static int do_for_each_entry_in_dirs(struct ref_dir *dir1,
|
|
||||||
struct ref_dir *dir2,
|
|
||||||
each_ref_entry_fn fn, void *cb_data)
|
|
||||||
{
|
|
||||||
int retval;
|
|
||||||
int i1 = 0, i2 = 0;
|
|
||||||
|
|
||||||
assert(dir1->sorted == dir1->nr);
|
|
||||||
assert(dir2->sorted == dir2->nr);
|
|
||||||
while (1) {
|
|
||||||
struct ref_entry *e1, *e2;
|
|
||||||
int cmp;
|
|
||||||
if (i1 == dir1->nr) {
|
|
||||||
return do_for_each_entry_in_dir(dir2, i2, fn, cb_data);
|
|
||||||
}
|
|
||||||
if (i2 == dir2->nr) {
|
|
||||||
return do_for_each_entry_in_dir(dir1, i1, fn, cb_data);
|
|
||||||
}
|
|
||||||
e1 = dir1->entries[i1];
|
|
||||||
e2 = dir2->entries[i2];
|
|
||||||
cmp = strcmp(e1->name, e2->name);
|
|
||||||
if (cmp == 0) {
|
|
||||||
if ((e1->flag & REF_DIR) && (e2->flag & REF_DIR)) {
|
|
||||||
/* Both are directories; descend them in parallel. */
|
|
||||||
struct ref_dir *subdir1 = get_ref_dir(e1);
|
|
||||||
struct ref_dir *subdir2 = get_ref_dir(e2);
|
|
||||||
sort_ref_dir(subdir1);
|
|
||||||
sort_ref_dir(subdir2);
|
|
||||||
retval = do_for_each_entry_in_dirs(
|
|
||||||
subdir1, subdir2, fn, cb_data);
|
|
||||||
i1++;
|
|
||||||
i2++;
|
|
||||||
} else if (!(e1->flag & REF_DIR) && !(e2->flag & REF_DIR)) {
|
|
||||||
/* Both are references; ignore the one from dir1. */
|
|
||||||
retval = fn(e2, cb_data);
|
|
||||||
i1++;
|
|
||||||
i2++;
|
|
||||||
} else {
|
|
||||||
die("conflict between reference and directory: %s",
|
|
||||||
e1->name);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
struct ref_entry *e;
|
|
||||||
if (cmp < 0) {
|
|
||||||
e = e1;
|
|
||||||
i1++;
|
|
||||||
} else {
|
|
||||||
e = e2;
|
|
||||||
i2++;
|
|
||||||
}
|
|
||||||
if (e->flag & REF_DIR) {
|
|
||||||
struct ref_dir *subdir = get_ref_dir(e);
|
|
||||||
sort_ref_dir(subdir);
|
|
||||||
retval = do_for_each_entry_in_dir(
|
|
||||||
subdir, 0, fn, cb_data);
|
|
||||||
} else {
|
|
||||||
retval = fn(e, cb_data);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (retval)
|
|
||||||
return retval;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Load all of the refs from the dir into our in-memory cache. The hard work
|
* Load all of the refs from the dir into our in-memory cache. The hard work
|
||||||
* of loading loose refs is done by get_ref_dir(), so we just need to recurse
|
* of loading loose refs is done by get_ref_dir(), so we just need to recurse
|
||||||
@ -691,6 +589,153 @@ static void prime_ref_dir(struct ref_dir *dir)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* A level in the reference hierarchy that is currently being iterated
|
||||||
|
* through.
|
||||||
|
*/
|
||||||
|
struct cache_ref_iterator_level {
|
||||||
|
/*
|
||||||
|
* The ref_dir being iterated over at this level. The ref_dir
|
||||||
|
* is sorted before being stored here.
|
||||||
|
*/
|
||||||
|
struct ref_dir *dir;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The index of the current entry within dir (which might
|
||||||
|
* itself be a directory). If index == -1, then the iteration
|
||||||
|
* hasn't yet begun. If index == dir->nr, then the iteration
|
||||||
|
* through this level is over.
|
||||||
|
*/
|
||||||
|
int index;
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Represent an iteration through a ref_dir in the memory cache. The
|
||||||
|
* iteration recurses through subdirectories.
|
||||||
|
*/
|
||||||
|
struct cache_ref_iterator {
|
||||||
|
struct ref_iterator base;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The number of levels currently on the stack. This is always
|
||||||
|
* at least 1, because when it becomes zero the iteration is
|
||||||
|
* ended and this struct is freed.
|
||||||
|
*/
|
||||||
|
size_t levels_nr;
|
||||||
|
|
||||||
|
/* The number of levels that have been allocated on the stack */
|
||||||
|
size_t levels_alloc;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* A stack of levels. levels[0] is the uppermost level that is
|
||||||
|
* being iterated over in this iteration. (This is not
|
||||||
|
* necessary the top level in the references hierarchy. If we
|
||||||
|
* are iterating through a subtree, then levels[0] will hold
|
||||||
|
* the ref_dir for that subtree, and subsequent levels will go
|
||||||
|
* on from there.)
|
||||||
|
*/
|
||||||
|
struct cache_ref_iterator_level *levels;
|
||||||
|
};
|
||||||
|
|
||||||
|
static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
|
||||||
|
{
|
||||||
|
struct cache_ref_iterator *iter =
|
||||||
|
(struct cache_ref_iterator *)ref_iterator;
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
struct cache_ref_iterator_level *level =
|
||||||
|
&iter->levels[iter->levels_nr - 1];
|
||||||
|
struct ref_dir *dir = level->dir;
|
||||||
|
struct ref_entry *entry;
|
||||||
|
|
||||||
|
if (level->index == -1)
|
||||||
|
sort_ref_dir(dir);
|
||||||
|
|
||||||
|
if (++level->index == level->dir->nr) {
|
||||||
|
/* This level is exhausted; pop up a level */
|
||||||
|
if (--iter->levels_nr == 0)
|
||||||
|
return ref_iterator_abort(ref_iterator);
|
||||||
|
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
entry = dir->entries[level->index];
|
||||||
|
|
||||||
|
if (entry->flag & REF_DIR) {
|
||||||
|
/* push down a level */
|
||||||
|
ALLOC_GROW(iter->levels, iter->levels_nr + 1,
|
||||||
|
iter->levels_alloc);
|
||||||
|
|
||||||
|
level = &iter->levels[iter->levels_nr++];
|
||||||
|
level->dir = get_ref_dir(entry);
|
||||||
|
level->index = -1;
|
||||||
|
} else {
|
||||||
|
iter->base.refname = entry->name;
|
||||||
|
iter->base.oid = &entry->u.value.oid;
|
||||||
|
iter->base.flags = entry->flag;
|
||||||
|
return ITER_OK;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static enum peel_status peel_entry(struct ref_entry *entry, int repeel);
|
||||||
|
|
||||||
|
static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
|
||||||
|
struct object_id *peeled)
|
||||||
|
{
|
||||||
|
struct cache_ref_iterator *iter =
|
||||||
|
(struct cache_ref_iterator *)ref_iterator;
|
||||||
|
struct cache_ref_iterator_level *level;
|
||||||
|
struct ref_entry *entry;
|
||||||
|
|
||||||
|
level = &iter->levels[iter->levels_nr - 1];
|
||||||
|
|
||||||
|
if (level->index == -1)
|
||||||
|
die("BUG: peel called before advance for cache iterator");
|
||||||
|
|
||||||
|
entry = level->dir->entries[level->index];
|
||||||
|
|
||||||
|
if (peel_entry(entry, 0))
|
||||||
|
return -1;
|
||||||
|
hashcpy(peeled->hash, entry->u.value.peeled.hash);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
|
||||||
|
{
|
||||||
|
struct cache_ref_iterator *iter =
|
||||||
|
(struct cache_ref_iterator *)ref_iterator;
|
||||||
|
|
||||||
|
free(iter->levels);
|
||||||
|
base_ref_iterator_free(ref_iterator);
|
||||||
|
return ITER_DONE;
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct ref_iterator_vtable cache_ref_iterator_vtable = {
|
||||||
|
cache_ref_iterator_advance,
|
||||||
|
cache_ref_iterator_peel,
|
||||||
|
cache_ref_iterator_abort
|
||||||
|
};
|
||||||
|
|
||||||
|
static struct ref_iterator *cache_ref_iterator_begin(struct ref_dir *dir)
|
||||||
|
{
|
||||||
|
struct cache_ref_iterator *iter;
|
||||||
|
struct ref_iterator *ref_iterator;
|
||||||
|
struct cache_ref_iterator_level *level;
|
||||||
|
|
||||||
|
iter = xcalloc(1, sizeof(*iter));
|
||||||
|
ref_iterator = &iter->base;
|
||||||
|
base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable);
|
||||||
|
ALLOC_GROW(iter->levels, 10, iter->levels_alloc);
|
||||||
|
|
||||||
|
iter->levels_nr = 1;
|
||||||
|
level = &iter->levels[0];
|
||||||
|
level->index = -1;
|
||||||
|
level->dir = dir;
|
||||||
|
|
||||||
|
return ref_iterator;
|
||||||
|
}
|
||||||
|
|
||||||
struct nonmatching_ref_data {
|
struct nonmatching_ref_data {
|
||||||
const struct string_list *skip;
|
const struct string_list *skip;
|
||||||
const char *conflicting_refname;
|
const char *conflicting_refname;
|
||||||
@ -954,15 +999,26 @@ static struct ref_cache *lookup_ref_cache(const char *submodule)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* Return a pointer to a ref_cache for the specified submodule. For
|
* Return a pointer to a ref_cache for the specified submodule. For
|
||||||
* the main repository, use submodule==NULL. The returned structure
|
* the main repository, use submodule==NULL; such a call cannot fail.
|
||||||
* will be allocated and initialized but not necessarily populated; it
|
* For a submodule, the submodule must exist and be a nonbare
|
||||||
* should not be freed.
|
* repository, otherwise return NULL.
|
||||||
|
*
|
||||||
|
* The returned structure will be allocated and initialized but not
|
||||||
|
* necessarily populated; it should not be freed.
|
||||||
*/
|
*/
|
||||||
static struct ref_cache *get_ref_cache(const char *submodule)
|
static struct ref_cache *get_ref_cache(const char *submodule)
|
||||||
{
|
{
|
||||||
struct ref_cache *refs = lookup_ref_cache(submodule);
|
struct ref_cache *refs = lookup_ref_cache(submodule);
|
||||||
if (!refs)
|
|
||||||
refs = create_ref_cache(submodule);
|
if (!refs) {
|
||||||
|
struct strbuf submodule_sb = STRBUF_INIT;
|
||||||
|
|
||||||
|
strbuf_addstr(&submodule_sb, submodule);
|
||||||
|
if (is_nonbare_repository_dir(&submodule_sb))
|
||||||
|
refs = create_ref_cache(submodule);
|
||||||
|
strbuf_release(&submodule_sb);
|
||||||
|
}
|
||||||
|
|
||||||
return refs;
|
return refs;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1341,13 +1397,10 @@ int resolve_gitlink_ref(const char *path, const char *refname, unsigned char *sh
|
|||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
strbuf_add(&submodule, path, len);
|
strbuf_add(&submodule, path, len);
|
||||||
refs = lookup_ref_cache(submodule.buf);
|
refs = get_ref_cache(submodule.buf);
|
||||||
if (!refs) {
|
if (!refs) {
|
||||||
if (!is_nonbare_repository_dir(&submodule)) {
|
strbuf_release(&submodule);
|
||||||
strbuf_release(&submodule);
|
return -1;
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
refs = create_ref_cache(submodule.buf);
|
|
||||||
}
|
}
|
||||||
strbuf_release(&submodule);
|
strbuf_release(&submodule);
|
||||||
|
|
||||||
@ -1790,11 +1843,12 @@ int peel_ref(const char *refname, unsigned char *sha1)
|
|||||||
int flag;
|
int flag;
|
||||||
unsigned char base[20];
|
unsigned char base[20];
|
||||||
|
|
||||||
if (current_ref && (current_ref->name == refname
|
if (current_ref_iter && current_ref_iter->refname == refname) {
|
||||||
|| !strcmp(current_ref->name, refname))) {
|
struct object_id peeled;
|
||||||
if (peel_entry(current_ref, 0))
|
|
||||||
|
if (ref_iterator_peel(current_ref_iter, &peeled))
|
||||||
return -1;
|
return -1;
|
||||||
hashcpy(sha1, current_ref->u.value.peeled.hash);
|
hashcpy(sha1, peeled.hash);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1822,90 +1876,137 @@ int peel_ref(const char *refname, unsigned char *sha1)
|
|||||||
return peel_object(base, sha1);
|
return peel_object(base, sha1);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
struct files_ref_iterator {
|
||||||
* Call fn for each reference in the specified ref_cache, omitting
|
struct ref_iterator base;
|
||||||
* references not in the containing_dir of base. fn is called for all
|
|
||||||
* references, including broken ones. If fn ever returns a non-zero
|
|
||||||
* value, stop the iteration and return that value; otherwise, return
|
|
||||||
* 0.
|
|
||||||
*/
|
|
||||||
static int do_for_each_entry(struct ref_cache *refs, const char *base,
|
|
||||||
each_ref_entry_fn fn, void *cb_data)
|
|
||||||
{
|
|
||||||
struct packed_ref_cache *packed_ref_cache;
|
struct packed_ref_cache *packed_ref_cache;
|
||||||
struct ref_dir *loose_dir;
|
struct ref_iterator *iter0;
|
||||||
struct ref_dir *packed_dir;
|
unsigned int flags;
|
||||||
int retval = 0;
|
};
|
||||||
|
|
||||||
/*
|
static int files_ref_iterator_advance(struct ref_iterator *ref_iterator)
|
||||||
* We must make sure that all loose refs are read before accessing the
|
{
|
||||||
* packed-refs file; this avoids a race condition in which loose refs
|
struct files_ref_iterator *iter =
|
||||||
* are migrated to the packed-refs file by a simultaneous process, but
|
(struct files_ref_iterator *)ref_iterator;
|
||||||
* our in-memory view is from before the migration. get_packed_ref_cache()
|
int ok;
|
||||||
* takes care of making sure our view is up to date with what is on
|
|
||||||
* disk.
|
|
||||||
*/
|
|
||||||
loose_dir = get_loose_refs(refs);
|
|
||||||
if (base && *base) {
|
|
||||||
loose_dir = find_containing_dir(loose_dir, base, 0);
|
|
||||||
}
|
|
||||||
if (loose_dir)
|
|
||||||
prime_ref_dir(loose_dir);
|
|
||||||
|
|
||||||
packed_ref_cache = get_packed_ref_cache(refs);
|
while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
|
||||||
acquire_packed_ref_cache(packed_ref_cache);
|
if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
|
||||||
packed_dir = get_packed_ref_dir(packed_ref_cache);
|
!ref_resolves_to_object(iter->iter0->refname,
|
||||||
if (base && *base) {
|
iter->iter0->oid,
|
||||||
packed_dir = find_containing_dir(packed_dir, base, 0);
|
iter->iter0->flags))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
iter->base.refname = iter->iter0->refname;
|
||||||
|
iter->base.oid = iter->iter0->oid;
|
||||||
|
iter->base.flags = iter->iter0->flags;
|
||||||
|
return ITER_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (packed_dir && loose_dir) {
|
iter->iter0 = NULL;
|
||||||
-		sort_ref_dir(packed_dir);
-		sort_ref_dir(loose_dir);
-		retval = do_for_each_entry_in_dirs(
-				packed_dir, loose_dir, fn, cb_data);
-	} else if (packed_dir) {
-		sort_ref_dir(packed_dir);
-		retval = do_for_each_entry_in_dir(
-				packed_dir, 0, fn, cb_data);
-	} else if (loose_dir) {
-		sort_ref_dir(loose_dir);
-		retval = do_for_each_entry_in_dir(
-				loose_dir, 0, fn, cb_data);
-	}
-
-	release_packed_ref_cache(packed_ref_cache);
-	return retval;
+	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
+		ok = ITER_ERROR;
+
+	return ok;
 }
 
-/*
- * Call fn for each reference in the specified ref_cache for which the
- * refname begins with base. If trim is non-zero, then trim that many
- * characters off the beginning of each refname before passing the
- * refname to fn. flags can be DO_FOR_EACH_INCLUDE_BROKEN to include
- * broken references in the iteration. If fn ever returns a non-zero
- * value, stop the iteration and return that value; otherwise, return
- * 0.
- */
-int do_for_each_ref(const char *submodule, const char *base,
-		    each_ref_fn fn, int trim, int flags, void *cb_data)
-{
-	struct ref_entry_cb data;
-	struct ref_cache *refs;
-
-	refs = get_ref_cache(submodule);
-	data.base = base;
-	data.trim = trim;
-	data.flags = flags;
-	data.fn = fn;
-	data.cb_data = cb_data;
-
-	if (ref_paranoia < 0)
-		ref_paranoia = git_env_bool("GIT_REF_PARANOIA", 0);
-	if (ref_paranoia)
-		data.flags |= DO_FOR_EACH_INCLUDE_BROKEN;
-
-	return do_for_each_entry(refs, base, do_one_ref, &data);
+static int files_ref_iterator_peel(struct ref_iterator *ref_iterator,
+				   struct object_id *peeled)
+{
+	struct files_ref_iterator *iter =
+		(struct files_ref_iterator *)ref_iterator;
+
+	return ref_iterator_peel(iter->iter0, peeled);
+}
+
+static int files_ref_iterator_abort(struct ref_iterator *ref_iterator)
+{
+	struct files_ref_iterator *iter =
+		(struct files_ref_iterator *)ref_iterator;
+	int ok = ITER_DONE;
+
+	if (iter->iter0)
+		ok = ref_iterator_abort(iter->iter0);
+
+	release_packed_ref_cache(iter->packed_ref_cache);
+	base_ref_iterator_free(ref_iterator);
+	return ok;
+}
+
+static struct ref_iterator_vtable files_ref_iterator_vtable = {
+	files_ref_iterator_advance,
+	files_ref_iterator_peel,
+	files_ref_iterator_abort
+};
+
+struct ref_iterator *files_ref_iterator_begin(
+		const char *submodule,
+		const char *prefix, unsigned int flags)
+{
+	struct ref_cache *refs = get_ref_cache(submodule);
+	struct ref_dir *loose_dir, *packed_dir;
+	struct ref_iterator *loose_iter, *packed_iter;
+	struct files_ref_iterator *iter;
+	struct ref_iterator *ref_iterator;
+
+	if (!refs)
+		return empty_ref_iterator_begin();
+
+	if (ref_paranoia < 0)
+		ref_paranoia = git_env_bool("GIT_REF_PARANOIA", 0);
+	if (ref_paranoia)
+		flags |= DO_FOR_EACH_INCLUDE_BROKEN;
+
+	iter = xcalloc(1, sizeof(*iter));
+	ref_iterator = &iter->base;
+	base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);
+
+	/*
+	 * We must make sure that all loose refs are read before
+	 * accessing the packed-refs file; this avoids a race
+	 * condition if loose refs are migrated to the packed-refs
+	 * file by a simultaneous process, but our in-memory view is
+	 * from before the migration. We ensure this as follows:
+	 * First, we call prime_ref_dir(), which pre-reads the loose
+	 * references for the subtree into the cache. (If they've
+	 * already been read, that's OK; we only need to guarantee
+	 * that they're read before the packed refs, not *how much*
+	 * before.) After that, we call get_packed_ref_cache(), which
+	 * internally checks whether the packed-ref cache is up to
+	 * date with what is on disk, and re-reads it if not.
+	 */
+
+	loose_dir = get_loose_refs(refs);
+
+	if (prefix && *prefix)
+		loose_dir = find_containing_dir(loose_dir, prefix, 0);
+
+	if (loose_dir) {
+		prime_ref_dir(loose_dir);
+		loose_iter = cache_ref_iterator_begin(loose_dir);
+	} else {
+		/* There's nothing to iterate over. */
+		loose_iter = empty_ref_iterator_begin();
+	}
+
+	iter->packed_ref_cache = get_packed_ref_cache(refs);
+	acquire_packed_ref_cache(iter->packed_ref_cache);
+	packed_dir = get_packed_ref_dir(iter->packed_ref_cache);
+
+	if (prefix && *prefix)
+		packed_dir = find_containing_dir(packed_dir, prefix, 0);
+
+	if (packed_dir) {
+		packed_iter = cache_ref_iterator_begin(packed_dir);
+	} else {
+		/* There's nothing to iterate over. */
+		packed_iter = empty_ref_iterator_begin();
+	}
+
+	iter->iter0 = overlay_ref_iterator_begin(loose_iter, packed_iter);
+	iter->flags = flags;
+
+	return ref_iterator;
 }
 
 /*
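Although callers normally reach this code through the for_each_ref*() wrappers, the shape of the iteration the new backend expects can be sketched as follows. This is an illustrative sketch only, not code from this commit; the helper count_heads and the direct call into files_ref_iterator_begin() are assumptions.

/*
 * Sketch: count the branches in the main repository by driving the
 * new iterator API directly (hypothetical helper, not in this commit).
 */
#include "cache.h"
#include "refs.h"
#include "refs/refs-internal.h"

static int count_heads(void)
{
	/* NULL submodule = main repository; no trimming, no extra flags. */
	struct ref_iterator *iter =
		files_ref_iterator_begin(NULL, "refs/heads/", 0);
	int ok, n = 0;

	while ((ok = ref_iterator_advance(iter)) == ITER_OK)
		n++;	/* iter->refname and iter->oid describe this ref */

	if (ok != ITER_DONE)
		return -1;	/* the iterator frees itself on error, too */
	return n;
}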
@@ -2226,7 +2327,7 @@ static int pack_if_possible_fn(struct ref_entry *entry, void *cb_data)
 		return 0;
 
 	/* Do not pack symbolic or broken refs: */
-	if ((entry->flag & REF_ISSYMREF) || !ref_resolves_to_object(entry))
+	if ((entry->flag & REF_ISSYMREF) || !entry_resolves_to_object(entry))
 		return 0;
 
 	/* Add a packed ref cache entry equivalent to the loose entry. */
@@ -2412,7 +2513,7 @@ static int delete_ref_loose(struct ref_lock *lock, int flag, struct strbuf *err)
 	return 0;
 }
 
-int delete_refs(struct string_list *refnames)
+int delete_refs(struct string_list *refnames, unsigned int flags)
 {
 	struct strbuf err = STRBUF_INIT;
 	int i, result = 0;
@@ -2441,7 +2542,7 @@ int delete_refs(struct string_list *refnames)
 	for (i = 0; i < refnames->nr; i++) {
 		const char *refname = refnames->items[i].string;
 
-		if (delete_ref(refname, NULL, 0))
+		if (delete_ref(refname, NULL, flags))
 			result |= error(_("could not remove reference %s"), refname);
 	}
 
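The new flags argument is simply forwarded to delete_ref() for every name in the list. A minimal sketch of the updated calling convention (the ref names and the helper are hypothetical; a caller that must not dereference symbolic refs would pass a flag such as REF_NODEREF instead of 0):

/* Hypothetical caller of the new delete_refs() signature. */
#include "cache.h"
#include "refs.h"
#include "string-list.h"

static int drop_temporary_refs(void)
{
	struct string_list refnames = STRING_LIST_INIT_DUP;
	int result;

	string_list_append(&refnames, "refs/tmp/a");
	string_list_append(&refnames, "refs/tmp/b");

	/* The second argument reaches delete_ref() unchanged. */
	result = delete_refs(&refnames, 0);

	string_list_clear(&refnames, 0);
	return result;
}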
@@ -3191,60 +3292,88 @@ int for_each_reflog_ent(const char *refname, each_reflog_ent_fn fn, void *cb_dat
 	strbuf_release(&sb);
 	return ret;
 }
-/*
- * Call fn for each reflog in the namespace indicated by name. name
- * must be empty or end with '/'. Name will be used as a scratch
- * space, but its contents will be restored before return.
- */
-static int do_for_each_reflog(struct strbuf *name, each_ref_fn fn, void *cb_data)
-{
-	DIR *d = opendir(git_path("logs/%s", name->buf));
-	int retval = 0;
-	struct dirent *de;
-	int oldlen = name->len;
-
-	if (!d)
-		return name->len ? errno : 0;
-
-	while ((de = readdir(d)) != NULL) {
-		struct stat st;
-
-		if (de->d_name[0] == '.')
-			continue;
-		if (ends_with(de->d_name, ".lock"))
-			continue;
-		strbuf_addstr(name, de->d_name);
-		if (stat(git_path("logs/%s", name->buf), &st) < 0) {
-			; /* silently ignore */
-		} else {
-			if (S_ISDIR(st.st_mode)) {
-				strbuf_addch(name, '/');
-				retval = do_for_each_reflog(name, fn, cb_data);
-			} else {
-				struct object_id oid;
-
-				if (read_ref_full(name->buf, 0, oid.hash, NULL))
-					retval = error("bad ref for %s", name->buf);
-				else
-					retval = fn(name->buf, &oid, 0, cb_data);
-			}
-			if (retval)
-				break;
-		}
-		strbuf_setlen(name, oldlen);
-	}
-	closedir(d);
-	return retval;
-}
+
+struct files_reflog_iterator {
+	struct ref_iterator base;
+
+	struct dir_iterator *dir_iterator;
+	struct object_id oid;
+};
+
+static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
+{
+	struct files_reflog_iterator *iter =
+		(struct files_reflog_iterator *)ref_iterator;
+	struct dir_iterator *diter = iter->dir_iterator;
+	int ok;
+
+	while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
+		int flags;
+
+		if (!S_ISREG(diter->st.st_mode))
+			continue;
+		if (diter->basename[0] == '.')
+			continue;
+		if (ends_with(diter->basename, ".lock"))
+			continue;
+
+		if (read_ref_full(diter->relative_path, 0,
+				  iter->oid.hash, &flags)) {
+			error("bad ref for %s", diter->path.buf);
+			continue;
+		}
+
+		iter->base.refname = diter->relative_path;
+		iter->base.oid = &iter->oid;
+		iter->base.flags = flags;
+		return ITER_OK;
+	}
+
+	iter->dir_iterator = NULL;
+	if (ref_iterator_abort(ref_iterator) == ITER_ERROR)
+		ok = ITER_ERROR;
+	return ok;
+}
+
+static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator,
+				      struct object_id *peeled)
+{
+	die("BUG: ref_iterator_peel() called for reflog_iterator");
+}
+
+static int files_reflog_iterator_abort(struct ref_iterator *ref_iterator)
+{
+	struct files_reflog_iterator *iter =
+		(struct files_reflog_iterator *)ref_iterator;
+	int ok = ITER_DONE;
+
+	if (iter->dir_iterator)
+		ok = dir_iterator_abort(iter->dir_iterator);
+
+	base_ref_iterator_free(ref_iterator);
+	return ok;
+}
+
+static struct ref_iterator_vtable files_reflog_iterator_vtable = {
+	files_reflog_iterator_advance,
+	files_reflog_iterator_peel,
+	files_reflog_iterator_abort
+};
+
+struct ref_iterator *files_reflog_iterator_begin(void)
+{
+	struct files_reflog_iterator *iter = xcalloc(1, sizeof(*iter));
+	struct ref_iterator *ref_iterator = &iter->base;
+
+	base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
+	iter->dir_iterator = dir_iterator_begin(git_path("logs"));
+	return ref_iterator;
+}
 
 int for_each_reflog(each_ref_fn fn, void *cb_data)
 {
-	int retval;
-	struct strbuf name;
-	strbuf_init(&name, PATH_MAX);
-	retval = do_for_each_reflog(&name, fn, cb_data);
-	strbuf_release(&name);
-	return retval;
+	return do_for_each_ref_iterator(files_reflog_iterator_begin(),
+					fn, cb_data);
 }
 
 static int ref_update_reject_duplicates(struct string_list *refnames,
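With for_each_reflog() now built on the reflog iterator, callers still see the familiar callback interface. A small usage sketch (the callback, the counter, and the helper name are assumptions; the callback signature follows the each_ref_fn calls visible elsewhere in this diff):

/* Hypothetical: count how many references have a reflog. */
#include "cache.h"
#include "refs.h"

static int count_one_reflog(const char *refname, const struct object_id *oid,
			    int flags, void *cb_data)
{
	int *count = cb_data;
	(*count)++;
	return 0;	/* a non-zero return would stop the iteration */
}

static int count_reflogs(void)
{
	int count = 0;

	if (for_each_reflog(count_one_reflog, &count) < 0)
		return -1;	/* iterator error */
	return count;
}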
384 refs/iterator.c Normal file
@@ -0,0 +1,384 @@
/*
 * Generic reference iterator infrastructure. See refs-internal.h for
 * documentation about the design and use of reference iterators.
 */

#include "cache.h"
#include "refs.h"
#include "refs/refs-internal.h"
#include "iterator.h"

int ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	return ref_iterator->vtable->advance(ref_iterator);
}

int ref_iterator_peel(struct ref_iterator *ref_iterator,
		      struct object_id *peeled)
{
	return ref_iterator->vtable->peel(ref_iterator, peeled);
}

int ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	return ref_iterator->vtable->abort(ref_iterator);
}

void base_ref_iterator_init(struct ref_iterator *iter,
			    struct ref_iterator_vtable *vtable)
{
	iter->vtable = vtable;
	iter->refname = NULL;
	iter->oid = NULL;
	iter->flags = 0;
}

void base_ref_iterator_free(struct ref_iterator *iter)
{
	/* Help make use-after-free bugs fail quickly: */
	iter->vtable = NULL;
	free(iter);
}

struct empty_ref_iterator {
	struct ref_iterator base;
};

static int empty_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	return ref_iterator_abort(ref_iterator);
}

static int empty_ref_iterator_peel(struct ref_iterator *ref_iterator,
				   struct object_id *peeled)
{
	die("BUG: peel called for empty iterator");
}

static int empty_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	base_ref_iterator_free(ref_iterator);
	return ITER_DONE;
}

static struct ref_iterator_vtable empty_ref_iterator_vtable = {
	empty_ref_iterator_advance,
	empty_ref_iterator_peel,
	empty_ref_iterator_abort
};

struct ref_iterator *empty_ref_iterator_begin(void)
{
	struct empty_ref_iterator *iter = xcalloc(1, sizeof(*iter));
	struct ref_iterator *ref_iterator = &iter->base;

	base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable);
	return ref_iterator;
}

int is_empty_ref_iterator(struct ref_iterator *ref_iterator)
{
	return ref_iterator->vtable == &empty_ref_iterator_vtable;
}

struct merge_ref_iterator {
	struct ref_iterator base;

	struct ref_iterator *iter0, *iter1;

	ref_iterator_select_fn *select;
	void *cb_data;

	/*
	 * A pointer to iter0 or iter1 (whichever is supplying the
	 * current value), or NULL if advance has not yet been called.
	 */
	struct ref_iterator **current;
};

static int merge_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct merge_ref_iterator *iter =
		(struct merge_ref_iterator *)ref_iterator;
	int ok;

	if (!iter->current) {
		/* Initialize: advance both iterators to their first entries */
		if ((ok = ref_iterator_advance(iter->iter0)) != ITER_OK) {
			iter->iter0 = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
		if ((ok = ref_iterator_advance(iter->iter1)) != ITER_OK) {
			iter->iter1 = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
	} else {
		/*
		 * Advance the current iterator past the just-used
		 * entry:
		 */
		if ((ok = ref_iterator_advance(*iter->current)) != ITER_OK) {
			*iter->current = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
	}

	/* Loop until we find an entry that we can yield. */
	while (1) {
		struct ref_iterator **secondary;
		enum iterator_selection selection =
			iter->select(iter->iter0, iter->iter1, iter->cb_data);

		if (selection == ITER_SELECT_DONE) {
			return ref_iterator_abort(ref_iterator);
		} else if (selection == ITER_SELECT_ERROR) {
			ref_iterator_abort(ref_iterator);
			return ITER_ERROR;
		}

		if ((selection & ITER_CURRENT_SELECTION_MASK) == 0) {
			iter->current = &iter->iter0;
			secondary = &iter->iter1;
		} else {
			iter->current = &iter->iter1;
			secondary = &iter->iter0;
		}

		if (selection & ITER_SKIP_SECONDARY) {
			if ((ok = ref_iterator_advance(*secondary)) != ITER_OK) {
				*secondary = NULL;
				if (ok == ITER_ERROR)
					goto error;
			}
		}

		if (selection & ITER_YIELD_CURRENT) {
			iter->base.refname = (*iter->current)->refname;
			iter->base.oid = (*iter->current)->oid;
			iter->base.flags = (*iter->current)->flags;
			return ITER_OK;
		}
	}

error:
	ref_iterator_abort(ref_iterator);
	return ITER_ERROR;
}

static int merge_ref_iterator_peel(struct ref_iterator *ref_iterator,
				   struct object_id *peeled)
{
	struct merge_ref_iterator *iter =
		(struct merge_ref_iterator *)ref_iterator;

	if (!iter->current) {
		die("BUG: peel called before advance for merge iterator");
	}
	return ref_iterator_peel(*iter->current, peeled);
}

static int merge_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct merge_ref_iterator *iter =
		(struct merge_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0) {
		if (ref_iterator_abort(iter->iter0) != ITER_DONE)
			ok = ITER_ERROR;
	}
	if (iter->iter1) {
		if (ref_iterator_abort(iter->iter1) != ITER_DONE)
			ok = ITER_ERROR;
	}
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable merge_ref_iterator_vtable = {
	merge_ref_iterator_advance,
	merge_ref_iterator_peel,
	merge_ref_iterator_abort
};

struct ref_iterator *merge_ref_iterator_begin(
		struct ref_iterator *iter0, struct ref_iterator *iter1,
		ref_iterator_select_fn *select, void *cb_data)
{
	struct merge_ref_iterator *iter = xcalloc(1, sizeof(*iter));
	struct ref_iterator *ref_iterator = &iter->base;

	/*
	 * We can't do the same kind of is_empty_ref_iterator()-style
	 * optimization here as overlay_ref_iterator_begin() does,
	 * because we don't know the semantics of the select function.
	 * It might, for example, implement "intersect" by passing
	 * references through only if they exist in both iterators.
	 */

	base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable);
	iter->iter0 = iter0;
	iter->iter1 = iter1;
	iter->select = select;
	iter->cb_data = cb_data;
	iter->current = NULL;
	return ref_iterator;
}

/*
 * A ref_iterator_select_fn that overlays the items from front on top
 * of those from back (like loose refs over packed refs). See
 * overlay_ref_iterator_begin().
 */
static enum iterator_selection overlay_iterator_select(
		struct ref_iterator *front, struct ref_iterator *back,
		void *cb_data)
{
	int cmp;

	if (!back)
		return front ? ITER_SELECT_0 : ITER_SELECT_DONE;
	else if (!front)
		return ITER_SELECT_1;

	cmp = strcmp(front->refname, back->refname);

	if (cmp < 0)
		return ITER_SELECT_0;
	else if (cmp > 0)
		return ITER_SELECT_1;
	else
		return ITER_SELECT_0_SKIP_1;
}

struct ref_iterator *overlay_ref_iterator_begin(
		struct ref_iterator *front, struct ref_iterator *back)
{
	/*
	 * Optimization: if one of the iterators is empty, return the
	 * other one rather than incurring the overhead of wrapping
	 * them.
	 */
	if (is_empty_ref_iterator(front)) {
		ref_iterator_abort(front);
		return back;
	} else if (is_empty_ref_iterator(back)) {
		ref_iterator_abort(back);
		return front;
	}

	return merge_ref_iterator_begin(front, back,
					overlay_iterator_select, NULL);
}

struct prefix_ref_iterator {
	struct ref_iterator base;

	struct ref_iterator *iter0;
	char *prefix;
	int trim;
};

static int prefix_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct prefix_ref_iterator *iter =
		(struct prefix_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		if (!starts_with(iter->iter0->refname, iter->prefix))
			continue;

		iter->base.refname = iter->iter0->refname + iter->trim;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		return ITER_ERROR;
	return ok;
}

static int prefix_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct prefix_ref_iterator *iter =
		(struct prefix_ref_iterator *)ref_iterator;

	return ref_iterator_peel(iter->iter0, peeled);
}

static int prefix_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct prefix_ref_iterator *iter =
		(struct prefix_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0)
		ok = ref_iterator_abort(iter->iter0);
	free(iter->prefix);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable prefix_ref_iterator_vtable = {
	prefix_ref_iterator_advance,
	prefix_ref_iterator_peel,
	prefix_ref_iterator_abort
};

struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
					       const char *prefix,
					       int trim)
{
	struct prefix_ref_iterator *iter;
	struct ref_iterator *ref_iterator;

	if (!*prefix && !trim)
		return iter0; /* optimization: no need to wrap iterator */

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;

	base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable);

	iter->iter0 = iter0;
	iter->prefix = xstrdup(prefix);
	iter->trim = trim;

	return ref_iterator;
}

struct ref_iterator *current_ref_iter = NULL;

int do_for_each_ref_iterator(struct ref_iterator *iter,
			     each_ref_fn fn, void *cb_data)
{
	int retval = 0, ok;
	struct ref_iterator *old_ref_iter = current_ref_iter;

	current_ref_iter = iter;
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		retval = fn(iter->refname, iter->oid, iter->flags, cb_data);
		if (retval) {
			/*
			 * If ref_iterator_abort() returns ITER_ERROR,
			 * we ignore that error in deference to the
			 * callback function's return value.
			 */
			ref_iterator_abort(iter);
			goto out;
		}
	}

out:
	current_ref_iter = old_ref_iter;
	if (ok == ITER_ERROR)
		return -1;
	return retval;
}
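These building blocks are meant to be composed: a backend iterator can be wrapped in a prefix filter and then handed to do_for_each_ref_iterator() to serve the traditional callback interface. The sketch below shows one such composition; the wrapper name and its argument choices are assumptions, not code from this commit.

/* Hypothetical composition of the iterators defined above. */
#include "cache.h"
#include "refs.h"
#include "refs/refs-internal.h"

static int for_each_tag_sketch(each_ref_fn fn, void *cb_data)
{
	struct ref_iterator *iter =
		files_ref_iterator_begin(NULL, "refs/tags/", 0);

	/* Trim "refs/tags/" (10 characters) off each refname passed to fn. */
	iter = prefix_ref_iterator_begin(iter, "refs/tags/", 10);

	return do_for_each_ref_iterator(iter, fn, cb_data);
}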
@@ -249,10 +249,230 @@ int rename_ref_available(const char *oldname, const char *newname);
 #define DO_FOR_EACH_INCLUDE_BROKEN 0x01
 
 /*
- * The common backend for the for_each_*ref* functions
+ * Reference iterators
+ *
+ * A reference iterator encapsulates the state of an in-progress
+ * iteration over references. Create an instance of `struct
+ * ref_iterator` via one of the functions in this module.
+ *
+ * A freshly-created ref_iterator doesn't yet point at a reference. To
+ * advance the iterator, call ref_iterator_advance(). If successful,
+ * this sets the iterator's refname, oid, and flags fields to describe
+ * the next reference and returns ITER_OK. The data pointed at by
+ * refname and oid belong to the iterator; if you want to retain them
+ * after calling ref_iterator_advance() again or calling
+ * ref_iterator_abort(), you must make a copy. When the iteration has
+ * been exhausted, ref_iterator_advance() releases any resources
+ * associated with the iteration, frees the ref_iterator object, and
+ * returns ITER_DONE. If you want to abort the iteration early, call
+ * ref_iterator_abort(), which also frees the ref_iterator object and
+ * any associated resources. If there was an internal error advancing
+ * to the next entry, ref_iterator_advance() aborts the iteration,
+ * frees the ref_iterator, and returns ITER_ERROR.
+ *
+ * The reference currently being looked at can be peeled by calling
+ * ref_iterator_peel(). This function is often faster than peel_ref(),
+ * so it should be preferred when iterating over references.
+ *
+ * Putting it all together, a typical iteration looks like this:
+ *
+ *     int ok;
+ *     struct ref_iterator *iter = ...;
+ *
+ *     while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
+ *             if (want_to_stop_iteration()) {
+ *                     ok = ref_iterator_abort(iter);
+ *                     break;
+ *             }
+ *
+ *             // Access information about the current reference:
+ *             if (!(iter->flags & REF_ISSYMREF))
+ *                     printf("%s is %s\n", iter->refname, oid_to_hex(iter->oid));
+ *
+ *             // If you need to peel the reference:
+ *             ref_iterator_peel(iter, &oid);
+ *     }
+ *
+ *     if (ok != ITER_DONE)
+ *             handle_error();
  */
-int do_for_each_ref(const char *submodule, const char *base,
-		    each_ref_fn fn, int trim, int flags, void *cb_data);
+struct ref_iterator {
+	struct ref_iterator_vtable *vtable;
+	const char *refname;
+	const struct object_id *oid;
+	unsigned int flags;
+};
+
+/*
+ * Advance the iterator to the first or next item and return ITER_OK.
+ * If the iteration is exhausted, free the resources associated with
+ * the ref_iterator and return ITER_DONE. On errors, free the iterator
+ * resources and return ITER_ERROR. It is a bug to use ref_iterator or
+ * call this function again after it has returned ITER_DONE or
+ * ITER_ERROR.
+ */
+int ref_iterator_advance(struct ref_iterator *ref_iterator);
+
+/*
+ * If possible, peel the reference currently being viewed by the
+ * iterator. Return 0 on success.
+ */
+int ref_iterator_peel(struct ref_iterator *ref_iterator,
+		      struct object_id *peeled);
+
+/*
+ * End the iteration before it has been exhausted, freeing the
+ * reference iterator and any associated resources and returning
+ * ITER_DONE. If the abort itself failed, return ITER_ERROR.
+ */
+int ref_iterator_abort(struct ref_iterator *ref_iterator);
+
+/*
+ * An iterator over nothing (its first ref_iterator_advance() call
+ * returns ITER_DONE).
+ */
+struct ref_iterator *empty_ref_iterator_begin(void);
+
+/*
+ * Return true iff ref_iterator is an empty_ref_iterator.
+ */
+int is_empty_ref_iterator(struct ref_iterator *ref_iterator);
+
+/*
+ * A callback function used to instruct merge_ref_iterator how to
+ * interleave the entries from iter0 and iter1. The function should
+ * return one of the constants defined in enum iterator_selection. It
+ * must not advance either of the iterators itself.
+ *
+ * The function must be prepared to handle the case that iter0 and/or
+ * iter1 is NULL, which indicates that the corresponding sub-iterator
+ * has been exhausted. Its return value must be consistent with the
+ * current states of the iterators; e.g., it must not return
+ * ITER_SKIP_1 if iter1 has already been exhausted.
+ */
+typedef enum iterator_selection ref_iterator_select_fn(
+		struct ref_iterator *iter0, struct ref_iterator *iter1,
+		void *cb_data);
+
+/*
+ * Iterate over the entries from iter0 and iter1, with the values
+ * interleaved as directed by the select function. The iterator takes
+ * ownership of iter0 and iter1 and frees them when the iteration is
+ * over.
+ */
+struct ref_iterator *merge_ref_iterator_begin(
+		struct ref_iterator *iter0, struct ref_iterator *iter1,
+		ref_iterator_select_fn *select, void *cb_data);
+
+/*
+ * An iterator consisting of the union of the entries from front and
+ * back. If there are entries common to the two sub-iterators, use the
+ * one from front. Each iterator must iterate over its entries in
+ * strcmp() order by refname for this to work.
+ *
+ * The new iterator takes ownership of its arguments and frees them
+ * when the iteration is over. As a convenience to callers, if front
+ * or back is an empty_ref_iterator, then abort that one immediately
+ * and return the other iterator directly, without wrapping it.
+ */
+struct ref_iterator *overlay_ref_iterator_begin(
+		struct ref_iterator *front, struct ref_iterator *back);
+
+/*
+ * Wrap iter0, only letting through the references whose names start
+ * with prefix. If trim is set, set iter->refname to the name of the
+ * reference with that many characters trimmed off the front;
+ * otherwise set it to the full refname. The new iterator takes over
+ * ownership of iter0 and frees it when iteration is over. It makes
+ * its own copy of prefix.
+ *
+ * As a convenience to callers, if prefix is the empty string and
+ * trim is zero, this function returns iter0 directly, without
+ * wrapping it.
+ */
+struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
+					       const char *prefix,
+					       int trim);
+
+/*
+ * Iterate over the packed and loose references in the specified
+ * submodule that are within find_containing_dir(prefix). If prefix is
+ * NULL or the empty string, iterate over all references in the
+ * submodule.
+ */
+struct ref_iterator *files_ref_iterator_begin(const char *submodule,
+					      const char *prefix,
+					      unsigned int flags);
+
+/*
+ * Iterate over the references in the main ref_store that have a
+ * reflog. The paths within a directory are iterated over in arbitrary
+ * order.
+ */
+struct ref_iterator *files_reflog_iterator_begin(void);
+
+/* Internal implementation of reference iteration: */
+
+/*
+ * Base class constructor for ref_iterators. Initialize the
+ * ref_iterator part of iter, setting its vtable pointer as specified.
+ * This is meant to be called only by the initializers of derived
+ * classes.
+ */
+void base_ref_iterator_init(struct ref_iterator *iter,
+			    struct ref_iterator_vtable *vtable);
+
+/*
+ * Base class destructor for ref_iterators. Destroy the ref_iterator
+ * part of iter and shallow-free the object. This is meant to be
+ * called only by the destructors of derived classes.
+ */
+void base_ref_iterator_free(struct ref_iterator *iter);
+
+/* Virtual function declarations for ref_iterators: */
+
+typedef int ref_iterator_advance_fn(struct ref_iterator *ref_iterator);
+
+typedef int ref_iterator_peel_fn(struct ref_iterator *ref_iterator,
+				 struct object_id *peeled);
+
+/*
+ * Implementations of this function should free any resources specific
+ * to the derived class, then call base_ref_iterator_free() to clean
+ * up and free the ref_iterator object.
+ */
+typedef int ref_iterator_abort_fn(struct ref_iterator *ref_iterator);
+
+struct ref_iterator_vtable {
+	ref_iterator_advance_fn *advance;
+	ref_iterator_peel_fn *peel;
+	ref_iterator_abort_fn *abort;
+};
+
+/*
+ * current_ref_iter is a performance hack: when iterating over
+ * references using the for_each_ref*() functions, current_ref_iter is
+ * set to the reference iterator before calling the callback function.
+ * If the callback function calls peel_ref(), then peel_ref() first
+ * checks whether the reference to be peeled is the one referred to by
+ * the iterator (it usually is) and if so, asks the iterator for the
+ * peeled version of the reference if it is available. This avoids a
+ * refname lookup in a common case. current_ref_iter is set to NULL
+ * when the iteration is over.
+ */
+extern struct ref_iterator *current_ref_iter;
+
+/*
+ * The common backend for the for_each_*ref* functions. Call fn for
+ * each reference in iter. If the iterator itself ever returns
+ * ITER_ERROR, return -1. If fn ever returns a non-zero value, stop
+ * the iteration and return that value. Otherwise, return 0. In any
+ * case, free the iterator when done. This function is basically an
+ * adapter between the callback style of reference iteration and the
+ * iterator style.
+ */
+int do_for_each_ref_iterator(struct ref_iterator *iter,
+			     each_ref_fn fn, void *cb_data);
 
 /*
  * Read the specified reference from the filesystem or packed refs
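The vtable plus the base_ref_iterator_init()/base_ref_iterator_free() pair is what a new "derived class" builds on. Purely as an illustration of that pattern (nothing like this is added by the commit; every name below is hypothetical), an iterator over a fixed, NULL-terminated array of refnames could look like this:

/* Hypothetical array-backed ref_iterator, for illustration only. */
#include "cache.h"
#include "refs.h"
#include "refs/refs-internal.h"

struct array_ref_iterator {
	struct ref_iterator base;
	const char **refnames;	/* NULL-terminated array of refnames */
	size_t pos;
	struct object_id oid;	/* stays all-zero in this toy example */
};

static int array_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct array_ref_iterator *iter =
		(struct array_ref_iterator *)ref_iterator;

	if (!iter->refnames[iter->pos])
		return ref_iterator_abort(ref_iterator);

	iter->base.refname = iter->refnames[iter->pos++];
	iter->base.oid = &iter->oid;
	iter->base.flags = 0;
	return ITER_OK;
}

static int array_ref_iterator_peel(struct ref_iterator *ref_iterator,
				   struct object_id *peeled)
{
	return -1;	/* nothing to peel in this toy iterator */
}

static int array_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	base_ref_iterator_free(ref_iterator);
	return ITER_DONE;
}

static struct ref_iterator_vtable array_ref_iterator_vtable = {
	array_ref_iterator_advance,
	array_ref_iterator_peel,
	array_ref_iterator_abort
};

struct ref_iterator *array_ref_iterator_begin(const char **refnames)
{
	struct array_ref_iterator *iter = xcalloc(1, sizeof(*iter));
	struct ref_iterator *ref_iterator = &iter->base;

	base_ref_iterator_init(ref_iterator, &array_ref_iterator_vtable);
	iter->refnames = refnames;
	return ref_iterator;
}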