git-commit-vandalism/builtin-read-tree.c


/*
* GIT - The information manager from hell
*
* Copyright (C) Linus Torvalds, 2005
*/
#define DBRT_DEBUG 1
#include "cache.h"
#include "object.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include <sys/time.h>
#include <signal.h>
#include "builtin.h"
static int reset = 0;
static int merge = 0;
static int update = 0;
static int index_only = 0;
static int nontrivial_merge = 0;
static int trivial_merges_only = 0;
static int aggressive = 0;
static int verbose_update = 0;
static volatile int progress_update = 0;
static const char *prefix = NULL;
static int head_idx = -1;
static int merge_size = 0;
static struct object_list *trees = NULL;
static struct cache_entry df_conflict_entry;
struct tree_entry_list {
struct tree_entry_list *next;
unsigned directory : 1;
unsigned executable : 1;
unsigned symlink : 1;
unsigned int mode;
const char *name;
const unsigned char *sha1;
};
static struct tree_entry_list df_conflict_list;
typedef int (*merge_fn_t)(struct cache_entry **src);
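/*
 * Build a singly linked tree_entry_list from a parsed tree object,
 * preserving the on-disk entry order.  The walk uses the tree_entry()
 * helper; the calling idiom (shown here only as an illustration) is:
 *
 *   struct tree_desc desc;
 *   struct name_entry entry;
 *
 *   desc.buf = tree->buffer;
 *   desc.size = tree->size;
 *   while (tree_entry(&desc, &entry))
 *           ... use entry.path, entry.sha1, entry.mode ...
 */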
static struct tree_entry_list *create_tree_entry_list(struct tree *tree)
{
struct tree_desc desc;
struct name_entry one;
struct tree_entry_list *ret = NULL;
struct tree_entry_list **list_p = &ret;
desc.buf = tree->buffer;
desc.size = tree->size;
while (tree_entry(&desc, &one)) {
struct tree_entry_list *entry;
entry = xmalloc(sizeof(struct tree_entry_list));
entry->name = one.path;
entry->sha1 = one.sha1;
entry->mode = one.mode;
entry->directory = S_ISDIR(one.mode) != 0;
entry->executable = (one.mode & S_IXUSR) != 0;
entry->symlink = S_ISLNK(one.mode) != 0;
entry->next = NULL;
*list_p = entry;
list_p = &entry->next;
}
return ret;
}
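/*
 * Compare two names in tree order: a directory takes part in the
 * comparison as if it had a trailing '/'.  For example, with "t" as a
 * directory, "t-i" sorts before "t" (because '-' < '/') while "t0"
 * sorts after it (because '/' < '0').  A negative, zero or positive
 * return means name1 sorts before, equal to, or after name2.
 */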
static int entcmp(const char *name1, int dir1, const char *name2, int dir2)
{
int len1 = strlen(name1);
int len2 = strlen(name2);
int len = len1 < len2 ? len1 : len2;
int ret = memcmp(name1, name2, len);
unsigned char c1, c2;
if (ret)
return ret;
c1 = name1[len];
c2 = name2[len];
if (!c1 && dir1)
c1 = '/';
if (!c2 && dir2)
c2 = '/';
ret = (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
if (c1 && c2 && !ret)
ret = len1 - len2;
return ret;
}
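/*
 * Recursively walk one directory level: "posns" holds the current
 * position in each tree being unpacked and "base" is the directory
 * prefix ("" at the top level).  At every step the lexically first
 * remaining name (from the index or from any tree) is picked, the
 * matching entries are collected into "src" (the index entry first
 * when merging, then one slot per tree), and either the merge
 * function "fn" or a plain add_cache_entry() consumes them.
 * Subdirectories are parsed and descended into with "base" extended
 * by "name/".
 */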
static int unpack_trees_rec(struct tree_entry_list **posns, int len,
const char *base, merge_fn_t fn, int *indpos)
{
int baselen = strlen(base);
int src_size = len + 1;
do {
int i;
const char *first;
int firstdir = 0;
int pathlen;
unsigned ce_size;
struct tree_entry_list **subposns;
struct cache_entry **src;
int any_files = 0;
int any_dirs = 0;
char *cache_name;
int ce_stage;
/* Find the first name in the input. */
first = NULL;
cache_name = NULL;
/* Check the cache */
if (merge && *indpos < active_nr) {
/* This is a bit tricky: */
/* If the index has a subdirectory (with
* contents) as the first name, it'll get a
* filename like "foo/bar". But that's after
* "foo", so the entry in trees will get
* handled first, at which point we'll go into
* "foo", and deal with "bar" from the index,
* because the base will be "foo/". The only
* way we can actually have "foo/bar" first of
* all the things is if the trees don't
* contain "foo" at all, in which case we'll
* handle "foo/bar" without going into the
* directory, but that's fine (and will return
* an error anyway, with the added unknown
* file case).
*/
cache_name = active_cache[*indpos]->name;
if (strlen(cache_name) > baselen &&
!memcmp(cache_name, base, baselen)) {
cache_name += baselen;
first = cache_name;
} else {
cache_name = NULL;
}
}
#if DBRT_DEBUG > 1
if (first)
printf("index %s\n", first);
#endif
for (i = 0; i < len; i++) {
if (!posns[i] || posns[i] == &df_conflict_list)
continue;
#if DBRT_DEBUG > 1
printf("%d %s\n", i + 1, posns[i]->name);
#endif
if (!first || entcmp(first, firstdir,
posns[i]->name,
posns[i]->directory) > 0) {
first = posns[i]->name;
firstdir = posns[i]->directory;
}
}
/* No name means we're done */
if (!first)
return 0;
pathlen = strlen(first);
ce_size = cache_entry_size(baselen + pathlen);
src = xcalloc(src_size, sizeof(struct cache_entry *));
subposns = xcalloc(len, sizeof(struct tree_entry_list *));
if (cache_name && !strcmp(cache_name, first)) {
any_files = 1;
src[0] = active_cache[*indpos];
remove_cache_entry_at(*indpos);
}
for (i = 0; i < len; i++) {
struct cache_entry *ce;
if (!posns[i] ||
(posns[i] != &df_conflict_list &&
strcmp(first, posns[i]->name))) {
continue;
}
if (posns[i] == &df_conflict_list) {
src[i + merge] = &df_conflict_entry;
continue;
}
if (posns[i]->directory) {
struct tree *tree = lookup_tree(posns[i]->sha1);
any_dirs = 1;
parse_tree(tree);
subposns[i] = create_tree_entry_list(tree);
posns[i] = posns[i]->next;
src[i + merge] = &df_conflict_entry;
continue;
}
if (!merge)
ce_stage = 0;
else if (i + 1 < head_idx)
ce_stage = 1;
else if (i + 1 > head_idx)
ce_stage = 3;
else
ce_stage = 2;
ce = xcalloc(1, ce_size);
ce->ce_mode = create_ce_mode(posns[i]->mode);
ce->ce_flags = create_ce_flags(baselen + pathlen,
ce_stage);
memcpy(ce->name, base, baselen);
memcpy(ce->name + baselen, first, pathlen + 1);
any_files = 1;
memcpy(ce->sha1, posns[i]->sha1, 20);
src[i + merge] = ce;
subposns[i] = &df_conflict_list;
posns[i] = posns[i]->next;
}
if (any_files) {
if (merge) {
int ret;
#if DBRT_DEBUG > 1
printf("%s:\n", first);
for (i = 0; i < src_size; i++) {
printf(" %d ", i);
if (src[i])
printf("%s\n", sha1_to_hex(src[i]->sha1));
else
printf("\n");
}
#endif
ret = fn(src);
#if DBRT_DEBUG > 1
printf("Added %d entries\n", ret);
#endif
*indpos += ret;
} else {
for (i = 0; i < src_size; i++) {
if (src[i]) {
add_cache_entry(src[i], ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}
}
}
}
if (any_dirs) {
char *newbase = xmalloc(baselen + 2 + pathlen);
memcpy(newbase, base, baselen);
memcpy(newbase + baselen, first, pathlen);
newbase[baselen + pathlen] = '/';
newbase[baselen + pathlen + 1] = '\0';
if (unpack_trees_rec(subposns, len, newbase, fn,
indpos))
return -1;
free(newbase);
}
free(subposns);
free(src);
} while (1);
}
static void reject_merge(struct cache_entry *ce)
{
die("Entry '%s' would be overwritten by merge. Cannot merge.",
ce->name);
}
/* Unlink the last component and attempt to remove leading
* directories, in case this unlink is the removal of the
* last entry in the directory -- empty directories are removed.
*/
static void unlink_entry(char *name)
{
char *cp, *prev;
if (unlink(name))
return;
prev = NULL;
while (1) {
int status;
cp = strrchr(name, '/');
if (prev)
*prev = '/';
if (!cp)
break;
*cp = 0;
status = rmdir(name);
if (status) {
*cp = '/';
break;
}
prev = cp;
}
}
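/*
 * Progress reporting for the working-tree update: setup_progress_signal()
 * arms a repeating one-second SIGALRM timer whose handler merely sets
 * progress_update; check_updates() polls that flag so the display is
 * refreshed whenever the percentage changes or at least once per second.
 */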
static void progress_interval(int signum)
{
progress_update = 1;
}
static void setup_progress_signal(void)
{
struct sigaction sa;
struct itimerval v;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = progress_interval;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART;
sigaction(SIGALRM, &sa, NULL);
v.it_interval.tv_sec = 1;
v.it_interval.tv_usec = 0;
v.it_value = v.it_interval;
setitimer(ITIMER_REAL, &v, NULL);
}
static struct checkout state;
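/*
 * Apply the result of the merge to the working tree (only when -u was
 * given): entries whose ce_mode was cleared are unlinked, entries
 * flagged CE_UPDATE are checked out.  With -u -v and at least 250
 * affected paths a percentage progress line is written to stderr.
 */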
static void check_updates(struct cache_entry **src, int nr)
{
unsigned short mask = htons(CE_UPDATE);
unsigned last_percent = 200, cnt = 0, total = 0;
if (update && verbose_update) {
for (total = cnt = 0; cnt < nr; cnt++) {
struct cache_entry *ce = src[cnt];
if (!ce->ce_mode || ce->ce_flags & mask)
total++;
}
/* Don't bother doing this for very small updates */
if (total < 250)
total = 0;
if (total) {
fprintf(stderr, "Checking files out...\n");
setup_progress_signal();
progress_update = 1;
}
cnt = 0;
}
while (nr--) {
struct cache_entry *ce = *src++;
if (total) {
if (!ce->ce_mode || ce->ce_flags & mask) {
unsigned percent;
cnt++;
percent = (cnt * 100) / total;
if (percent != last_percent ||
progress_update) {
fprintf(stderr, "%4u%% (%u/%u) done\r",
percent, cnt, total);
last_percent = percent;
progress_update = 0;
}
}
}
if (!ce->ce_mode) {
if (update)
unlink_entry(ce->name);
continue;
}
if (ce->ce_flags & mask) {
ce->ce_flags &= ~mask;
if (update)
checkout_entry(ce, &state, NULL);
}
}
if (total) {
signal(SIGALRM, SIG_IGN);
fputc('\n', stderr);
}
}
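/*
 * Driver for the recursive walk: turn every tree on the global "trees"
 * list into a tree_entry_list, unpack them against the index with the
 * chosen merge function, then let check_updates() touch the working
 * tree.  Returns 0 on success, -1 if the recursion failed.
 */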
static int unpack_trees(merge_fn_t fn)
{
int indpos = 0;
unsigned len = object_list_length(trees);
struct tree_entry_list **posns;
int i;
struct object_list *posn = trees;
merge_size = len;
if (len) {
posns = xmalloc(len * sizeof(struct tree_entry_list *));
for (i = 0; i < len; i++) {
posns[i] = create_tree_entry_list((struct tree *) posn->item);
posn = posn->next;
}
if (unpack_trees_rec(posns, len, prefix ? prefix : "",
fn, &indpos))
return -1;
}
if (trivial_merges_only && nontrivial_merge)
die("Merge requires file-level merging");
check_updates(active_cache, active_nr);
return 0;
}
static int list_tree(unsigned char *sha1)
{
struct tree *tree = parse_tree_indirect(sha1);
if (!tree)
return -1;
object_list_append(&tree->object, &trees);
return 0;
}
static int same(struct cache_entry *a, struct cache_entry *b)
{
if (!!a != !!b)
return 0;
if (!a && !b)
return 1;
return a->ce_mode == b->ce_mode &&
!memcmp(a->sha1, b->sha1, 20);
}
/*
* When a CE gets turned into an unmerged entry, we
* want it to be up-to-date
*/
static void verify_uptodate(struct cache_entry *ce)
{
struct stat st;
if (index_only || reset)
return;
if (!lstat(ce->name, &st)) {
unsigned changed = ce_match_stat(ce, &st, 1);
if (!changed)
return;
errno = 0;
}
if (reset) {
ce->ce_flags |= htons(CE_UPDATE);
return;
}
if (errno == ENOENT)
return;
die("Entry '%s' not uptodate. Cannot merge.", ce->name);
}
static void invalidate_ce_path(struct cache_entry *ce)
{
if (ce)
cache_tree_invalidate_path(active_cache_tree, ce->name);
}
/*
* We do not want to remove or overwrite a working tree file that
* is not tracked.
*/
static void verify_absent(const char *path, const char *action)
{
struct stat st;
if (index_only || reset || !update)
return;
if (!lstat(path, &st))
die("Untracked working tree file '%s' "
"would be %s by merge.", path, action);
}
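/*
 * Record the merge result for one path at stage 0.  If the old index
 * entry already has the same mode and sha1, the whole entry (including
 * its stat information) is reused, which also drops the CE_UPDATE
 * flag; otherwise the old entry must be up to date and the new one is
 * scheduled for checkout.
 */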
static int merged_entry(struct cache_entry *merge, struct cache_entry *old)
{
merge->ce_flags |= htons(CE_UPDATE);
if (old) {
/*
* See if we can re-use the old CE directly?
* That way we get the uptodate stat info.
*
* This also removes the UPDATE flag on
* a match.
*/
if (same(old, merge)) {
*merge = *old;
} else {
verify_uptodate(old);
invalidate_ce_path(old);
}
}
else {
verify_absent(merge->name, "overwritten");
invalidate_ce_path(merge);
}
merge->ce_flags &= ~htons(CE_STAGEMASK);
add_cache_entry(merge, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
return 1;
}
static int deleted_entry(struct cache_entry *ce, struct cache_entry *old)
{
if (old)
verify_uptodate(old);
else
verify_absent(ce->name, "removed");
ce->ce_mode = 0;
add_cache_entry(ce, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
invalidate_ce_path(ce);
return 1;
}
static int keep_entry(struct cache_entry *ce)
{
add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
return 1;
}
#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
const char *label, const struct cache_entry *ce)
{
if (!ce)
fprintf(o, "%s (missing)\n", label);
else
fprintf(o, "%s%06o %s %d\t%s\n",
label,
ntohl(ce->ce_mode),
sha1_to_hex(ce->sha1),
ce_stage(ce),
ce->name);
}
#endif
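/*
 * Three-way merge.  The "stages" array is laid out as
 *
 *   stages[0]               - the current index entry, if any
 *   stages[1..head_idx-1]   - the common ancestor(s)
 *   stages[head_idx]        - our head
 *   stages[head_idx + 1]    - the remote tree being merged
 *
 * The #-numbered cases in the comments below refer to the trivial
 * merge case table described in the git-read-tree documentation.
 */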
static int threeway_merge(struct cache_entry **stages)
{
struct cache_entry *index;
struct cache_entry *head;
struct cache_entry *remote = stages[head_idx + 1];
int count;
int head_match = 0;
int remote_match = 0;
const char *path = NULL;
int df_conflict_head = 0;
int df_conflict_remote = 0;
int any_anc_missing = 0;
int no_anc_exists = 1;
int i;
for (i = 1; i < head_idx; i++) {
if (!stages[i])
any_anc_missing = 1;
else {
if (!path)
path = stages[i]->name;
no_anc_exists = 0;
}
}
index = stages[0];
head = stages[head_idx];
if (head == &df_conflict_entry) {
df_conflict_head = 1;
head = NULL;
}
if (remote == &df_conflict_entry) {
df_conflict_remote = 1;
remote = NULL;
}
if (!path && index)
path = index->name;
if (!path && head)
path = head->name;
if (!path && remote)
path = remote->name;
/* First, if there's a #16 situation, note that to prevent #13
* and #14.
*/
if (!same(remote, head)) {
for (i = 1; i < head_idx; i++) {
if (same(stages[i], head)) {
head_match = i;
}
if (same(stages[i], remote)) {
remote_match = i;
}
}
}
/* We start with cases where the index is allowed to match
* something other than the head: #14(ALT) and #2ALT, where it
* is permitted to match the result instead.
*/
/* #14, #14ALT, #2ALT */
if (remote && !df_conflict_head && head_match && !remote_match) {
if (index && !same(index, remote) && !same(index, head))
reject_merge(index);
return merged_entry(remote, index);
}
/*
* If we have an entry in the index cache, then we want to
* make sure that it matches head.
*/
if (index && !same(index, head)) {
reject_merge(index);
}
if (head) {
/* #5ALT, #15 */
if (same(head, remote))
return merged_entry(head, index);
/* #13, #3ALT */
if (!df_conflict_remote && remote_match && !head_match)
return merged_entry(head, index);
}
/* #1 */
if (!head && !remote && any_anc_missing)
return 0;
/* Under the new "aggressive" rule, we resolve mostly trivial
* cases that we historically had git-merge-one-file resolve.
*/
if (aggressive) {
int head_deleted = !head && !df_conflict_head;
int remote_deleted = !remote && !df_conflict_remote;
/*
* Deleted in both.
* Deleted in one and unchanged in the other.
*/
if ((head_deleted && remote_deleted) ||
(head_deleted && remote && remote_match) ||
(remote_deleted && head && head_match)) {
if (index)
return deleted_entry(index, index);
else if (path)
verify_absent(path, "removed");
return 0;
}
/*
* Added in both, identically.
*/
if (no_anc_exists && head && remote && same(head, remote))
return merged_entry(head, index);
}
/* Below are "no merge" cases, which require that the index be
* up-to-date to avoid the files getting overwritten with
* conflict resolution files.
*/
if (index) {
verify_uptodate(index);
}
else if (path)
verify_absent(path, "overwritten");
nontrivial_merge = 1;
/* #2, #3, #4, #6, #7, #9, #11. */
count = 0;
if (!head_match || !remote_match) {
for (i = 1; i < head_idx; i++) {
if (stages[i]) {
keep_entry(stages[i]);
count++;
break;
}
}
}
#if DBRT_DEBUG
else {
fprintf(stderr, "read-tree: warning #16 detected\n");
show_stage_entry(stderr, "head ", stages[head_match]);
show_stage_entry(stderr, "remote ", stages[remote_match]);
}
#endif
if (head) { count += keep_entry(head); }
if (remote) { count += keep_entry(remote); }
return count;
}
/*
* Two-way merge.
*
* The rule is to "carry forward" what is in the index without losing
* information across a "fast forward", favoring a successful merge
* over a merge failure when it makes sense. For details of the
* "carry forward" rule, please see <Documentation/git-read-tree.txt>.
*
*/
static int twoway_merge(struct cache_entry **src)
{
struct cache_entry *current = src[0];
struct cache_entry *oldtree = src[1], *newtree = src[2];
if (merge_size != 2)
return error("Cannot do a twoway merge of %d trees",
merge_size);
if (current) {
if ((!oldtree && !newtree) || /* 4 and 5 */
(!oldtree && newtree &&
same(current, newtree)) || /* 6 and 7 */
(oldtree && newtree &&
same(oldtree, newtree)) || /* 14 and 15 */
(oldtree && newtree &&
!same(oldtree, newtree) && /* 18 and 19*/
same(current, newtree))) {
return keep_entry(current);
}
else if (oldtree && !newtree && same(current, oldtree)) {
/* 10 or 11 */
return deleted_entry(oldtree, current);
}
else if (oldtree && newtree &&
same(current, oldtree) && !same(current, newtree)) {
/* 20 or 21 */
return merged_entry(newtree, current);
}
else {
/* all other failures */
if (oldtree)
reject_merge(oldtree);
if (current)
reject_merge(current);
if (newtree)
reject_merge(newtree);
return -1;
}
}
else if (newtree)
return merged_entry(newtree, current);
else
return deleted_entry(oldtree, current);
}
/*
* Bind merge.
*
* Keep the index entries at stage0, collapse stage1 but make sure
* stage0 does not have anything there.
*/
static int bind_merge(struct cache_entry **src)
{
struct cache_entry *old = src[0];
struct cache_entry *a = src[1];
if (merge_size != 1)
return error("Cannot do a bind merge of %d trees",
merge_size);
if (a && old)
die("Entry '%s' overlaps. Cannot bind.", a->name);
if (!a)
return keep_entry(old);
else
return merged_entry(a, NULL);
}
/*
* One-way merge.
*
* The rule is:
* - take the stat information from stage0, take the data from stage1
*/
static int oneway_merge(struct cache_entry **src)
{
struct cache_entry *old = src[0];
struct cache_entry *a = src[1];
if (merge_size != 1)
return error("Cannot do a oneway merge of %d trees",
merge_size);
if (!a)
return deleted_entry(old, old);
if (old && same(old, a)) {
if (reset) {
struct stat st;
if (lstat(old->name, &st) ||
ce_match_stat(old, &st, 1))
old->ce_flags |= htons(CE_UPDATE);
}
return keep_entry(old);
}
return merged_entry(a, old);
}
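/*
 * Read the index and collapse any unmerged entries: each such path is
 * kept only once, with a zero ce_mode and its stage bits cleared, and
 * its cache-tree path invalidated.  Returns non-zero if any unmerged
 * entries were found, so callers can refuse to proceed.
 */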
static int read_cache_unmerged(void)
{
int i;
struct cache_entry **dst;
struct cache_entry *last = NULL;
read_cache();
dst = active_cache;
for (i = 0; i < active_nr; i++) {
struct cache_entry *ce = active_cache[i];
if (ce_stage(ce)) {
if (last && !strcmp(ce->name, last->name))
continue;
invalidate_ce_path(ce);
last = ce;
ce->ce_mode = 0;
ce->ce_flags &= ~htons(CE_STAGEMASK);
}
*dst++ = ce;
}
active_nr = dst - active_cache;
return !!last;
}
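/*
 * Rebuild the cache-tree extension directly from the tree that was
 * just read, so that a subsequent write-tree can reuse it instead of
 * recomputing every subtree hash.  Only valid when the resulting index
 * is known to match the tree exactly (see the comment in
 * cmd_read_tree() before prime_cache_tree() is called).
 */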
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
struct tree_desc desc;
struct name_entry entry;
int cnt;
memcpy(it->sha1, tree->object.sha1, 20);
desc.buf = tree->buffer;
desc.size = tree->size;
cnt = 0;
while (tree_entry(&desc, &entry)) {
if (!S_ISDIR(entry.mode))
cnt++;
else {
struct cache_tree_sub *sub;
struct tree *subtree = lookup_tree(entry.sha1);
if (!subtree->object.parsed)
parse_tree(subtree);
sub = cache_tree_sub(it, entry.path);
sub->cache_tree = cache_tree();
prime_cache_tree_rec(sub->cache_tree, subtree);
cnt += sub->cache_tree->entry_count;
}
}
it->entry_count = cnt;
}
static void prime_cache_tree(void)
{
struct tree *tree = (struct tree *)trees->item;
if (!tree)
return;
active_cache_tree = cache_tree();
prime_cache_tree_rec(active_cache_tree, tree);
}
static const char read_tree_usage[] = "git-read-tree (<sha1> | [[-m [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] <sha1> [<sha2> [<sha3>]])";
static struct lock_file lock_file;
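/*
 * Command entry point.  The number of trees given determines the
 * merge: one tree is a one-way merge (or a bind merge with --prefix),
 * two trees a two-way merge, three or more a three-way merge with the
 * last tree as the "remote" side.  A minimal sketch of typical
 * invocations (illustrative only):
 *
 *   git-read-tree <tree>                         # read one tree into the index
 *   git-read-tree -m -u <old-tree> <new-tree>    # fast-forward checkout
 *   git-read-tree -m <ancestor> <ours> <theirs>  # 3-way merge into the index
 */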
int cmd_read_tree(int argc, const char **argv, char **envp)
{
int i, newfd, stage = 0;
unsigned char sha1[20];
merge_fn_t fn = NULL;
df_conflict_list.next = &df_conflict_list;
state.base_dir = "";
state.force = 1;
state.quiet = 1;
state.refresh_cache = 1;
setup_git_directory();
git_config(git_default_config);
newfd = hold_lock_file_for_update(&lock_file, get_index_file());
if (newfd < 0)
die("unable to create new index file");
merge = 0;
reset = 0;
for (i = 1; i < argc; i++) {
const char *arg = argv[i];
/* "-u" means "update", meaning that a merge will update
* the working tree.
*/
if (!strcmp(arg, "-u")) {
update = 1;
continue;
}
if (!strcmp(arg, "-v")) {
verbose_update = 1;
continue;
}
/* "-i" means "index only", meaning that a merge will
* not even look at the working tree.
*/
if (!strcmp(arg, "-i")) {
index_only = 1;
continue;
}
/* "--prefix=<subdirectory>/" means keep the current index
* entries and put the entries from the tree under the
* given subdirectory.
*/
if (!strncmp(arg, "--prefix=", 9)) {
if (stage || merge || prefix)
usage(read_tree_usage);
prefix = arg + 9;
merge = 1;
stage = 1;
if (read_cache_unmerged())
die("you need to resolve your current index first");
continue;
}
/* This differs from "-m" in that we'll silently ignore
* unmerged entries and overwrite working tree files that
* correspond to them.
*/
if (!strcmp(arg, "--reset")) {
if (stage || merge || prefix)
usage(read_tree_usage);
reset = 1;
merge = 1;
stage = 1;
read_cache_unmerged();
continue;
}
if (!strcmp(arg, "--trivial")) {
trivial_merges_only = 1;
continue;
}
if (!strcmp(arg, "--aggressive")) {
aggressive = 1;
continue;
}
/* "-m" stands for "merge", meaning we start in stage 1 */
if (!strcmp(arg, "-m")) {
if (stage || merge || prefix)
usage(read_tree_usage);
if (read_cache_unmerged())
die("you need to resolve your current index first");
stage = 1;
merge = 1;
continue;
}
/* using -u and -i at the same time makes no sense */
if (1 < index_only + update)
usage(read_tree_usage);
if (get_sha1(arg, sha1))
die("Not a valid object name %s", arg);
if (list_tree(sha1) < 0)
die("failed to unpack tree object %s", arg);
stage++;
}
if ((update||index_only) && !merge)
usage(read_tree_usage);
if (prefix) {
int pfxlen = strlen(prefix);
int pos;
if (prefix[pfxlen-1] != '/')
die("prefix must end with /");
if (stage != 2)
die("binding merge takes only one tree");
pos = cache_name_pos(prefix, pfxlen);
if (0 <= pos)
die("corrupt index file");
pos = -pos-1;
if (pos < active_nr &&
!strncmp(active_cache[pos]->name, prefix, pfxlen))
die("subdirectory '%s' already exists.", prefix);
pos = cache_name_pos(prefix, pfxlen-1);
if (0 <= pos)
die("file '%.*s' already exists.", pfxlen-1, prefix);
}
if (merge) {
if (stage < 2)
die("just how do you expect me to merge %d trees?", stage-1);
switch (stage - 1) {
case 1:
fn = prefix ? bind_merge : oneway_merge;
break;
case 2:
fn = twoway_merge;
break;
case 3:
default:
fn = threeway_merge;
cache_tree_free(&active_cache_tree);
break;
}
if (stage - 1 >= 3)
head_idx = stage - 2;
else
head_idx = 1;
}
unpack_trees(fn);
/*
* When reading only one tree (either the most basic form,
* "-m ent" or "--reset ent" form), we can obtain a fully
* valid cache-tree because the index must match exactly
* what came from the tree.
*/
if (trees && trees->item && !prefix && (!merge || (stage == 2))) {
cache_tree_free(&active_cache_tree);
prime_cache_tree();
}
if (write_cache(newfd, active_cache, active_nr) ||
close(newfd) || commit_lock_file(&lock_file))
die("unable to write new index file");
return 0;
}