@@ -422,6 +422,12 @@ static void verify_uptodate(struct cache_entry *ce)
 	die("Entry '%s' not uptodate. Cannot merge.", ce->name);
 }
 
+static void invalidate_ce_path(struct cache_entry *ce)
+{
+	if (ce)
+		cache_tree_invalidate_path(active_cache_tree, ce->name);
+}
+
 static int merged_entry(struct cache_entry *merge, struct cache_entry *old)
 {
 	merge->ce_flags |= htons(CE_UPDATE);
@@ -437,6 +443,7 @@ static int merged_entry(struct cache_entry *merge, struct cache_entry *old)
 			*merge = *old;
 		} else {
 			verify_uptodate(old);
+			invalidate_ce_path(old);
 		}
 	}
 	merge->ce_flags &= ~htons(CE_STAGEMASK);
@@ -450,6 +457,7 @@ static int deleted_entry(struct cache_entry *ce, struct cache_entry *old)
 		verify_uptodate(old);
 	ce->ce_mode = 0;
 	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
+	invalidate_ce_path(ce);
 	return 1;
 }
 
@@ -684,8 +692,10 @@ static int oneway_merge(struct cache_entry **src)
 		return error("Cannot do a oneway merge of %d trees",
 			     merge_size);
 
-	if (!a)
+	if (!a) {
+		invalidate_ce_path(old);
 		return 0;
+	}
 	if (old && same(old, a)) {
 		return keep_entry(old);
 	}
@@ -704,6 +714,7 @@ static int read_cache_unmerged(void)
 		struct cache_entry *ce = active_cache[i];
 		if (ce_stage(ce)) {
 			deleted++;
+			invalidate_ce_path(ce);
 			continue;
 		}
 		if (deleted)
@@ -714,6 +725,39 @@ static int read_cache_unmerged(void)
 	return deleted;
 }
 
+static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
+{
+	struct tree_entry_list *ent;
+	int cnt;
+
+	memcpy(it->sha1, tree->object.sha1, 20);
+	for (cnt = 0, ent = tree->entries; ent; ent = ent->next) {
+		if (!ent->directory)
+			cnt++;
+		else {
+			struct cache_tree_sub *sub;
+			struct tree *subtree = (struct tree *)ent->item.tree;
+			if (!subtree->object.parsed)
+				parse_tree(subtree);
+			sub = cache_tree_sub(it, ent->name);
+			sub->cache_tree = cache_tree();
+			prime_cache_tree_rec(sub->cache_tree, subtree);
+			cnt += sub->cache_tree->entry_count;
+		}
+	}
+	it->entry_count = cnt;
+}
+
+static void prime_cache_tree(void)
+{
+	struct tree *tree = (struct tree *)trees->item;
+
+	if (!tree)
+		return;
+	active_cache_tree = cache_tree();
+	prime_cache_tree_rec(active_cache_tree, tree);
+}
+
 static const char read_tree_usage[] = "git-read-tree (<sha> | -m [--aggressive] [-u | -i] <sha1> [<sha2> [<sha3>]])";
 
 static struct cache_file cache_file;
@@ -815,10 +859,9 @@ int main(int argc, char **argv)
 			fn = twoway_merge;
 			break;
 		case 3:
-			fn = threeway_merge;
-			break;
 		default:
 			fn = threeway_merge;
+			cache_tree_free(&active_cache_tree);
 			break;
 		}
 
@@ -829,7 +872,18 @@ int main(int argc, char **argv)
 	}
 
 	unpack_trees(fn);
+
+	/*
+	 * When reading only one tree (either the most basic form,
+	 * "-m ent" or "--reset ent" form), we can obtain a fully
+	 * valid cache-tree because the index must match exactly
+	 * what came from the tree.
+	 */
+	if (trees->item && (!merge || (stage == 2))) {
+		cache_tree_free(&active_cache_tree);
+		prime_cache_tree();
+	}
 
 	if (write_cache(newfd, active_cache, active_nr) ||
 	    commit_index_file(&cache_file))
 		die("unable to write new index file");