Merge branch 'jk/delta-chain-limit'
"git repack --depth=<n>" for a long time busted the specified depth when reusing delta from existing packs. This has been corrected. * jk/delta-chain-limit: pack-objects: convert recursion to iteration in break_delta_chain() pack-objects: enforce --depth limit in reused deltas
commit 538569bc8a
@@ -1539,6 +1539,8 @@ static int pack_offset_sort(const void *_a, const void *_b)
  *   2. Updating our size/type to the non-delta representation. These were
  *      either not recorded initially (size) or overwritten with the delta type
  *      (type) when check_object() decided to reuse the delta.
+ *
+ *   3. Resetting our delta depth, as we are now a base object.
  */
 static void drop_reused_delta(struct object_entry *entry)
 {
@@ -1552,6 +1554,7 @@ static void drop_reused_delta(struct object_entry *entry)
 		p = &(*p)->delta_sibling;
 	}
 	entry->delta = NULL;
+	entry->depth = 0;
 
 	oi.sizep = &entry->size;
 	oi.typep = &entry->type;
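In short, the hunks above teach drop_reused_delta() to also reset the entry's delta depth: an entry that stops reusing an on-disk delta becomes a full base object, so it is unlinked from its delta base, its depth goes back to zero, and its size/type are re-read from the object store. The standalone sketch below is editorial illustration, not part of the patch; the toy_entry struct and the lookup_size_and_type() helper are simplified stand-ins rather than git's real types or API.

#include <stddef.h>

/* Simplified stand-in for git's object_entry; illustration only. */
struct toy_entry {
	struct toy_entry *delta;	/* delta base, or NULL for a full object */
	unsigned long size;		/* object size (delta size while reusing) */
	int type;			/* object type (delta type while reusing) */
	int depth;			/* delta depth; 0 for a base object */
};

/* Hypothetical helper: re-read canonical size/type from the object store. */
static void lookup_size_and_type(struct toy_entry *e)
{
	(void)e;	/* in real code this would query the object database */
}

/* Sketch of "drop this reused delta": turn the entry back into a base object. */
static void toy_drop_reused_delta(struct toy_entry *entry)
{
	entry->delta = NULL;		/* no longer a delta against anything */
	entry->depth = 0;		/* a base object sits at depth 0 */
	lookup_size_and_type(entry);	/* the delta-based size/type are stale */
}

int main(void)
{
	struct toy_entry base = { NULL, 100, 1, 0 };
	struct toy_entry reused = { &base, 20, 2, 1 };

	toy_drop_reused_delta(&reused);
	return (reused.delta == NULL && reused.depth == 0) ? 0 : 1;
}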
@@ -1570,39 +1573,123 @@ static void drop_reused_delta(struct object_entry *entry)
  * Follow the chain of deltas from this entry onward, throwing away any links
  * that cause us to hit a cycle (as determined by the DFS state flags in
  * the entries).
+ *
+ * We also detect too-long reused chains that would violate our --depth
+ * limit.
  */
 static void break_delta_chains(struct object_entry *entry)
 {
-	/* If it's not a delta, it can't be part of a cycle. */
-	if (!entry->delta) {
-		entry->dfs_state = DFS_DONE;
-		return;
-	}
-
-	switch (entry->dfs_state) {
-	case DFS_NONE:
-		/*
-		 * This is the first time we've seen the object. We mark it as
-		 * part of the active potential cycle and recurse.
-		 */
-		entry->dfs_state = DFS_ACTIVE;
-		break_delta_chains(entry->delta);
-		entry->dfs_state = DFS_DONE;
-		break;
-
-	case DFS_DONE:
-		/* object already examined, and not part of a cycle */
-		break;
-
-	case DFS_ACTIVE:
-		/*
-		 * We found a cycle that needs broken. It would be correct to
-		 * break any link in the chain, but it's convenient to
-		 * break this one.
-		 */
-		drop_reused_delta(entry);
-		entry->dfs_state = DFS_DONE;
-		break;
-	}
+	/*
+	 * The actual depth of each object we will write is stored as an int,
+	 * as it cannot exceed our int "depth" limit. But before we break
+	 * changes based on that limit, we may potentially go as deep as the
+	 * number of objects, which is elsewhere bounded to a uint32_t.
+	 */
+	uint32_t total_depth;
+	struct object_entry *cur, *next;
+
+	for (cur = entry, total_depth = 0;
+	     cur;
+	     cur = cur->delta, total_depth++) {
+		if (cur->dfs_state == DFS_DONE) {
+			/*
+			 * We've already seen this object and know it isn't
+			 * part of a cycle. We do need to append its depth
+			 * to our count.
+			 */
+			total_depth += cur->depth;
+			break;
+		}
+
+		/*
+		 * We break cycles before looping, so an ACTIVE state (or any
+		 * other cruft which made its way into the state variable)
+		 * is a bug.
+		 */
+		if (cur->dfs_state != DFS_NONE)
+			die("BUG: confusing delta dfs state in first pass: %d",
+			    cur->dfs_state);
+
+		/*
+		 * Now we know this is the first time we've seen the object. If
+		 * it's not a delta, we're done traversing, but we'll mark it
+		 * done to save time on future traversals.
+		 */
+		if (!cur->delta) {
+			cur->dfs_state = DFS_DONE;
+			break;
+		}
+
+		/*
+		 * Mark ourselves as active and see if the next step causes
+		 * us to cycle to another active object. It's important to do
+		 * this _before_ we loop, because it impacts where we make the
+		 * cut, and thus how our total_depth counter works.
+		 * E.g., We may see a partial loop like:
+		 *
+		 *   A -> B -> C -> D -> B
+		 *
+		 * Cutting B->C breaks the cycle. But now the depth of A is
+		 * only 1, and our total_depth counter is at 3. The size of the
+		 * error is always one less than the size of the cycle we
+		 * broke. Commits C and D were "lost" from A's chain.
+		 *
+		 * If we instead cut D->B, then the depth of A is correct at 3.
+		 * We keep all commits in the chain that we examined.
+		 */
+		cur->dfs_state = DFS_ACTIVE;
+		if (cur->delta->dfs_state == DFS_ACTIVE) {
+			drop_reused_delta(cur);
+			cur->dfs_state = DFS_DONE;
+			break;
+		}
+	}
+
+	/*
+	 * And now that we've gone all the way to the bottom of the chain, we
+	 * need to clear the active flags and set the depth fields as
+	 * appropriate. Unlike the loop above, which can quit when it drops a
+	 * delta, we need to keep going to look for more depth cuts. So we need
+	 * an extra "next" pointer to keep going after we reset cur->delta.
+	 */
+	for (cur = entry; cur; cur = next) {
+		next = cur->delta;
+
+		/*
+		 * We should have a chain of zero or more ACTIVE states down to
+		 * a final DONE. We can quit after the DONE, because either it
+		 * has no bases, or we've already handled them in a previous
+		 * call.
+		 */
+		if (cur->dfs_state == DFS_DONE)
+			break;
+		else if (cur->dfs_state != DFS_ACTIVE)
+			die("BUG: confusing delta dfs state in second pass: %d",
+			    cur->dfs_state);
+
+		/*
+		 * If the total_depth is more than depth, then we need to snip
+		 * the chain into two or more smaller chains that don't exceed
+		 * the maximum depth. Most of the resulting chains will contain
+		 * (depth + 1) entries (i.e., depth deltas plus one base), and
+		 * the last chain (i.e., the one containing entry) will contain
+		 * whatever entries are left over, namely
+		 * (total_depth % (depth + 1)) of them.
+		 *
+		 * Since we are iterating towards decreasing depth, we need to
+		 * decrement total_depth as we go, and we need to write to the
+		 * entry what its final depth will be after all of the
+		 * snipping. Since we're snipping into chains of length (depth
+		 * + 1) entries, the final depth of an entry will be its
+		 * original depth modulo (depth + 1). Any time we encounter an
+		 * entry whose final depth is supposed to be zero, we snip it
+		 * from its delta base, thereby making it so.
+		 */
+		cur->depth = (total_depth--) % (depth + 1);
+		if (!cur->depth)
+			drop_reused_delta(cur);
+
+		cur->dfs_state = DFS_DONE;
+	}
 }
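The modulo arithmetic in the second pass is easiest to see on a concrete chain. The toy program below is an editorial sketch, not git code; all names (toy_entry, NOBJ, DEPTH_LIMIT) are made up for illustration. It links ten objects tip-to-base, measures the chain as the first pass does, then assigns final depths with total_depth-- % (DEPTH_LIMIT + 1) and snips wherever the result is zero. With nine reused deltas and a limit of 5, this leaves a short chain of three deltas at the tip and one full chain of five deltas behind it, mirroring the comment in the hunk above.

#include <stdio.h>

#define NOBJ 10		/* ten objects: nine reused deltas plus one base */
#define DEPTH_LIMIT 5	/* stand-in for the --depth limit */

/* Toy entry: only the fields needed to show the snipping arithmetic. */
struct toy_entry {
	struct toy_entry *delta;	/* delta base, or NULL for a full object */
	int depth;			/* final depth after snipping */
};

int main(void)
{
	struct toy_entry obj[NOBJ];
	unsigned int total_depth;
	int i;

	/* obj[0] is the tip; each object is a delta against the next one. */
	for (i = 0; i < NOBJ; i++) {
		obj[i].delta = (i < NOBJ - 1) ? &obj[i + 1] : NULL;
		obj[i].depth = 0;
	}

	/* First pass (simplified): measure the existing chain length. */
	total_depth = 0;
	for (i = 0; obj[i].delta; i++)
		total_depth++;		/* 9 for this chain */

	/*
	 * Second pass: assign final depths and cut the chain wherever the
	 * computed depth is zero, as break_delta_chains() does above.
	 */
	for (i = 0; i < NOBJ - 1; i++) {
		obj[i].depth = (total_depth--) % (DEPTH_LIMIT + 1);
		if (!obj[i].depth)
			obj[i].delta = NULL;	/* snip: this entry becomes a base */
	}

	for (i = 0; i < NOBJ; i++)
		printf("obj[%d]: depth=%d %s\n", i, obj[i].depth,
		       obj[i].delta ? "(delta)" : "(base)");
	return 0;
}

Running the sketch prints depths 3, 2, 1, 0 for the four entries at the tip and 5 down to 0 for the remaining six, so no entry ends up deeper than the limit.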
@@ -30,12 +30,16 @@ struct object_entry {
 
 	/*
 	 * State flags for depth-first search used for analyzing delta cycles.
+	 *
+	 * The depth is measured in delta-links to the base (so if A is a delta
+	 * against B, then A has a depth of 1, and B a depth of 0).
 	 */
 	enum {
 		DFS_NONE = 0,
 		DFS_ACTIVE,
 		DFS_DONE
 	} dfs_state;
+	int depth;
 };
 
 struct packing_data {
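The new depth field follows the convention spelled out in the added comment: depth counts delta links down to the base, so A-delta-against-B gives A a depth of 1 and B a depth of 0. A tiny illustrative sketch follows; the chain_entry type and chain_depth() helper are hypothetical, not part of the patch.

#include <assert.h>

/* Toy entry: just the delta-base link. */
struct chain_entry {
	struct chain_entry *delta;	/* delta base, or NULL for a full object */
};

/* Depth = number of delta links from this entry down to its base. */
static int chain_depth(const struct chain_entry *e)
{
	int depth = 0;
	while (e->delta) {
		e = e->delta;
		depth++;
	}
	return depth;
}

int main(void)
{
	struct chain_entry b = { NULL };	/* B: full object, depth 0 */
	struct chain_entry a = { &b };		/* A: delta against B, depth 1 */

	assert(chain_depth(&b) == 0);
	assert(chain_depth(&a) == 1);
	return 0;
}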
t/t5316-pack-delta-depth.sh (new executable file, 93 lines)
@@ -0,0 +1,93 @@
#!/bin/sh

test_description='pack-objects breaks long cross-pack delta chains'
. ./test-lib.sh

# This mirrors a repeated push setup:
#
# 1. A client repeatedly modifies some files, makes a
#    commit, and pushes the result. It does this N times
#    before we get around to repacking.
#
# 2. Each push generates a thin pack with the new version of
#    various objects. Let's consider some file in the root tree
#    which is updated in each commit.
#
#    When generating push number X, we feed commit X-1 (and
#    thus blob X-1) as a preferred base. The resulting pack has
#    blob X as a thin delta against blob X-1.
#
#    On the receiving end, "index-pack --fix-thin" will
#    complete the pack with a base copy of blob X-1.
#
# 3. In older versions of git, if we used the delta from
#    pack X, then we'd always find blob X-1 as a base in the
#    same pack (and generate a fresh delta).
#
#    But with the pack mru, we jump from delta to delta
#    following the traversal order:
#
#      a. We grab blob X from pack X as a delta, putting it at
#         the tip of our mru list.
#
#      b. Eventually we move onto commit X-1. We need other
#         objects which are only in pack X-1 (in the test code
#         below, it's the containing tree). That puts pack X-1
#         at the tip of our mru list.
#
#      c. Eventually we look for blob X-1, and we find the
#         version in pack X-1 (because it's the mru tip).
#
# Now we have blob X as a delta against X-1, which is a delta
# against X-2, and so forth.
#
# In the real world, these small pushes would get exploded by
# unpack-objects rather than "index-pack --fix-thin", but the
# same principle applies to larger pushes (they only need one
# repeatedly-modified file to generate the delta chain).

test_expect_success 'create series of packs' '
	test-genrandom foo 4096 >content &&
	prev= &&
	for i in $(test_seq 1 10)
	do
		cat content >file &&
		echo $i >>file &&
		git add file &&
		git commit -m $i &&
		cur=$(git rev-parse HEAD^{tree}) &&
		{
			test -n "$prev" && echo "-$prev"
			echo $cur
			echo "$(git rev-parse :file) file"
		} | git pack-objects --stdout >tmp &&
		git index-pack --stdin --fix-thin <tmp || return 1
		prev=$cur
	done
'

max_chain() {
	git index-pack --verify-stat-only "$1" >output &&
	perl -lne '
		/chain length = (\d+)/ and $len = $1;
		END { print $len }
	' output
}

# Note that this whole setup is pretty reliant on the current
# packing heuristics. We double-check that our test case
# actually produces a long chain. If it doesn't, it should be
# adjusted (or scrapped if the heuristics have become too unreliable)
test_expect_success 'packing produces a long delta' '
	# Use --window=0 to make sure we are seeing reused deltas,
	# not computing a new long chain.
	pack=$(git pack-objects --all --window=0 </dev/null pack) &&
	test 9 = "$(max_chain pack-$pack.pack)"
'

test_expect_success '--depth limits depth' '
	pack=$(git pack-objects --all --depth=5 </dev/null pack) &&
	test 5 = "$(max_chain pack-$pack.pack)"
'

test_done