Merge branch 'ds/midx-write-fixes'
Fixes multiple crashes around midx write-out codepaths.

* ds/midx-write-fixes:
  midx-write: simplify error cases
  midx-write: reenable signed comparison errors
  midx-write: use uint32_t for preferred_pack_idx
  midx-write: use cleanup when incremental midx fails
  midx-write: put failing response value back
  midx-write: only load initialized packs
commit e18e761bef

midx-write.c: 131 changed lines
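A note on the shape of these fixes before the diff: `preferred_pack_idx` switches from a signed `int` (where -1 meant "no preferred pack") to `uint32_t` with an all-ones sentinel, `NO_PREFERRED_PACK`, which lets the file drop `DISABLE_SIGN_COMPARE_WARNINGS`; and early `return -1` / `return 1` exits in `write_midx_internal()` become `goto cleanup` with `result` defaulting to -1, so the cleanup block runs on every path. The sketch below illustrates those two patterns in isolation; it is not code from midx-write.c, and `NO_PREFERRED`, `struct demo_ctx` and `pick_preferred()` are invented names.

/*
 * Illustrative sketch only: an unsigned index with an all-ones sentinel
 * instead of "int i == -1 means none", plus a single cleanup exit so every
 * error path releases its resources. All names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NO_PREFERRED (~((uint32_t)0))	/* same trick as NO_PREFERRED_PACK / BITMAP_POS_UNKNOWN */

struct demo_ctx {
	char **names;
	uint32_t nr;
	uint32_t preferred_idx;		/* unsigned + sentinel, not int + -1 */
};

static int pick_preferred(struct demo_ctx *ctx, const char *wanted)
{
	int result = -1;		/* pessimistic default, cleared on success */
	char *copy = NULL;
	size_t len = strlen(wanted) + 1;

	ctx->preferred_idx = NO_PREFERRED;

	copy = malloc(len);		/* stand-in for a resource needing cleanup */
	if (!copy)
		goto cleanup;
	memcpy(copy, wanted, len);

	for (uint32_t i = 0; i < ctx->nr; i++) {
		if (!strcmp(ctx->names[i], copy)) {
			ctx->preferred_idx = i;
			break;
		}
	}

	if (ctx->preferred_idx == NO_PREFERRED) {
		fprintf(stderr, "unknown preferred pack: '%s'\n", wanted);
		goto cleanup;
	}

	result = 0;
cleanup:
	free(copy);			/* single exit: cleanup runs on every path */
	return result;
}

int main(void)
{
	char *names[] = { "pack-a", "pack-b", "pack-c" };
	struct demo_ctx ctx = { names, 3, NO_PREFERRED };

	if (pick_preferred(&ctx, "pack-b"))
		return 1;
	printf("preferred index: %u\n", (unsigned)ctx.preferred_idx);
	return 0;
}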
@@ -1,5 +1,3 @@
-#define DISABLE_SIGN_COMPARE_WARNINGS
-
 #include "git-compat-util.h"
 #include "abspath.h"
 #include "config.h"
@@ -24,6 +22,7 @@
 #define BITMAP_POS_UNKNOWN (~((uint32_t)0))
 #define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
 #define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
+#define NO_PREFERRED_PACK (~((uint32_t)0))
 
 extern int midx_checksum_valid(struct multi_pack_index *m);
 extern void clear_midx_files_ext(struct odb_source *source, const char *ext,
@@ -104,7 +103,7 @@ struct write_midx_context {
 	unsigned large_offsets_needed:1;
 	uint32_t num_large_offsets;
 
-	int preferred_pack_idx;
+	uint32_t preferred_pack_idx;
 
 	int incremental;
 	uint32_t num_multi_pack_indexes_before;
@@ -261,7 +260,7 @@ static void midx_fanout_sort(struct midx_fanout *fanout)
 static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
 					struct multi_pack_index *m,
 					uint32_t cur_fanout,
-					int preferred_pack)
+					uint32_t preferred_pack)
 {
 	uint32_t start = m->num_objects_in_base, end;
 	uint32_t cur_object;
@@ -275,7 +274,7 @@ static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
 	end = m->num_objects_in_base + ntohl(m->chunk_oid_fanout[cur_fanout]);
 
 	for (cur_object = start; cur_object < end; cur_object++) {
-		if ((preferred_pack > -1) &&
+		if ((preferred_pack != NO_PREFERRED_PACK) &&
 		    (preferred_pack == nth_midxed_pack_int_id(m, cur_object))) {
 			/*
 			 * Objects from preferred packs are added
@@ -365,7 +364,8 @@ static void compute_sorted_entries(struct write_midx_context *ctx,
 					    preferred, cur_fanout);
 	}
 
-	if (-1 < ctx->preferred_pack_idx && ctx->preferred_pack_idx < start_pack)
+	if (ctx->preferred_pack_idx != NO_PREFERRED_PACK &&
+	    ctx->preferred_pack_idx < start_pack)
 		midx_fanout_add_pack_fanout(&fanout, ctx->info,
 					    ctx->preferred_pack_idx, 1,
 					    cur_fanout);
@@ -841,7 +841,7 @@ static int write_midx_bitmap(struct write_midx_context *ctx,
 			     uint32_t commits_nr,
 			     unsigned flags)
 {
-	int ret, i;
+	int ret;
 	uint16_t options = 0;
 	struct bitmap_writer writer;
 	struct pack_idx_entry **index;
@@ -868,7 +868,7 @@ static int write_midx_bitmap(struct write_midx_context *ctx,
 	 * this order).
 	 */
 	ALLOC_ARRAY(index, pdata->nr_objects);
-	for (i = 0; i < pdata->nr_objects; i++)
+	for (uint32_t i = 0; i < pdata->nr_objects; i++)
 		index[i] = &pdata->objects[i].idx;
 
 	bitmap_writer_init(&writer, ctx->repo, pdata,
@@ -889,7 +889,7 @@ static int write_midx_bitmap(struct write_midx_context *ctx,
 	 * happens between bitmap_writer_build_type_index() and
 	 * bitmap_writer_finish().
 	 */
-	for (i = 0; i < pdata->nr_objects; i++)
+	for (uint32_t i = 0; i < pdata->nr_objects; i++)
 		index[ctx->pack_order[i]] = &pdata->objects[i].idx;
 
 	bitmap_writer_select_commits(&writer, commits, commits_nr);
@@ -910,8 +910,7 @@ cleanup:
 	return ret;
 }
 
-static int fill_packs_from_midx(struct write_midx_context *ctx,
-				const char *preferred_pack_name, uint32_t flags)
+static int fill_packs_from_midx(struct write_midx_context *ctx)
 {
 	struct multi_pack_index *m;
 
@@ -919,29 +918,10 @@ static int fill_packs_from_midx(struct write_midx_context *ctx,
 		uint32_t i;
 
 		for (i = 0; i < m->num_packs; i++) {
+			if (prepare_midx_pack(m, m->num_packs_in_base + i))
+				return error(_("could not load pack"));
+
 			ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);
-
-			/*
-			 * If generating a reverse index, need to have
-			 * packed_git's loaded to compare their
-			 * mtimes and object count.
-			 *
-			 * If a preferred pack is specified, need to
-			 * have packed_git's loaded to ensure the chosen
-			 * preferred pack has a non-zero object count.
-			 */
-			if (flags & MIDX_WRITE_REV_INDEX ||
-			    preferred_pack_name) {
-				if (prepare_midx_pack(m, m->num_packs_in_base + i)) {
-					error(_("could not load pack"));
-					return 1;
-				}
-
-				if (open_pack_index(m->packs[i]))
-					die(_("could not open index for %s"),
-					    m->packs[i]->pack_name);
-			}
-
 			fill_pack_info(&ctx->info[ctx->nr++], m->packs[i],
 				       m->pack_names[i],
 				       m->num_packs_in_base + i);
@@ -1045,15 +1025,17 @@ static int write_midx_internal(struct odb_source *source,
 	struct repository *r = source->odb->repo;
 	struct strbuf midx_name = STRBUF_INIT;
 	unsigned char midx_hash[GIT_MAX_RAWSZ];
-	uint32_t i, start_pack;
+	uint32_t start_pack;
 	struct hashfile *f = NULL;
 	struct lock_file lk;
 	struct tempfile *incr;
-	struct write_midx_context ctx = { 0 };
+	struct write_midx_context ctx = {
+		.preferred_pack_idx = NO_PREFERRED_PACK,
+	};
 	int bitmapped_packs_concat_len = 0;
 	int pack_name_concat_len = 0;
 	int dropped_packs = 0;
-	int result = 0;
+	int result = -1;
 	const char **keep_hashes = NULL;
 	struct chunkfile *cf;
 
@@ -1107,14 +1089,12 @@ static int write_midx_internal(struct odb_source *source,
 				error(_("could not load reverse index for MIDX %s"),
 				      hash_to_hex_algop(get_midx_checksum(m),
 							m->source->odb->repo->hash_algo));
-				result = 1;
 				goto cleanup;
 			}
 			ctx.num_multi_pack_indexes_before++;
 			m = m->base_midx;
 		}
-	} else if (ctx.m && fill_packs_from_midx(&ctx, preferred_pack_name,
-						 flags) < 0) {
+	} else if (ctx.m && fill_packs_from_midx(&ctx)) {
 		goto cleanup;
 	}
 
@@ -1150,17 +1130,20 @@ static int write_midx_internal(struct odb_source *source,
 			 */
 			if (!want_bitmap)
 				clear_midx_files_ext(source, "bitmap", NULL);
+			result = 0;
 			goto cleanup;
 		}
 	}
 
-	if (ctx.incremental && !ctx.nr)
+	if (ctx.incremental && !ctx.nr) {
+		result = 0;
 		goto cleanup; /* nothing to do */
+	}
 
 	if (preferred_pack_name) {
-		ctx.preferred_pack_idx = -1;
+		ctx.preferred_pack_idx = NO_PREFERRED_PACK;
 
-		for (i = 0; i < ctx.nr; i++) {
+		for (size_t i = 0; i < ctx.nr; i++) {
 			if (!cmp_idx_or_pack_name(preferred_pack_name,
 						  ctx.info[i].pack_name)) {
 				ctx.preferred_pack_idx = i;
@@ -1168,14 +1151,21 @@ static int write_midx_internal(struct odb_source *source,
 			}
 		}
 
-		if (ctx.preferred_pack_idx == -1)
+		if (ctx.preferred_pack_idx == NO_PREFERRED_PACK)
 			warning(_("unknown preferred pack: '%s'"),
 				preferred_pack_name);
 	} else if (ctx.nr &&
 		   (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) {
-		struct packed_git *oldest = ctx.info[ctx.preferred_pack_idx].p;
+		struct packed_git *oldest = ctx.info[0].p;
 		ctx.preferred_pack_idx = 0;
 
+		/*
+		 * Attempt opening the pack index to populate num_objects.
+		 * Ignore failiures as they can be expected and are not
+		 * fatal during this selection time.
+		 */
+		open_pack_index(oldest);
+
 		if (packs_to_drop && packs_to_drop->nr)
 			BUG("cannot write a MIDX bitmap during expiration");
 
@@ -1185,11 +1175,12 @@ static int write_midx_internal(struct odb_source *source,
 		 * pack-order has all of its objects selected from that pack
 		 * (and not another pack containing a duplicate)
 		 */
-		for (i = 1; i < ctx.nr; i++) {
+		for (size_t i = 1; i < ctx.nr; i++) {
 			struct packed_git *p = ctx.info[i].p;
 
 			if (!oldest->num_objects || p->mtime < oldest->mtime) {
 				oldest = p;
+				open_pack_index(oldest);
 				ctx.preferred_pack_idx = i;
 			}
 		}
@@ -1201,22 +1192,26 @@ static int write_midx_internal(struct odb_source *source,
 			 * objects to resolve, so the preferred value doesn't
 			 * matter.
 			 */
-			ctx.preferred_pack_idx = -1;
+			ctx.preferred_pack_idx = NO_PREFERRED_PACK;
 		}
 	} else {
 		/*
 		 * otherwise don't mark any pack as preferred to avoid
 		 * interfering with expiration logic below
 		 */
-		ctx.preferred_pack_idx = -1;
+		ctx.preferred_pack_idx = NO_PREFERRED_PACK;
 	}
 
-	if (ctx.preferred_pack_idx > -1) {
+	if (ctx.preferred_pack_idx != NO_PREFERRED_PACK) {
 		struct packed_git *preferred = ctx.info[ctx.preferred_pack_idx].p;
+
+		if (open_pack_index(preferred))
+			die(_("failed to open preferred pack %s"),
+			    ctx.info[ctx.preferred_pack_idx].pack_name);
+
 		if (!preferred->num_objects) {
 			error(_("cannot select preferred pack %s with no objects"),
 			      preferred->pack_name);
-			result = 1;
 			goto cleanup;
 		}
 	}
@@ -1224,7 +1219,7 @@ static int write_midx_internal(struct odb_source *source,
 	compute_sorted_entries(&ctx, start_pack);
 
 	ctx.large_offsets_needed = 0;
-	for (i = 0; i < ctx.entries_nr; i++) {
+	for (size_t i = 0; i < ctx.entries_nr; i++) {
 		if (ctx.entries[i].offset > 0x7fffffff)
 			ctx.num_large_offsets++;
 		if (ctx.entries[i].offset > 0xffffffff)
@@ -1234,10 +1229,10 @@ static int write_midx_internal(struct odb_source *source,
 	QSORT(ctx.info, ctx.nr, pack_info_compare);
 
 	if (packs_to_drop && packs_to_drop->nr) {
-		int drop_index = 0;
+		size_t drop_index = 0;
 		int missing_drops = 0;
 
-		for (i = 0; i < ctx.nr && drop_index < packs_to_drop->nr; i++) {
+		for (size_t i = 0; i < ctx.nr && drop_index < packs_to_drop->nr; i++) {
 			int cmp = strcmp(ctx.info[i].pack_name,
 					 packs_to_drop->items[drop_index].string);
 
@@ -1255,11 +1250,9 @@ static int write_midx_internal(struct odb_source *source,
 			}
 		}
 
-		if (missing_drops) {
-			result = 1;
+		if (missing_drops)
 			goto cleanup;
-		}
 	}
 
 	/*
 	 * pack_perm stores a permutation between pack-int-ids from the
@@ -1268,7 +1261,7 @@ static int write_midx_internal(struct odb_source *source,
 	 * pack_perm[old_id] = new_id
 	 */
 	ALLOC_ARRAY(ctx.pack_perm, ctx.nr);
-	for (i = 0; i < ctx.nr; i++) {
+	for (size_t i = 0; i < ctx.nr; i++) {
 		if (ctx.info[i].expired) {
 			dropped_packs++;
 			ctx.pack_perm[ctx.info[i].orig_pack_int_id] = PACK_EXPIRED;
@@ -1277,7 +1270,7 @@ static int write_midx_internal(struct odb_source *source,
 		}
 	}
 
-	for (i = 0; i < ctx.nr; i++) {
+	for (size_t i = 0; i < ctx.nr; i++) {
 		if (ctx.info[i].expired)
 			continue;
 		pack_name_concat_len += strlen(ctx.info[i].pack_name) + 1;
@@ -1304,7 +1297,6 @@ static int write_midx_internal(struct odb_source *source,
 
 	if (ctx.nr - dropped_packs == 0) {
 		error(_("no pack files to index."));
-		result = 1;
 		goto cleanup;
 	}
 
@@ -1324,13 +1316,13 @@ static int write_midx_internal(struct odb_source *source,
 		incr = mks_tempfile_m(midx_name.buf, 0444);
 		if (!incr) {
 			error(_("unable to create temporary MIDX layer"));
-			return -1;
+			goto cleanup;
 		}
 
 		if (adjust_shared_perm(r, get_tempfile_path(incr))) {
 			error(_("unable to adjust shared permissions for '%s'"),
 			      get_tempfile_path(incr));
-			return -1;
+			goto cleanup;
 		}
 
 		f = hashfd(r->hash_algo, get_tempfile_fd(incr),
@@ -1407,7 +1399,6 @@ static int write_midx_internal(struct odb_source *source,
 				     midx_hash, &pdata, commits, commits_nr,
 				     flags) < 0) {
 			error(_("could not write multi-pack bitmap"));
-			result = 1;
 			clear_packing_data(&pdata);
 			free(commits);
 			goto cleanup;
@@ -1421,6 +1412,9 @@ static int write_midx_internal(struct odb_source *source,
 	 * have been freed in the previous if block.
 	 */
 
+	if (ctx.num_multi_pack_indexes_before == UINT32_MAX)
+		die(_("too many multi-pack-indexes"));
+
 	CALLOC_ARRAY(keep_hashes, ctx.num_multi_pack_indexes_before + 1);
 
 	if (ctx.incremental) {
@@ -1430,18 +1424,18 @@ static int write_midx_internal(struct odb_source *source,
 
 		if (!chainf) {
 			error_errno(_("unable to open multi-pack-index chain file"));
-			return -1;
+			goto cleanup;
 		}
 
 		if (link_midx_to_chain(ctx.base_midx) < 0)
-			return -1;
+			goto cleanup;
 
 		get_split_midx_filename_ext(source, &final_midx_name,
 					    midx_hash, MIDX_EXT_MIDX);
 
 		if (rename_tempfile(&incr, final_midx_name.buf) < 0) {
 			error_errno(_("unable to rename new multi-pack-index layer"));
-			return -1;
+			goto cleanup;
 		}
 
 		strbuf_release(&final_midx_name);
@@ -1449,7 +1443,7 @@ static int write_midx_internal(struct odb_source *source,
 		keep_hashes[ctx.num_multi_pack_indexes_before] =
 			xstrdup(hash_to_hex_algop(midx_hash, r->hash_algo));
 
-		for (i = 0; i < ctx.num_multi_pack_indexes_before; i++) {
+		for (uint32_t i = 0; i < ctx.num_multi_pack_indexes_before; i++) {
 			uint32_t j = ctx.num_multi_pack_indexes_before - i - 1;
 
 			keep_hashes[j] = xstrdup(hash_to_hex_algop(get_midx_checksum(m),
@@ -1457,7 +1451,7 @@ static int write_midx_internal(struct odb_source *source,
 			m = m->base_midx;
 		}
 
-		for (i = 0; i < ctx.num_multi_pack_indexes_before + 1; i++)
+		for (uint32_t i = 0; i <= ctx.num_multi_pack_indexes_before; i++)
 			fprintf(get_lock_file_fp(&lk), "%s\n", keep_hashes[i]);
 	} else {
 		keep_hashes[ctx.num_multi_pack_indexes_before] =
@@ -1473,9 +1467,10 @@ static int write_midx_internal(struct odb_source *source,
 	clear_midx_files(source, keep_hashes,
 			 ctx.num_multi_pack_indexes_before + 1,
 			 ctx.incremental);
+	result = 0;
 
 cleanup:
-	for (i = 0; i < ctx.nr; i++) {
+	for (size_t i = 0; i < ctx.nr; i++) {
 		if (ctx.info[i].p) {
 			close_pack(ctx.info[i].p);
 			free(ctx.info[i].p);
@@ -1488,7 +1483,7 @@ cleanup:
 	free(ctx.pack_perm);
 	free(ctx.pack_order);
 	if (keep_hashes) {
-		for (i = 0; i < ctx.num_multi_pack_indexes_before + 1; i++)
+		for (uint32_t i = 0; i <= ctx.num_multi_pack_indexes_before; i++)
 			free((char *)keep_hashes[i]);
 		free(keep_hashes);
 	}
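One detail worth calling out before the test changes: once the loop counters are unsigned, the old bound `i < ctx.num_multi_pack_indexes_before + 1` would wrap to 0 if the counter ever reached `UINT32_MAX`, and the new `i <= ...` form would never terminate in that case, which is presumably why the series also adds the up-front `die(_("too many multi-pack-indexes"))` guard. A small illustrative sketch of that wrap-around hazard follows; it is not code from the commit, and `count_layers()` is an invented name.

/*
 * Illustrative sketch only: with uint32_t n, "n + 1" wraps to 0 when
 * n == UINT32_MAX, and "i <= n" cannot terminate in that case, so the
 * guard rejects that value before the loop is ever reached.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t count_layers(void)
{
	return 3;	/* pretend three existing MIDX layers */
}

int main(void)
{
	uint32_t before = count_layers();

	/* Guard first, mirroring the check added in write_midx_internal(). */
	if (before == UINT32_MAX) {
		fprintf(stderr, "too many multi-pack-indexes\n");
		return 1;
	}

	/* Safe now: "i <= before" visits before + 1 entries without wrapping. */
	for (uint32_t i = 0; i <= before; i++)
		printf("keep layer %u\n", (unsigned)i);

	return 0;
}

The hunks below come from the accompanying test script: an EXPENSIVE stress test that writes, repacks, and expires a multi-pack-index across many packs, and a check that `git multi-pack-index write` now fails with "could not load pack" when a pack file has been removed.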
@@ -989,6 +989,23 @@ test_expect_success 'repack --batch-size=0 repacks everything' '
 	)
 '
 
+test_expect_success EXPENSIVE 'repack/expire with many packs' '
+	cp -r dup many &&
+	(
+		cd many &&
+
+		for i in $(test_seq 1 100)
+		do
+			test_commit extra$i &&
+			git maintenance run --task=loose-objects || return 1
+		done &&
+
+		git multi-pack-index write &&
+		git multi-pack-index repack &&
+		git multi-pack-index expire
+	)
+'
+
 test_expect_success 'repack --batch-size=<large> repacks everything' '
 	(
 		cd dup2 &&
@@ -1083,7 +1100,10 @@ test_expect_success 'load reverse index when missing .idx, .pack' '
 		mv $idx.bak $idx &&
 
 		mv $pack $pack.bak &&
-		git cat-file --batch-check="%(objectsize:disk)" <tip
+		git cat-file --batch-check="%(objectsize:disk)" <tip &&
+
+		test_must_fail git multi-pack-index write 2>err &&
+		test_grep "could not load pack" err
 	)
 '