Merge branch 'ps/packfile-store'
Code clean-up around the in-core list of all the pack files and object database(s).

* ps/packfile-store:
  packfile: refactor `get_packed_git_mru()` to work on packfile store
  packfile: refactor `get_all_packs()` to work on packfile store
  packfile: refactor `get_packed_git()` to work on packfile store
  packfile: move `get_multi_pack_index()` into "midx.c"
  packfile: introduce function to load and add packfiles
  packfile: refactor `install_packed_git()` to work on packfile store
  packfile: split up responsibilities of `reprepare_packed_git()`
  packfile: refactor `prepare_packed_git()` to work on packfile store
  packfile: reorder functions to avoid function declaration
  odb: move kept cache into `struct packfile_store`
  odb: move MRU list of packfiles into `struct packfile_store`
  odb: move packfile map into `struct packfile_store`
  odb: move initialization bit into `struct packfile_store`
  odb: move list of packfiles into `struct packfile_store`
  packfile: introduce a new `struct packfile_store`
commit 8c13c31404
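For readers skimming the diff: the recurring change below is mechanical. Call sites that used to take a struct repository and go through get_packed_git(), get_all_packs() or get_packed_git_mru() now take the repository's packfile store (repo->objects->packfiles) and call the packfile_store_*() equivalents. A minimal, hedged sketch of a converted caller; the helper name count_local_packs() is invented for illustration and does not exist in the tree:

#include "git-compat-util.h"
#include "odb.h"
#include "packfile.h"

/* Hypothetical helper showing the new accessor style used throughout. */
static int count_local_packs(struct repository *r)
{
	struct packfile_store *packs = r->objects->packfiles;
	struct packed_git *p;
	int nr = 0;

	/* Old spelling: for (p = get_all_packs(r); p; p = p->next) */
	for (p = packfile_store_get_all_packs(packs); p; p = p->next)
		if (p->pack_local)
			nr++;

	return nr;
}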
@@ -53,7 +53,7 @@ static void download_batch(struct backfill_context *ctx)
 * We likely have a new packfile. Add it to the packed list to
 * avoid possible duplicate downloads of the same objects.
 */
-reprepare_packed_git(ctx->repo);
+odb_reprepare(ctx->repo->objects);
 }

 static int fill_missing_blobs(const char *path UNUSED,
@@ -852,9 +852,10 @@ static void batch_each_object(struct batch_options *opt,

 if (bitmap && !for_each_bitmapped_object(bitmap, &opt->objects_filter,
 batch_one_object_bitmapped, &payload)) {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *pack;

-for (pack = get_all_packs(the_repository); pack; pack = pack->next) {
+for (pack = packfile_store_get_all_packs(packs); pack; pack = pack->next) {
 if (bitmap_index_contains_pack(bitmap, pack) ||
 open_pack_index(pack))
 continue;
@@ -122,6 +122,7 @@ int cmd_count_objects(int argc,
 count_loose, count_cruft, NULL, NULL);

 if (verbose) {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *p;
 unsigned long num_pack = 0;
 off_t size_pack = 0;
@@ -129,7 +130,7 @@ int cmd_count_objects(int argc,
 struct strbuf pack_buf = STRBUF_INIT;
 struct strbuf garbage_buf = STRBUF_INIT;

-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 if (!p->pack_local)
 continue;
 if (open_pack_index(p))
@@ -899,11 +899,11 @@ static void end_packfile(void)
 idx_name = keep_pack(create_index());

 /* Register the packfile with core git's machinery. */
-new_p = add_packed_git(pack_data->repo, idx_name, strlen(idx_name), 1);
+new_p = packfile_store_load_pack(pack_data->repo->objects->packfiles,
+idx_name, 1);
 if (!new_p)
 die("core git rejected index %s", idx_name);
 all_packs[pack_id] = new_p;
-install_packed_git(the_repository, new_p);
 free(idx_name);

 /* Print the boundary */
@@ -954,6 +954,7 @@ static int store_object(
 struct object_id *oidout,
 uintmax_t mark)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 void *out, *delta;
 struct object_entry *e;
 unsigned char hdr[96];
@@ -977,7 +978,7 @@ static int store_object(
 if (e->idx.offset) {
 duplicate_count_by_type[type]++;
 return 1;
-} else if (find_oid_pack(&oid, get_all_packs(the_repository))) {
+} else if (find_oid_pack(&oid, packfile_store_get_all_packs(packs))) {
 e->type = type;
 e->pack_id = MAX_PACK_ID;
 e->idx.offset = 1; /* just not zero! */
@@ -1094,6 +1095,7 @@ static void truncate_pack(struct hashfile_checkpoint *checkpoint)

 static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 size_t in_sz = 64 * 1024, out_sz = 64 * 1024;
 unsigned char *in_buf = xmalloc(in_sz);
 unsigned char *out_buf = xmalloc(out_sz);
@@ -1177,7 +1179,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
 duplicate_count_by_type[OBJ_BLOB]++;
 truncate_pack(&checkpoint);

-} else if (find_oid_pack(&oid, get_all_packs(the_repository))) {
+} else if (find_oid_pack(&oid, packfile_store_get_all_packs(packs))) {
 e->type = OBJ_BLOB;
 e->pack_id = MAX_PACK_ID;
 e->idx.offset = 1; /* just not zero! */
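The fast-import hunk above is typical of how the series collapses the add_packed_git() + install_packed_git() pair into packfile_store_load_pack(), which also deduplicates against packs that are already registered. A hedged sketch of the before/after shape (the wrapper name register_new_pack() is invented for illustration):

#include "odb.h"
#include "packfile.h"

/* Sketch: register a freshly written pack, given the path to its .idx file. */
static struct packed_git *register_new_pack(struct repository *repo,
					    const char *idx_name)
{
	/*
	 * Old pattern, as removed above:
	 *   p = add_packed_git(repo, idx_name, strlen(idx_name), 1);
	 *   if (p)
	 *           install_packed_git(repo, p);
	 */
	return packfile_store_load_pack(repo->objects->packfiles, idx_name, 1);
}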
@@ -867,19 +867,20 @@ static int mark_packed_for_connectivity(const struct object_id *oid,

 static int check_pack_rev_indexes(struct repository *r, int show_progress)
 {
+struct packfile_store *packs = r->objects->packfiles;
 struct progress *progress = NULL;
 uint32_t pack_count = 0;
 int res = 0;

 if (show_progress) {
-for (struct packed_git *p = get_all_packs(r); p; p = p->next)
+for (struct packed_git *p = packfile_store_get_all_packs(packs); p; p = p->next)
 pack_count++;
 progress = start_delayed_progress(the_repository,
 "Verifying reverse pack-indexes", pack_count);
 pack_count = 0;
 }

-for (struct packed_git *p = get_all_packs(r); p; p = p->next) {
+for (struct packed_git *p = packfile_store_get_all_packs(packs); p; p = p->next) {
 int load_error = load_pack_revindex_from_disk(p);

 if (load_error < 0) {
@@ -999,6 +1000,8 @@ int cmd_fsck(int argc,
 for_each_packed_object(the_repository,
 mark_packed_for_connectivity, NULL, 0);
 } else {
+struct packfile_store *packs = the_repository->objects->packfiles;
+
 odb_prepare_alternates(the_repository->objects);
 for (source = the_repository->objects->sources; source; source = source->next)
 fsck_source(source);
@@ -1009,7 +1012,7 @@ int cmd_fsck(int argc,
 struct progress *progress = NULL;

 if (show_progress) {
-for (p = get_all_packs(the_repository); p;
+for (p = packfile_store_get_all_packs(packs); p;
 p = p->next) {
 if (open_pack_index(p))
 continue;
@@ -1019,7 +1022,7 @@ int cmd_fsck(int argc,
 progress = start_progress(the_repository,
 _("Checking objects"), total);
 }
-for (p = get_all_packs(the_repository); p;
+for (p = packfile_store_get_all_packs(packs); p;
 p = p->next) {
 /* verify gives error messages itself */
 if (verify_pack(the_repository,
builtin/gc.c | 14
@@ -487,9 +487,10 @@ static int too_many_loose_objects(struct gc_config *cfg)
 static struct packed_git *find_base_packs(struct string_list *packs,
 unsigned long limit)
 {
+struct packfile_store *packfiles = the_repository->objects->packfiles;
 struct packed_git *p, *base = NULL;

-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packfiles); p; p = p->next) {
 if (!p->pack_local || p->is_cruft)
 continue;
 if (limit) {
@@ -508,13 +509,14 @@ static struct packed_git *find_base_packs(struct string_list *packs,

 static int too_many_packs(struct gc_config *cfg)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *p;
 int cnt;

 if (cfg->gc_auto_pack_limit <= 0)
 return 0;

-for (cnt = 0, p = get_all_packs(the_repository); p; p = p->next) {
+for (cnt = 0, p = packfile_store_get_all_packs(packs); p; p = p->next) {
 if (!p->pack_local)
 continue;
 if (p->pack_keep)
@@ -1042,7 +1044,7 @@ int cmd_gc(int argc,
 die(FAILED_RUN, "rerere");

 report_garbage = report_pack_garbage;
-reprepare_packed_git(the_repository);
+odb_reprepare(the_repository->objects);
 if (pack_garbage.nr > 0) {
 close_object_store(the_repository->objects);
 clean_pack_garbage();
@@ -1423,7 +1425,7 @@ static int incremental_repack_auto_condition(struct gc_config *cfg UNUSED)
 if (incremental_repack_auto_limit < 0)
 return 1;

-for (p = get_packed_git(the_repository);
+for (p = packfile_store_get_packs(the_repository->objects->packfiles);
 count < incremental_repack_auto_limit && p;
 p = p->next) {
 if (!p->multi_pack_index)
@@ -1491,8 +1493,8 @@ static off_t get_auto_pack_size(void)
 struct packed_git *p;
 struct repository *r = the_repository;

-reprepare_packed_git(r);
-for (p = get_all_packs(r); p; p = p->next) {
+odb_reprepare(r->objects);
+for (p = packfile_store_get_all_packs(r->objects->packfiles); p; p = p->next) {
 if (p->pack_size > max_size) {
 second_largest_size = max_size;
 max_size = p->pack_size;
@@ -1214,7 +1214,7 @@ int cmd_grep(int argc,
 if (recurse_submodules)
 repo_read_gitmodules(the_repository, 1);
 if (startup_info->have_repository)
-(void)get_packed_git(the_repository);
+(void)packfile_store_get_packs(the_repository->objects->packfiles);

 start_threads(&opt);
 } else {
@@ -1640,13 +1640,9 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
 rename_tmp_packfile(&final_index_name, curr_index_name, &index_name,
 hash, "idx", 1);

-if (do_fsck_object) {
-struct packed_git *p;
-p = add_packed_git(the_repository, final_index_name,
-strlen(final_index_name), 0);
-if (p)
-install_packed_git(the_repository, p);
-}
+if (do_fsck_object)
+packfile_store_load_pack(the_repository->objects->packfiles,
+final_index_name, 0);

 if (!from_stdin) {
 printf("%s\n", hash_to_hex(hash));
@@ -1748,12 +1748,12 @@ static int want_object_in_pack_mtime(const struct object_id *oid,
 }
 }

-list_for_each(pos, get_packed_git_mru(the_repository)) {
+list_for_each(pos, packfile_store_get_packs_mru(the_repository->objects->packfiles)) {
 struct packed_git *p = list_entry(pos, struct packed_git, mru);
 want = want_object_in_pack_one(p, oid, exclude, found_pack, found_offset, found_mtime);
 if (!exclude && want > 0)
 list_move(&p->mru,
-get_packed_git_mru(the_repository));
+packfile_store_get_packs_mru(the_repository->objects->packfiles));
 if (want != -1)
 return want;
 }
@@ -3831,6 +3831,7 @@ static int pack_mtime_cmp(const void *_a, const void *_b)

 static void read_packs_list_from_stdin(struct rev_info *revs)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct strbuf buf = STRBUF_INIT;
 struct string_list include_packs = STRING_LIST_INIT_DUP;
 struct string_list exclude_packs = STRING_LIST_INIT_DUP;
@@ -3855,7 +3856,7 @@ static void read_packs_list_from_stdin(struct rev_info *revs)
 string_list_sort(&exclude_packs);
 string_list_remove_duplicates(&exclude_packs, 0);

-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 const char *pack_name = pack_basename(p);

 if ((item = string_list_lookup(&include_packs, pack_name)))
@@ -4076,6 +4077,7 @@ static void enumerate_cruft_objects(void)

 static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *p;
 struct rev_info revs;
 int ret;
@@ -4105,7 +4107,7 @@ static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs
 * Re-mark only the fresh packs as kept so that objects in
 * unknown packs do not halt the reachability traversal early.
 */
-for (p = get_all_packs(the_repository); p; p = p->next)
+for (p = packfile_store_get_all_packs(packs); p; p = p->next)
 p->pack_keep_in_core = 0;
 mark_pack_kept_in_core(fresh_packs, 1);

@@ -4122,6 +4124,7 @@ static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs

 static void read_cruft_objects(void)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct strbuf buf = STRBUF_INIT;
 struct string_list discard_packs = STRING_LIST_INIT_DUP;
 struct string_list fresh_packs = STRING_LIST_INIT_DUP;
@@ -4142,7 +4145,7 @@ static void read_cruft_objects(void)
 string_list_sort(&discard_packs);
 string_list_sort(&fresh_packs);

-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 const char *pack_name = pack_basename(p);
 struct string_list_item *item;

@@ -4390,11 +4393,12 @@ static void add_unreachable_loose_objects(struct rev_info *revs)

 static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 static struct packed_git *last_found = (void *)1;
 struct packed_git *p;

 p = (last_found != (void *)1) ? last_found :
-get_all_packs(the_repository);
+packfile_store_get_all_packs(packs);

 while (p) {
 if ((!p->pack_local || p->pack_keep ||
@@ -4404,7 +4408,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
 return 1;
 }
 if (p == last_found)
-p = get_all_packs(the_repository);
+p = packfile_store_get_all_packs(packs);
 else
 p = p->next;
 if (p == last_found)
@@ -4436,12 +4440,13 @@ static int loosened_object_can_be_discarded(const struct object_id *oid,

 static void loosen_unused_packed_objects(void)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *p;
 uint32_t i;
 uint32_t loosened_objects_nr = 0;
 struct object_id oid;

-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
 continue;

@@ -4742,12 +4747,13 @@ static void get_object_list(struct rev_info *revs, struct strvec *argv)

 static void add_extra_kept_packs(const struct string_list *names)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *p;

 if (!names->nr)
 return;

-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 const char *name = basename(p->pack_name);
 int i;

@@ -5185,8 +5191,10 @@ int cmd_pack_objects(int argc,

 add_extra_kept_packs(&keep_pack_list);
 if (ignore_packed_keep_on_disk) {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *p;
-for (p = get_all_packs(the_repository); p; p = p->next)
+
+for (p = packfile_store_get_all_packs(packs); p; p = p->next)
 if (p->pack_local && p->pack_keep)
 break;
 if (!p) /* no keep-able packs found */
@@ -5198,8 +5206,10 @@ int cmd_pack_objects(int argc,
 * want to unset "local" based on looking at packs, as
 * it also covers non-local objects
 */
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *p;
-for (p = get_all_packs(the_repository); p; p = p->next) {
+
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 if (!p->pack_local) {
 have_non_local_packs = 1;
 break;
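want_object_in_pack_mtime() above also shows how the MRU list moved into the store: iterate packfile_store_get_packs_mru() and promote a pack with list_move() when it serves a lookup. A compressed, hedged sketch of that idiom outside of pack-objects (the helper name find_in_mru() is invented):

#include "list.h"
#include "odb.h"
#include "packfile.h"

/* Sketch: probe packs in most-recently-used order and keep hot packs first. */
static struct packed_git *find_in_mru(struct repository *r,
				      const struct object_id *oid)
{
	struct list_head *mru = packfile_store_get_packs_mru(r->objects->packfiles);
	struct list_head *pos;

	list_for_each(pos, mru) {
		struct packed_git *p = list_entry(pos, struct packed_git, mru);

		if (find_pack_entry_one(oid, p)) {
			list_move(&p->mru, mru); /* bump to the front of the MRU list */
			return p;
		}
	}

	return NULL;
}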
@@ -566,7 +566,8 @@ static struct pack_list * add_pack(struct packed_git *p)

 static struct pack_list * add_pack_file(const char *filename)
 {
-struct packed_git *p = get_all_packs(the_repository);
+struct packfile_store *packs = the_repository->objects->packfiles;
+struct packed_git *p = packfile_store_get_all_packs(packs);

 if (strlen(filename) < 40)
 die("Bad pack filename: %s", filename);
@@ -581,7 +582,8 @@ static struct pack_list * add_pack_file(const char *filename)

 static void load_all(void)
 {
-struct packed_git *p = get_all_packs(the_repository);
+struct packfile_store *packs = the_repository->objects->packfiles;
+struct packed_git *p = packfile_store_get_all_packs(packs);

 while (p) {
 add_pack(p);
@@ -2389,7 +2389,7 @@ static const char *unpack(int err_fd, struct shallow_info *si)
 status = finish_command(&child);
 if (status)
 return "index-pack abnormal exit";
-reprepare_packed_git(the_repository);
+odb_reprepare(the_repository->objects);
 }
 return NULL;
 }
@@ -265,10 +265,11 @@ static void existing_packs_release(struct existing_packs *existing)
 static void collect_pack_filenames(struct existing_packs *existing,
 const struct string_list *extra_keep)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *p;
 struct strbuf buf = STRBUF_INIT;

-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 int i;
 const char *base;

@@ -497,10 +498,11 @@ static void init_pack_geometry(struct pack_geometry *geometry,
 struct existing_packs *existing,
 const struct pack_objects_args *args)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *p;
 struct strbuf buf = STRBUF_INIT;

-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 if (args->local && !p->pack_local)
 /*
 * When asked to only repack local packfiles we skip
@@ -1137,11 +1139,12 @@ static int write_filtered_pack(const struct pack_objects_args *args,
 static void combine_small_cruft_packs(FILE *in, size_t combine_cruft_below_size,
 struct existing_packs *existing)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *p;
 struct strbuf buf = STRBUF_INIT;
 size_t i;

-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 if (!(p->is_cruft && p->pack_local))
 continue;

@@ -1685,7 +1688,7 @@ int cmd_repack(int argc,
 goto cleanup;
 }

-reprepare_packed_git(the_repository);
+odb_reprepare(the_repository->objects);

 if (delete_redundant) {
 int opts = 0;
@@ -72,11 +72,12 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
 * Before checking for promisor packs, be sure we have the
 * latest pack-files loaded into memory.
 */
-reprepare_packed_git(the_repository);
+odb_reprepare(the_repository->objects);
 do {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *p;

-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 if (!p->pack_promisor)
 continue;
 if (find_pack_entry_one(oid, p))
@@ -1983,7 +1983,7 @@ static void update_shallow(struct fetch_pack_args *args,
 * remote is shallow, but this is a clone, there are
 * no objects in repo to worry about. Accept any
 * shallow points that exist in the pack (iow in repo
-* after get_pack() and reprepare_packed_git())
+* after get_pack() and odb_reprepare())
 */
 struct oid_array extra = OID_ARRAY_INIT;
 struct object_id *oid = si->shallow->oid;
@@ -2108,7 +2108,7 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
 ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
 &si, pack_lockfiles);
 }
-reprepare_packed_git(the_repository);
+odb_reprepare(the_repository->objects);

 if (!args->cloning && args->deepen) {
 struct check_connected_options opt = CHECK_CONNECTED_INIT;
@@ -603,18 +603,19 @@ static void get_head(struct strbuf *hdr, char *arg UNUSED)
 static void get_info_packs(struct strbuf *hdr, char *arg UNUSED)
 {
 size_t objdirlen = strlen(repo_get_object_directory(the_repository));
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct strbuf buf = STRBUF_INIT;
 struct packed_git *p;
 size_t cnt = 0;

 select_getanyfile(hdr);
-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 if (p->pack_local)
 cnt++;
 }

 strbuf_grow(&buf, cnt * 53 + 2);
-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 if (p->pack_local)
 strbuf_addf(&buf, "P %s\n", p->pack_name + objdirlen + 6);
 }
http.c | 5
@@ -2416,6 +2416,7 @@ static char *fetch_pack_index(unsigned char *hash, const char *base_url)
 static int fetch_and_setup_pack_index(struct packed_git **packs_head,
 unsigned char *sha1, const char *base_url)
 {
+struct packfile_store *packs = the_repository->objects->packfiles;
 struct packed_git *new_pack, *p;
 char *tmp_idx = NULL;
 int ret;
@@ -2424,7 +2425,7 @@ static int fetch_and_setup_pack_index(struct packed_git **packs_head,
 * If we already have the pack locally, no need to fetch its index or
 * even add it to list; we already have all of its objects.
 */
-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 if (hasheq(p->hash, sha1, the_repository->hash_algo))
 return 0;
 }
@@ -2549,7 +2550,7 @@ void http_install_packfile(struct packed_git *p,
 lst = &((*lst)->next);
 *lst = (*lst)->next;

-install_packed_git(the_repository, p);
+packfile_store_add_pack(the_repository->objects->packfiles, p);
 }

 struct http_pack_request *new_http_pack_request(
http.h | 2
@@ -210,7 +210,7 @@ int finish_http_pack_request(struct http_pack_request *preq);
 void release_http_pack_request(struct http_pack_request *preq);

 /*
-* Remove p from the given list, and invoke install_packed_git() on it.
+* Remove p from the given list, and invoke packfile_store_add_pack() on it.
 *
 * This is a convenience function for users that have obtained a list of packs
 * from http_get_info_packs() and have chosen a specific pack to fetch.
midx.c | 29
@@ -93,6 +93,12 @@ static int midx_read_object_offsets(const unsigned char *chunk_start,
 return 0;
 }

+struct multi_pack_index *get_multi_pack_index(struct odb_source *source)
+{
+packfile_store_prepare(source->odb->packfiles);
+return source->midx;
+}
+
 static struct multi_pack_index *load_multi_pack_index_one(struct odb_source *source,
 const char *midx_name)
 {
@@ -443,7 +449,6 @@ int prepare_midx_pack(struct multi_pack_index *m,
 {
 struct repository *r = m->source->odb->repo;
 struct strbuf pack_name = STRBUF_INIT;
-struct strbuf key = STRBUF_INIT;
 struct packed_git *p;

 pack_int_id = midx_for_pack(&m, pack_int_id);
@@ -455,25 +460,11 @@ int prepare_midx_pack(struct multi_pack_index *m,

 strbuf_addf(&pack_name, "%s/pack/%s", m->source->path,
 m->pack_names[pack_int_id]);

-/* pack_map holds the ".pack" name, but we have the .idx */
-strbuf_addbuf(&key, &pack_name);
-strbuf_strip_suffix(&key, ".idx");
-strbuf_addstr(&key, ".pack");
-p = hashmap_get_entry_from_hash(&r->objects->pack_map,
-strhash(key.buf), key.buf,
-struct packed_git, packmap_ent);
-if (!p) {
-p = add_packed_git(r, pack_name.buf, pack_name.len,
-m->source->local);
-if (p) {
-install_packed_git(r, p);
-list_add_tail(&p->mru, &r->objects->packed_git_mru);
-}
-}
-
+p = packfile_store_load_pack(r->objects->packfiles,
+pack_name.buf, m->source->local);
+if (p)
+list_add_tail(&p->mru, &r->objects->packfiles->mru);
 strbuf_release(&pack_name);
-strbuf_release(&key);

 if (!p) {
 m->packs[pack_int_id] = MIDX_PACK_ERROR;
midx.h | 1
@@ -94,6 +94,7 @@ void get_midx_chain_filename(struct odb_source *source, struct strbuf *out);
 void get_split_midx_filename_ext(struct odb_source *source, struct strbuf *buf,
 const unsigned char *hash, const char *ext);

+struct multi_pack_index *get_multi_pack_index(struct odb_source *source);
 struct multi_pack_index *load_multi_pack_index(struct odb_source *source);
 int prepare_midx_pack(struct multi_pack_index *m, uint32_t pack_int_id);
 struct packed_git *nth_midxed_pack(struct multi_pack_index *m,
@@ -1504,7 +1504,7 @@ clear_exit:

 strbuf_release(&packname);
 /* Make objects we just wrote available to ourselves */
-reprepare_packed_git(repo);
+odb_reprepare(repo->objects);
 }

 /*
@@ -213,7 +213,7 @@ static void find_short_packed_object(struct disambiguate_state *ds)
 unique_in_midx(m, ds);
 }

-for (p = get_packed_git(ds->repo); p && !ds->ambiguous;
+for (p = packfile_store_get_packs(ds->repo->objects->packfiles); p && !ds->ambiguous;
 p = p->next)
 unique_in_pack(p, ds);
 }
@@ -596,7 +596,7 @@ static enum get_oid_result get_short_oid(struct repository *r,
 * or migrated from loose to packed.
 */
 if (status == MISSING_OBJECT) {
-reprepare_packed_git(r);
+odb_reprepare(r->objects);
 find_short_object_filename(&ds);
 find_short_packed_object(&ds);
 status = finish_object_disambiguation(&ds, oid);
@@ -805,7 +805,7 @@ static void find_abbrev_len_packed(struct min_abbrev_data *mad)
 find_abbrev_len_for_midx(m, mad);
 }

-for (p = get_packed_git(mad->repo); p; p = p->next)
+for (p = packfile_store_get_packs(mad->repo->objects->packfiles); p; p = p->next)
 find_abbrev_len_for_pack(p, mad);
 }

odb.c | 40
@@ -694,7 +694,7 @@ static int do_oid_object_info_extended(struct object_database *odb,

 /* Not a loose object; someone else may have just packed it. */
 if (!(flags & OBJECT_INFO_QUICK)) {
-reprepare_packed_git(odb->repo);
+odb_reprepare(odb->repo->objects);
 if (find_pack_entry(odb->repo, real, &e))
 break;
 }
@@ -996,8 +996,7 @@ struct object_database *odb_new(struct repository *repo)

 memset(o, 0, sizeof(*o));
 o->repo = repo;
-INIT_LIST_HEAD(&o->packed_git_mru);
-hashmap_init(&o->pack_map, pack_map_entry_cmp, NULL, 0);
+o->packfiles = packfile_store_new(o);
 pthread_mutex_init(&o->replace_mutex, NULL);
 string_list_init_dup(&o->submodule_source_paths);
 return o;
@@ -1035,21 +1034,36 @@ void odb_clear(struct object_database *o)
 free((char *) o->cached_objects[i].value.buf);
 FREE_AND_NULL(o->cached_objects);

-INIT_LIST_HEAD(&o->packed_git_mru);
-close_object_store(o);
+packfile_store_free(o->packfiles);
+o->packfiles = NULL;

+string_list_clear(&o->submodule_source_paths, 0);
+}
+
+void odb_reprepare(struct object_database *o)
+{
+struct odb_source *source;
+
+obj_read_lock();
+
 /*
-* `close_object_store()` only closes the packfiles, but doesn't free
-* them. We thus have to do this manually.
+* Reprepare alt odbs, in case the alternates file was modified
+* during the course of this process. This only _adds_ odbs to
+* the linked list, so existing odbs will continue to exist for
+* the lifetime of the process.
 */
-for (struct packed_git *p = o->packed_git, *next; p; p = next) {
-next = p->next;
-free(p);
-}
-o->packed_git = NULL;
+o->loaded_alternates = 0;
+odb_prepare_alternates(o);

-hashmap_clear(&o->pack_map);
-string_list_clear(&o->submodule_source_paths, 0);
+for (source = o->sources; source; source = source->next)
+odb_clear_loose_cache(source);
+
+o->approximate_object_count_valid = 0;
+
+packfile_store_reprepare(o->packfiles);
+
+obj_read_unlock();
 }

 struct odb_transaction *odb_transaction_begin(struct object_database *odb)
odb.h | 36
@@ -3,7 +3,6 @@

 #include "hashmap.h"
 #include "object.h"
-#include "list.h"
 #include "oidset.h"
 #include "oidmap.h"
 #include "string-list.h"
@@ -91,6 +90,7 @@ struct odb_source {
 };

 struct packed_git;
+struct packfile_store;
 struct cached_object_entry;
 struct odb_transaction;

@@ -139,20 +139,8 @@ struct object_database {
 struct commit_graph *commit_graph;
 unsigned commit_graph_attempted : 1; /* if loading has been attempted */

-/*
-* private data
-*
-* should only be accessed directly by packfile.c
-*/
-
-struct packed_git *packed_git;
-/* A most-recently-used ordered version of the packed_git list. */
-struct list_head packed_git_mru;
-
-struct {
-struct packed_git **packs;
-unsigned flags;
-} kept_pack_cache;
+/* Should only be accessed directly by packfile.c and midx.c. */
+struct packfile_store *packfiles;

 /*
 * This is meant to hold a *small* number of objects that you would
@@ -163,12 +151,6 @@ struct object_database {
 struct cached_object_entry *cached_objects;
 size_t cached_object_nr, cached_object_alloc;

-/*
-* A map of packfiles to packed_git structs for tracking which
-* packs have been loaded already.
-*/
-struct hashmap pack_map;
-
 /*
 * A fast, rough count of the number of objects in the repository.
 * These two fields are not meant for direct access. Use
@@ -177,12 +159,6 @@ struct object_database {
 unsigned long approximate_object_count;
 unsigned approximate_object_count_valid : 1;

-/*
-* Whether packed_git has already been populated with this repository's
-* packs.
-*/
-unsigned packed_git_initialized : 1;
-
 /*
 * Submodule source paths that will be added as additional sources to
 * allow lookup of submodule objects via the main object database.
@@ -193,6 +169,12 @@ struct object_database {
 struct object_database *odb_new(struct repository *repo);
 void odb_clear(struct object_database *o);

+/*
+* Clear caches, reload alternates and then reload object sources so that new
+* objects may become accessible.
+*/
+void odb_reprepare(struct object_database *o);
+
 /*
 * Starts an ODB transaction. Subsequent objects are written to the transaction
 * and not committed until odb_transaction_commit() is invoked on the
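odb_reprepare() (declared above) is the piece callers now reach for when packs may have appeared behind the process's back; packfile.c's retry in do_oid_object_info_extended() is the canonical user. A hedged sketch of that retry idiom (the wrapper name lookup_with_retry() is invented):

#include "odb.h"
#include "packfile.h"

/* Sketch: if a lookup misses, reload sources and packs, then try once more. */
static int lookup_with_retry(struct repository *r,
			     const struct object_id *oid,
			     struct pack_entry *e)
{
	if (find_pack_entry(r, oid, e))
		return 1;

	odb_reprepare(r->objects); /* clears caches, reloads alternates and packs */
	return find_pack_entry(r, oid, e);
}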
@@ -664,7 +664,7 @@ static int open_pack_bitmap(struct repository *r,
 struct packed_git *p;
 int ret = -1;

-for (p = get_all_packs(r); p; p = p->next) {
+for (p = packfile_store_get_all_packs(r->objects->packfiles); p; p = p->next) {
 if (open_pack_bitmap_1(bitmap_git, p) == 0) {
 ret = 0;
 /*
@@ -3362,7 +3362,7 @@ int verify_bitmap_files(struct repository *r)
 free(midx_bitmap_name);
 }

-for (struct packed_git *p = get_all_packs(r);
+for (struct packed_git *p = packfile_store_get_all_packs(r->objects->packfiles);
 p; p = p->next) {
 char *pack_bitmap_name = pack_bitmap_filename(p);
 res |= verify_bitmap_file(r->hash_algo, pack_bitmap_name);
@@ -86,6 +86,7 @@ struct object_entry *packlist_find(struct packing_data *pdata,

 static void prepare_in_pack_by_idx(struct packing_data *pdata)
 {
+struct packfile_store *packs = pdata->repo->objects->packfiles;
 struct packed_git **mapping, *p;
 int cnt = 0, nr = 1U << OE_IN_PACK_BITS;

@@ -95,7 +96,7 @@ static void prepare_in_pack_by_idx(struct packing_data *pdata)
 * (i.e. in_pack_idx also zero) should return NULL.
 */
 mapping[cnt++] = NULL;
-for (p = get_all_packs(pdata->repo); p; p = p->next, cnt++) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next, cnt++) {
 if (cnt == nr) {
 free(mapping);
 return;
packfile.c | 283
@@ -278,7 +278,7 @@ static int unuse_one_window(struct packed_git *current)

 if (current)
 scan_windows(current, &lru_p, &lru_w, &lru_l);
-for (p = current->repo->objects->packed_git; p; p = p->next)
+for (p = current->repo->objects->packfiles->packs; p; p = p->next)
 scan_windows(p, &lru_p, &lru_w, &lru_l);
 if (lru_p) {
 munmap(lru_w->base, lru_w->len);
@@ -362,13 +362,8 @@ void close_pack(struct packed_git *p)
 void close_object_store(struct object_database *o)
 {
 struct odb_source *source;
-struct packed_git *p;

-for (p = o->packed_git; p; p = p->next)
-if (p->do_not_close)
-BUG("want to close pack marked 'do-not-close'");
-else
-close_pack(p);
+packfile_store_close(o->packfiles);

 for (source = o->sources; source; source = source->next) {
 if (source->midx)
@@ -468,7 +463,7 @@ static int close_one_pack(struct repository *r)
 struct pack_window *mru_w = NULL;
 int accept_windows_inuse = 1;

-for (p = r->objects->packed_git; p; p = p->next) {
+for (p = r->objects->packfiles->packs; p; p = p->next) {
 if (p->pack_fd == -1)
 continue;
 find_lru_pack(p, &lru_p, &mru_w, &accept_windows_inuse);
@@ -784,16 +779,44 @@ struct packed_git *add_packed_git(struct repository *r, const char *path,
 return p;
 }

-void install_packed_git(struct repository *r, struct packed_git *pack)
+void packfile_store_add_pack(struct packfile_store *store,
+struct packed_git *pack)
 {
 if (pack->pack_fd != -1)
 pack_open_fds++;

-pack->next = r->objects->packed_git;
-r->objects->packed_git = pack;
+pack->next = store->packs;
+store->packs = pack;

 hashmap_entry_init(&pack->packmap_ent, strhash(pack->pack_name));
-hashmap_add(&r->objects->pack_map, &pack->packmap_ent);
+hashmap_add(&store->map, &pack->packmap_ent);
 }

+struct packed_git *packfile_store_load_pack(struct packfile_store *store,
+const char *idx_path, int local)
+{
+struct strbuf key = STRBUF_INIT;
+struct packed_git *p;
+
+/*
+* We're being called with the path to the index file, but `pack_map`
+* holds the path to the packfile itself.
+*/
+strbuf_addstr(&key, idx_path);
+strbuf_strip_suffix(&key, ".idx");
+strbuf_addstr(&key, ".pack");
+
+p = hashmap_get_entry_from_hash(&store->map, strhash(key.buf), key.buf,
+struct packed_git, packmap_ent);
+if (!p) {
+p = add_packed_git(store->odb->repo, idx_path,
+strlen(idx_path), local);
+if (p)
+packfile_store_add_pack(store, p);
+}
+
+strbuf_release(&key);
+return p;
+}
+
 void (*report_garbage)(unsigned seen_bits, const char *path);
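A detail worth calling out in packfile_store_load_pack() above: callers hand in the .idx path, but the store's map is keyed by the .pack path, so the function rewrites the suffix before probing the map. A standalone sketch of just that key derivation (the helper name pack_key_from_idx_path() is invented):

#include "strbuf.h"

/* Sketch: turn ".../pack-1234.idx" into the ".../pack-1234.pack" map key. */
static char *pack_key_from_idx_path(const char *idx_path)
{
	struct strbuf key = STRBUF_INIT;

	strbuf_addstr(&key, idx_path);
	strbuf_strip_suffix(&key, ".idx");
	strbuf_addstr(&key, ".pack");

	return strbuf_detach(&key, NULL); /* caller owns and frees the result */
}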
@@ -895,23 +918,14 @@ static void prepare_pack(const char *full_name, size_t full_name_len,
 const char *file_name, void *_data)
 {
 struct prepare_pack_data *data = (struct prepare_pack_data *)_data;
-struct packed_git *p;
 size_t base_len = full_name_len;

 if (strip_suffix_mem(full_name, &base_len, ".idx") &&
 !(data->m && midx_contains_pack(data->m, file_name))) {
-struct hashmap_entry hent;
-char *pack_name = xstrfmt("%.*s.pack", (int)base_len, full_name);
-unsigned int hash = strhash(pack_name);
-hashmap_entry_init(&hent, hash);
-
-/* Don't reopen a pack we already have. */
-if (!hashmap_get(&data->r->objects->pack_map, &hent, pack_name)) {
-p = add_packed_git(data->r, full_name, full_name_len, data->local);
-if (p)
-install_packed_git(data->r, p);
-}
-free(pack_name);
+char *trimmed_path = xstrndup(full_name, full_name_len);
+packfile_store_load_pack(data->r->objects->packfiles,
+trimmed_path, data->local);
+free(trimmed_path);
 }

 if (!report_garbage)
@@ -951,40 +965,6 @@ static void prepare_packed_git_one(struct odb_source *source)
 string_list_clear(data.garbage, 0);
 }

-static void prepare_packed_git(struct repository *r);
-/*
-* Give a fast, rough count of the number of objects in the repository. This
-* ignores loose objects completely. If you have a lot of them, then either
-* you should repack because your performance will be awful, or they are
-* all unreachable objects about to be pruned, in which case they're not really
-* interesting as a measure of repo size in the first place.
-*/
-unsigned long repo_approximate_object_count(struct repository *r)
-{
-if (!r->objects->approximate_object_count_valid) {
-struct odb_source *source;
-unsigned long count = 0;
-struct packed_git *p;
-
-prepare_packed_git(r);
-
-for (source = r->objects->sources; source; source = source->next) {
-struct multi_pack_index *m = get_multi_pack_index(source);
-if (m)
-count += m->num_objects;
-}
-
-for (p = r->objects->packed_git; p; p = p->next) {
-if (open_pack_index(p))
-continue;
-count += p->num_objects;
-}
-r->objects->approximate_object_count = count;
-r->objects->approximate_object_count_valid = 1;
-}
-return r->objects->approximate_object_count;
-}
-
 DEFINE_LIST_SORT(static, sort_packs, struct packed_git, next);

 static int sort_pack(const struct packed_git *a, const struct packed_git *b)
@@ -1013,80 +993,51 @@ static int sort_pack(const struct packed_git *a, const struct packed_git *b)
 return -1;
 }

-static void rearrange_packed_git(struct repository *r)
-{
-sort_packs(&r->objects->packed_git, sort_pack);
-}
-
-static void prepare_packed_git_mru(struct repository *r)
+static void packfile_store_prepare_mru(struct packfile_store *store)
 {
 struct packed_git *p;

-INIT_LIST_HEAD(&r->objects->packed_git_mru);
+INIT_LIST_HEAD(&store->mru);

-for (p = r->objects->packed_git; p; p = p->next)
-list_add_tail(&p->mru, &r->objects->packed_git_mru);
+for (p = store->packs; p; p = p->next)
+list_add_tail(&p->mru, &store->mru);
 }

-static void prepare_packed_git(struct repository *r)
+void packfile_store_prepare(struct packfile_store *store)
 {
 struct odb_source *source;

-if (r->objects->packed_git_initialized)
+if (store->initialized)
 return;

-odb_prepare_alternates(r->objects);
-for (source = r->objects->sources; source; source = source->next) {
+odb_prepare_alternates(store->odb);
+for (source = store->odb->sources; source; source = source->next) {
 prepare_multi_pack_index_one(source);
 prepare_packed_git_one(source);
 }
-rearrange_packed_git(r);
+sort_packs(&store->packs, sort_pack);

-prepare_packed_git_mru(r);
-r->objects->packed_git_initialized = 1;
+packfile_store_prepare_mru(store);
+store->initialized = true;
 }

-void reprepare_packed_git(struct repository *r)
+void packfile_store_reprepare(struct packfile_store *store)
 {
-struct odb_source *source;
-
-obj_read_lock();
-
-/*
-* Reprepare alt odbs, in case the alternates file was modified
-* during the course of this process. This only _adds_ odbs to
-* the linked list, so existing odbs will continue to exist for
-* the lifetime of the process.
-*/
-r->objects->loaded_alternates = 0;
-odb_prepare_alternates(r->objects);
-
-for (source = r->objects->sources; source; source = source->next)
-odb_clear_loose_cache(source);
-
-r->objects->approximate_object_count_valid = 0;
-r->objects->packed_git_initialized = 0;
-prepare_packed_git(r);
-obj_read_unlock();
+store->initialized = false;
+packfile_store_prepare(store);
 }

-struct packed_git *get_packed_git(struct repository *r)
+struct packed_git *packfile_store_get_packs(struct packfile_store *store)
 {
-prepare_packed_git(r);
-return r->objects->packed_git;
+packfile_store_prepare(store);
+return store->packs;
 }

-struct multi_pack_index *get_multi_pack_index(struct odb_source *source)
+struct packed_git *packfile_store_get_all_packs(struct packfile_store *store)
 {
-prepare_packed_git(source->odb->repo);
-return source->midx;
-}
+packfile_store_prepare(store);
+
-struct packed_git *get_all_packs(struct repository *r)
-{
-prepare_packed_git(r);
-
-for (struct odb_source *source = r->objects->sources; source; source = source->next) {
+for (struct odb_source *source = store->odb->sources; source; source = source->next) {
 struct multi_pack_index *m = source->midx;
 if (!m)
 continue;
@@ -1094,13 +1045,46 @@ struct packed_git *get_all_packs(struct repository *r)
 prepare_midx_pack(m, i);
 }

-return r->objects->packed_git;
+return store->packs;
 }

-struct list_head *get_packed_git_mru(struct repository *r)
+struct list_head *packfile_store_get_packs_mru(struct packfile_store *store)
 {
-prepare_packed_git(r);
-return &r->objects->packed_git_mru;
+packfile_store_prepare(store);
+return &store->mru;
 }

+/*
+* Give a fast, rough count of the number of objects in the repository. This
+* ignores loose objects completely. If you have a lot of them, then either
+* you should repack because your performance will be awful, or they are
+* all unreachable objects about to be pruned, in which case they're not really
+* interesting as a measure of repo size in the first place.
+*/
+unsigned long repo_approximate_object_count(struct repository *r)
+{
+if (!r->objects->approximate_object_count_valid) {
+struct odb_source *source;
+unsigned long count = 0;
+struct packed_git *p;
+
+packfile_store_prepare(r->objects->packfiles);
+
+for (source = r->objects->sources; source; source = source->next) {
+struct multi_pack_index *m = get_multi_pack_index(source);
+if (m)
+count += m->num_objects;
+}
+
+for (p = r->objects->packfiles->packs; p; p = p->next) {
+if (open_pack_index(p))
+continue;
+count += p->num_objects;
+}
+r->objects->approximate_object_count = count;
+r->objects->approximate_object_count_valid = 1;
+}
+return r->objects->approximate_object_count;
+}
+
 unsigned long unpack_object_header_buffer(const unsigned char *buf,
@@ -1155,7 +1139,7 @@ unsigned long get_size_from_delta(struct packed_git *p,
 *
 * Other worrying sections could be the call to close_pack_fd(),
 * which can close packs even with in-use windows, and to
-* reprepare_packed_git(). Regarding the former, mmap doc says:
+* odb_reprepare(). Regarding the former, mmap doc says:
 * "closing the file descriptor does not unmap the region". And
 * for the latter, it won't re-open already available packs.
 */
@@ -1219,7 +1203,7 @@ const struct packed_git *has_packed_and_bad(struct repository *r,
 {
 struct packed_git *p;

-for (p = r->objects->packed_git; p; p = p->next)
+for (p = r->objects->packfiles->packs; p; p = p->next)
 if (oidset_contains(&p->bad_objects, oid))
 return p;
 return NULL;
@@ -2074,19 +2058,19 @@ int find_pack_entry(struct repository *r, const struct object_id *oid, struct pa
 {
 struct list_head *pos;

-prepare_packed_git(r);
+packfile_store_prepare(r->objects->packfiles);

 for (struct odb_source *source = r->objects->sources; source; source = source->next)
 if (source->midx && fill_midx_entry(source->midx, oid, e))
 return 1;

-if (!r->objects->packed_git)
+if (!r->objects->packfiles->packs)
 return 0;

-list_for_each(pos, &r->objects->packed_git_mru) {
+list_for_each(pos, &r->objects->packfiles->mru) {
 struct packed_git *p = list_entry(pos, struct packed_git, mru);
 if (!p->multi_pack_index && fill_pack_entry(oid, e, p)) {
-list_move(&p->mru, &r->objects->packed_git_mru);
+list_move(&p->mru, &r->objects->packfiles->mru);
 return 1;
 }
 }
@@ -2096,19 +2080,19 @@ int find_pack_entry(struct repository *r, const struct object_id *oid, struct pa
 static void maybe_invalidate_kept_pack_cache(struct repository *r,
 unsigned flags)
 {
-if (!r->objects->kept_pack_cache.packs)
+if (!r->objects->packfiles->kept_cache.packs)
 return;
-if (r->objects->kept_pack_cache.flags == flags)
+if (r->objects->packfiles->kept_cache.flags == flags)
 return;
-FREE_AND_NULL(r->objects->kept_pack_cache.packs);
-r->objects->kept_pack_cache.flags = 0;
+FREE_AND_NULL(r->objects->packfiles->kept_cache.packs);
+r->objects->packfiles->kept_cache.flags = 0;
 }

 struct packed_git **kept_pack_cache(struct repository *r, unsigned flags)
 {
 maybe_invalidate_kept_pack_cache(r, flags);

-if (!r->objects->kept_pack_cache.packs) {
+if (!r->objects->packfiles->kept_cache.packs) {
 struct packed_git **packs = NULL;
 size_t nr = 0, alloc = 0;
 struct packed_git *p;
@@ -2121,7 +2105,7 @@ struct packed_git **kept_pack_cache(struct repository *r, unsigned flags)
 * covers, one kept and one not kept, but the midx returns only
 * the non-kept version.
 */
-for (p = get_all_packs(r); p; p = p->next) {
+for (p = packfile_store_get_all_packs(r->objects->packfiles); p; p = p->next) {
 if ((p->pack_keep && (flags & ON_DISK_KEEP_PACKS)) ||
 (p->pack_keep_in_core && (flags & IN_CORE_KEEP_PACKS))) {
 ALLOC_GROW(packs, nr + 1, alloc);
@@ -2131,11 +2115,11 @@ struct packed_git **kept_pack_cache(struct repository *r, unsigned flags)
 ALLOC_GROW(packs, nr + 1, alloc);
 packs[nr] = NULL;

-r->objects->kept_pack_cache.packs = packs;
-r->objects->kept_pack_cache.flags = flags;
+r->objects->packfiles->kept_cache.packs = packs;
+r->objects->packfiles->kept_cache.flags = flags;
 }

-return r->objects->kept_pack_cache.packs;
+return r->objects->packfiles->kept_cache.packs;
 }

 int find_kept_pack_entry(struct repository *r,
@@ -2218,7 +2202,7 @@ int for_each_packed_object(struct repository *repo, each_packed_object_fn cb,
 int r = 0;
 int pack_errors = 0;

-for (p = get_all_packs(repo); p; p = p->next) {
+for (p = packfile_store_get_all_packs(repo->objects->packfiles); p; p = p->next) {
 if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
 continue;
 if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&
@@ -2332,3 +2316,46 @@ int parse_pack_header_option(const char *in, unsigned char *out, unsigned int *l
 *len = hdr - out;
 return 0;
 }
+
+static int pack_map_entry_cmp(const void *cmp_data UNUSED,
+const struct hashmap_entry *entry,
+const struct hashmap_entry *entry2,
+const void *keydata)
+{
+const char *key = keydata;
+const struct packed_git *pg1, *pg2;
+
+pg1 = container_of(entry, const struct packed_git, packmap_ent);
+pg2 = container_of(entry2, const struct packed_git, packmap_ent);
+
+return strcmp(pg1->pack_name, key ? key : pg2->pack_name);
+}
+
+struct packfile_store *packfile_store_new(struct object_database *odb)
+{
+struct packfile_store *store;
+CALLOC_ARRAY(store, 1);
+store->odb = odb;
+INIT_LIST_HEAD(&store->mru);
+hashmap_init(&store->map, pack_map_entry_cmp, NULL, 0);
+return store;
+}
+
+void packfile_store_free(struct packfile_store *store)
+{
+for (struct packed_git *p = store->packs, *next; p; p = next) {
+next = p->next;
+free(p);
+}
+hashmap_clear(&store->map);
+free(store);
+}
+
+void packfile_store_close(struct packfile_store *store)
+{
+for (struct packed_git *p = store->packs; p; p = p->next) {
+if (p->do_not_close)
+BUG("want to close pack marked 'do-not-close'");
+close_pack(p);
+}
+}
packfile.h | 125
@@ -52,19 +52,114 @@ struct packed_git {
 char pack_name[FLEX_ARRAY]; /* more */
 };

-static inline int pack_map_entry_cmp(const void *cmp_data UNUSED,
-const struct hashmap_entry *entry,
-const struct hashmap_entry *entry2,
-const void *keydata)
-{
-const char *key = keydata;
-const struct packed_git *pg1, *pg2;
+/*
+* A store that manages packfiles for a given object database.
+*/
+struct packfile_store {
+struct object_database *odb;

-pg1 = container_of(entry, const struct packed_git, packmap_ent);
-pg2 = container_of(entry2, const struct packed_git, packmap_ent);
+/*
+* The list of packfiles in the order in which they are being added to
+* the store.
+*/
+struct packed_git *packs;

-return strcmp(pg1->pack_name, key ? key : pg2->pack_name);
-}
+/*
+* Cache of packfiles which are marked as "kept", either because there
+* is an on-disk ".keep" file or because they are marked as "kept" in
+* memory.
+*
+* Should not be accessed directly, but via `kept_pack_cache()`. The
+* list of packs gets invalidated when the stored flags and the flags
+* passed to `kept_pack_cache()` mismatch.
+*/
+struct {
+struct packed_git **packs;
+unsigned flags;
+} kept_cache;
+
+/* A most-recently-used ordered version of the packs list. */
+struct list_head mru;
+
+/*
+* A map of packfile names to packed_git structs for tracking which
+* packs have been loaded already.
+*/
+struct hashmap map;
+
+/*
+* Whether packfiles have already been populated with this store's
+* packs.
+*/
+bool initialized;
+};
+
+/*
+* Allocate and initialize a new empty packfile store for the given object
+* database.
+*/
+struct packfile_store *packfile_store_new(struct object_database *odb);
+
+/*
+* Free the packfile store and all its associated state. All packfiles
+* tracked by the store will be closed.
+*/
+void packfile_store_free(struct packfile_store *store);
+
+/*
+* Close all packfiles associated with this store. The packfiles won't be
+* free'd, so they can be re-opened at a later point in time.
+*/
+void packfile_store_close(struct packfile_store *store);
+
+/*
+* Prepare the packfile store by loading packfiles and multi-pack indices for
+* all alternates. This becomes a no-op if the store is already prepared.
+*
+* It shouldn't typically be necessary to call this function directly, as
+* functions that access the store know to prepare it.
+*/
+void packfile_store_prepare(struct packfile_store *store);
+
+/*
+* Clear the packfile caches and try to look up any new packfiles that have
+* appeared since last preparing the packfiles store.
+*
+* This function must be called under the `odb_read_lock()`.
+*/
+void packfile_store_reprepare(struct packfile_store *store);
+
+/*
+* Add the pack to the store so that contained objects become accessible via
+* the store. This moves ownership into the store.
+*/
+void packfile_store_add_pack(struct packfile_store *store,
+struct packed_git *pack);
+
+/*
+* Get packs managed by the given store. Does not load the MIDX or any packs
+* referenced by it.
+*/
+struct packed_git *packfile_store_get_packs(struct packfile_store *store);
+
+/*
+* Get all packs managed by the given store, including packfiles that are
+* referenced by multi-pack indices.
+*/
+struct packed_git *packfile_store_get_all_packs(struct packfile_store *store);
+
+/*
+* Get all packs in most-recently-used order.
+*/
+struct list_head *packfile_store_get_packs_mru(struct packfile_store *store);
+
+/*
+* Open the packfile and add it to the store if it isn't yet known. Returns
+* either the newly opened packfile or the preexisting packfile. Returns a
+* `NULL` pointer in case the packfile could not be opened.
+*/
+struct packed_git *packfile_store_load_pack(struct packfile_store *store,
+const char *idx_path, int local);

 struct pack_window {
 struct pack_window *next;
@@ -142,14 +237,6 @@ int for_each_packed_object(struct repository *repo, each_packed_object_fn cb,
 #define PACKDIR_FILE_GARBAGE 4
 extern void (*report_garbage)(unsigned seen_bits, const char *path);

-void reprepare_packed_git(struct repository *r);
-void install_packed_git(struct repository *r, struct packed_git *pack);
-
-struct packed_git *get_packed_git(struct repository *r);
-struct list_head *get_packed_git_mru(struct repository *r);
-struct multi_pack_index *get_multi_pack_index(struct odb_source *source);
-struct packed_git *get_all_packs(struct repository *r);
-
 /*
 * Give a rough count of objects in the repository. This sacrifices accuracy
 * for speed.
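The header above describes the whole lifecycle: odb_new() creates the store via packfile_store_new(), lookups prepare it lazily, odb_reprepare() invalidates it, and odb_clear() releases it through packfile_store_free(). A hedged sketch of code written directly against the store (most callers should keep going through the odb/packfile helpers instead):

#include "git-compat-util.h"
#include "odb.h"
#include "packfile.h"

/* Sketch: list every pack known to the store, including MIDX-referenced ones. */
static void report_packs(struct repository *r)
{
	struct packfile_store *store = r->objects->packfiles;
	struct packed_git *p;

	for (p = packfile_store_get_all_packs(store); p; p = p->next)
		fprintf(stderr, "%s (local=%d)\n", p->pack_name, p->pack_local);
}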
@@ -287,12 +287,13 @@ static int compare_info(const void *a_, const void *b_)

 static void init_pack_info(struct repository *r, const char *infofile, int force)
 {
+struct packfile_store *packs = r->objects->packfiles;
 struct packed_git *p;
 int stale;
 int i;
 size_t alloc = 0;

-for (p = get_all_packs(r); p; p = p->next) {
+for (p = packfile_store_get_all_packs(packs); p; p = p->next) {
 /* we ignore things on alternate path since they are
 * not available to the pullers in general.
 */
@@ -39,7 +39,7 @@ int cmd__find_pack(int argc, const char **argv)
 if (repo_get_oid(the_repository, argv[0], &oid))
 die("cannot parse %s as an object name", argv[0]);

-for (p = get_all_packs(the_repository); p; p = p->next)
+for (p = packfile_store_get_all_packs(the_repository->objects->packfiles); p; p = p->next)
 if (find_pack_entry_one(&oid, p)) {
 printf("%s\n", p->pack_name);
 actual_count++;
@@ -37,7 +37,7 @@ int cmd__pack_mtimes(int argc, const char **argv)
 if (argc != 2)
 usage(pack_mtimes_usage);

-for (p = get_all_packs(the_repository); p; p = p->next) {
+for (p = packfile_store_get_all_packs(the_repository->objects->packfiles); p; p = p->next) {
 strbuf_addstr(&buf, basename(p->pack_name));
 strbuf_strip_suffix(&buf, ".pack");
 strbuf_addstr(&buf, ".mtimes");
@@ -450,7 +450,7 @@ static int fetch_with_fetch(struct transport *transport,
 }
 strbuf_release(&buf);

-reprepare_packed_git(the_repository);
+odb_reprepare(the_repository->objects);
 return 0;
 }
