Merge branch 'ps/object-wo-the-repository'

The object layer has been updated to take an explicit repository
instance as a parameter in more code paths.

* ps/object-wo-the-repository:
  hash: stop depending on `the_repository` in `null_oid()`
  hash: fix "-Wsign-compare" warnings
  object-file: split out logic regarding hash algorithms
  delta-islands: stop depending on `the_repository`
  object-file-convert: stop depending on `the_repository`
  pack-bitmap-write: stop depending on `the_repository`
  pack-revindex: stop depending on `the_repository`
  pack-check: stop depending on `the_repository`
  environment: move access to "core.bigFileThreshold" into repo settings
  pack-write: stop depending on `the_repository` and `the_hash_algo`
  object: stop depending on `the_repository`
  csum-file: stop depending on `the_repository`

commit ee847e0034
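
Every hunk below follows the same mechanical pattern: functions that used to
consult the global `the_repository` (or `the_hash_algo`) now take a
`struct repository *` or `struct git_hash_algo *` argument, and each call
site names the instance it means. A minimal, self-contained model of the
`null_oid()` change — mock types for illustration, not Git's real
definitions:

    #include <stdio.h>

    /* Mock stand-ins for Git's types, for illustration only. */
    struct object_id { unsigned char hash[32]; int algo; };

    struct git_hash_algo {
        const char *name;
        const struct object_id *null_oid; /* all-zero OID for this algorithm */
    };

    static const struct object_id null_oid_sha1 = { {0}, 1 };
    static const struct git_hash_algo sha1_algo = { "sha1", &null_oid_sha1 };

    /*
     * Before this series, null_oid(void) consulted a global; after it,
     * the caller must say which algorithm it means.
     */
    static const struct object_id *null_oid(const struct git_hash_algo *algop)
    {
        return algop->null_oid;
    }

    int main(void)
    {
        const struct object_id *oid = null_oid(&sha1_algo);
        printf("%s null OID byte 0: %02x\n", sha1_algo.name,
               (unsigned)oid->hash[0]);
        return 0;
    }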

Makefile
@@ -1042,6 +1042,7 @@ LIB_OBJS += gpg-interface.o
 LIB_OBJS += graph.o
 LIB_OBJS += grep.o
 LIB_OBJS += hash-lookup.o
+LIB_OBJS += hash.o
 LIB_OBJS += hashmap.o
 LIB_OBJS += help.o
 LIB_OBJS += hex.o

@@ -216,7 +216,7 @@ static int write_archive_entry(const struct object_id *oid, const char *base,
 	/* Stream it? */
 	if (S_ISREG(mode) && !args->convert &&
 	    oid_object_info(args->repo, oid, &size) == OBJ_BLOB &&
-	    size > big_file_threshold)
+	    size > repo_settings_get_big_file_threshold(the_repository))
 		return write_entry(args, oid, path.buf, path.len, mode, NULL, size);
 
 	buffer = object_file_to_archive(args, path.buf, oid, mode, &type, &size);
@@ -312,7 +312,7 @@ int write_archive_entries(struct archiver_args *args,
 	struct object_id fake_oid;
 	int i;
 
-	oidcpy(&fake_oid, null_oid());
+	oidcpy(&fake_oid, null_oid(the_hash_algo));
 
 	if (args->baselen > 0 && args->base[args->baselen - 1] == '/') {
 		size_t len = args->baselen;

blame.c
@@ -255,7 +255,7 @@ static struct commit *fake_working_tree_commit(struct repository *r,
 	switch (st.st_mode & S_IFMT) {
 	case S_IFREG:
 		if (opt->flags.allow_textconv &&
-		    textconv_object(r, read_from, mode, null_oid(), 0, &buf_ptr, &buf_len))
+		    textconv_object(r, read_from, mode, null_oid(the_hash_algo), 0, &buf_ptr, &buf_len))
 			strbuf_attach(&buf, buf_ptr, buf_len, buf_len + 1);
 		else if (strbuf_read_file(&buf, read_from, st.st_size) != st.st_size)
 			die_errno("cannot open or read '%s'", read_from);

branch.c
@@ -633,7 +633,7 @@ void create_branch(struct repository *r,
 				   0, &err);
 	if (!transaction ||
 	    ref_transaction_update(transaction, ref.buf,
-				   &oid, forcing ? NULL : null_oid(),
+				   &oid, forcing ? NULL : null_oid(the_hash_algo),
 				   NULL, NULL, flags, msg, &err) ||
 	    ref_transaction_commit(transaction, &err))
 		die("%s", err.buf);

@@ -130,8 +130,8 @@ static int post_checkout_hook(struct commit *old_commit, struct commit *new_comm
 			      int changed)
 {
 	return run_hooks_l(the_repository, "post-checkout",
-			   oid_to_hex(old_commit ? &old_commit->object.oid : null_oid()),
-			   oid_to_hex(new_commit ? &new_commit->object.oid : null_oid()),
+			   oid_to_hex(old_commit ? &old_commit->object.oid : null_oid(the_hash_algo)),
+			   oid_to_hex(new_commit ? &new_commit->object.oid : null_oid(the_hash_algo)),
 			   changed ? "1" : "0", NULL);
 	/* "new_commit" can be NULL when checking out from the index before
 	   a commit exists. */
@@ -710,7 +710,7 @@ static int reset_tree(struct tree *tree, const struct checkout_opts *o,
 	opts.src_index = the_repository->index;
 	opts.dst_index = the_repository->index;
 	init_checkout_metadata(&opts.meta, info->refname,
-			       info->commit ? &info->commit->object.oid : null_oid(),
+			       info->commit ? &info->commit->object.oid : null_oid(the_hash_algo),
 			       NULL);
 	if (parse_tree(tree) < 0)
 		return 128;

@@ -692,7 +692,7 @@ static int checkout(int submodule_progress, int filter_submodules,
 	if (write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK))
 		die(_("unable to write new index file"));
 
-	err |= run_hooks_l(the_repository, "post-checkout", oid_to_hex(null_oid()),
+	err |= run_hooks_l(the_repository, "post-checkout", oid_to_hex(null_oid(the_hash_algo)),
 			   oid_to_hex(&oid), "1", NULL);
 
 	if (!err && (option_recurse_submodules.nr > 0)) {

@@ -518,7 +518,7 @@ static void describe_blob(struct object_id oid, struct strbuf *dst)
 {
 	struct rev_info revs;
 	struct strvec args = STRVEC_INIT;
-	struct process_commit_data pcd = { *null_oid(), oid, dst, &revs};
+	struct process_commit_data pcd = { *null_oid(the_hash_algo), oid, dst, &revs};
 
 	strvec_pushl(&args, "internal: The first arg is not parsed",
 		     "--objects", "--in-commit-order", "--reverse", "HEAD",

@@ -104,7 +104,7 @@ static void builtin_diff_b_f(struct rev_info *revs,
 
 	stuff_change(&revs->diffopt,
 		     blob[0]->mode, canon_mode(st.st_mode),
-		     &blob[0]->item->oid, null_oid(),
+		     &blob[0]->item->oid, null_oid(the_hash_algo),
 		     1, 0,
 		     blob[0]->path ? blob[0]->path : path,
 		     path);
@@ -498,7 +498,8 @@ int cmd_diff(int argc,
 
 	/* If this is a no-index diff, just run it and exit there. */
 	if (no_index)
-		exit(diff_no_index(&rev, no_index == DIFF_NO_INDEX_IMPLICIT,
+		exit(diff_no_index(&rev, the_repository->hash_algo,
+				   no_index == DIFF_NO_INDEX_IMPLICIT,
 				   argc, argv));
 
 

@@ -949,7 +949,7 @@ static void handle_tag(const char *name, struct tag *tag)
 		p = rewrite_commit((struct commit *)tagged);
 		if (!p) {
 			printf("reset %s\nfrom %s\n\n",
-			       name, oid_to_hex(null_oid()));
+			       name, oid_to_hex(null_oid(the_hash_algo)));
 			free(buf);
 			return;
 		}
@@ -963,7 +963,7 @@ static void handle_tag(const char *name, struct tag *tag)
 
 	if (tagged->type == OBJ_TAG) {
 		printf("reset %s\nfrom %s\n\n",
-		       name, oid_to_hex(null_oid()));
+		       name, oid_to_hex(null_oid(the_hash_algo)));
 	}
 	skip_prefix(name, "refs/tags/", &name);
 	printf("tag %s\n", name);
@@ -1103,7 +1103,7 @@ static void handle_tags_and_duplicates(struct string_list *extras)
 			 * it.
 			 */
 			printf("reset %s\nfrom %s\n\n",
-			       name, oid_to_hex(null_oid()));
+			       name, oid_to_hex(null_oid(the_hash_algo)));
 			continue;
 		}
 
@@ -1122,7 +1122,7 @@ static void handle_tags_and_duplicates(struct string_list *extras)
 			if (!reference_excluded_commits) {
 				/* delete the ref */
 				printf("reset %s\nfrom %s\n\n",
-				       name, oid_to_hex(null_oid()));
+				       name, oid_to_hex(null_oid(the_hash_algo)));
 				continue;
 			}
 			/* set ref to commit using oid, not mark */
@@ -1233,7 +1233,7 @@ static void handle_deletes(void)
 			continue;
 
 		printf("reset %s\nfrom %s\n\n",
-		       refspec->dst, oid_to_hex(null_oid()));
+		       refspec->dst, oid_to_hex(null_oid(the_hash_algo)));
 	}
 }
 

@@ -770,7 +770,7 @@ static void start_packfile(void)
 	p->pack_fd = pack_fd;
 	p->do_not_close = 1;
 	p->repo = the_repository;
-	pack_file = hashfd(pack_fd, p->pack_name);
+	pack_file = hashfd(the_repository->hash_algo, pack_fd, p->pack_name);
 
 	pack_data = p;
 	pack_size = write_pack_header(pack_file, 0);
@@ -798,7 +798,7 @@ static const char *create_index(void)
 	if (c != last)
 		die("internal consistency error creating the index");
 
-	tmpfile = write_idx_file(the_hash_algo, NULL, idx, object_count,
+	tmpfile = write_idx_file(the_repository, NULL, idx, object_count,
 				 &pack_idx_opts, pack_data->hash);
 	free(idx);
 	return tmpfile;
@@ -2021,7 +2021,7 @@ static void parse_and_store_blob(
 	static struct strbuf buf = STRBUF_INIT;
 	uintmax_t len;
 
-	if (parse_data(&buf, big_file_threshold, &len))
+	if (parse_data(&buf, repo_settings_get_big_file_threshold(the_repository), &len))
 		store_object(OBJ_BLOB, &buf, last, oidout, mark);
 	else {
 		if (last) {
@@ -3425,7 +3425,7 @@ static int parse_one_option(const char *option)
 		unsigned long v;
 		if (!git_parse_ulong(option, &v))
 			return 0;
-		big_file_threshold = v;
+		repo_settings_set_big_file_threshold(the_repository, v);
 	} else if (skip_prefix(option, "depth=", &option)) {
 		option_depth(option);
 	} else if (skip_prefix(option, "active-branches=", &option)) {

@@ -400,12 +400,12 @@ static void check_connectivity(void)
 	}
 
 	/* Look up all the requirements, warn about missing objects.. */
-	max = get_max_object_index();
+	max = get_max_object_index(the_repository);
 	if (verbose)
 		fprintf_ln(stderr, _("Checking connectivity (%d objects)"), max);
 
 	for (i = 0; i < max; i++) {
-		struct object *obj = get_indexed_object(i);
+		struct object *obj = get_indexed_object(the_repository, i);
 
 		if (obj)
 			check_object(obj);
@@ -626,7 +626,7 @@ static int fsck_loose(const struct object_id *oid, const char *path, void *data)
 	void *contents = NULL;
 	int eaten;
 	struct object_info oi = OBJECT_INFO_INIT;
-	struct object_id real_oid = *null_oid();
+	struct object_id real_oid = *null_oid(the_hash_algo);
 	int err = 0;
 
 	strbuf_reset(&cb_data->obj_type);

@@ -453,7 +453,7 @@ static int grep_submodule(struct grep_opt *opt,
 		return 0;
 
 	subrepo = xmalloc(sizeof(*subrepo));
-	if (repo_submodule_init(subrepo, superproject, path, null_oid())) {
+	if (repo_submodule_init(subrepo, superproject, path, null_oid(opt->repo->hash_algo))) {
 		free(subrepo);
 		return 0;
 	}
@@ -1144,7 +1144,7 @@ int cmd_grep(int argc,
 			break;
 		}
 
-		object = parse_object_or_die(&oid, arg);
+		object = parse_object_or_die(the_repository, &oid, arg);
 		if (!seen_dashdash)
 			verify_non_filename(prefix, arg);
 		add_object_array_with_path(object, arg, &list, oc.mode, oc.path);

@@ -279,14 +279,14 @@ static unsigned check_objects(void)
 {
 	unsigned i, max, foreign_nr = 0;
 
-	max = get_max_object_index();
+	max = get_max_object_index(the_repository);
 
 	if (verbose)
 		progress = start_delayed_progress(the_repository,
 						  _("Checking objects"), max);
 
 	for (i = 0; i < max; i++) {
-		foreign_nr += check_object(get_indexed_object(i));
+		foreign_nr += check_object(get_indexed_object(the_repository, i));
 		display_progress(progress, i + 1);
 	}
 
@@ -485,7 +485,8 @@ static void *unpack_entry_data(off_t offset, unsigned long size,
 		git_hash_update(&c, hdr, hdrlen);
 	} else
 		oid = NULL;
-	if (type == OBJ_BLOB && size > big_file_threshold)
+	if (type == OBJ_BLOB &&
+	    size > repo_settings_get_big_file_threshold(the_repository))
 		buf = fixed_buf;
 	else
 		buf = xmallocz(size);
@@ -799,7 +800,8 @@ static int check_collison(struct object_entry *entry)
 	enum object_type type;
 	unsigned long size;
 
-	if (entry->size <= big_file_threshold || entry->type != OBJ_BLOB)
+	if (entry->size <= repo_settings_get_big_file_threshold(the_repository) ||
+	    entry->type != OBJ_BLOB)
 		return -1;
 
 	memset(&data, 0, sizeof(data));
@@ -1382,7 +1384,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
 		REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1);
 		memset(objects + nr_objects + 1, 0,
 		       nr_unresolved * sizeof(*objects));
-		f = hashfd(output_fd, curr_pack);
+		f = hashfd(the_repository->hash_algo, output_fd, curr_pack);
 		fix_unresolved_deltas(f);
 		strbuf_addf(&msg, Q_("completed with %d local object",
 				     "completed with %d local objects",
@@ -2089,10 +2091,10 @@ int cmd_index_pack(int argc,
 	ALLOC_ARRAY(idx_objects, nr_objects);
 	for (i = 0; i < nr_objects; i++)
 		idx_objects[i] = &objects[i].idx;
-	curr_index = write_idx_file(the_hash_algo, index_name, idx_objects,
+	curr_index = write_idx_file(the_repository, index_name, idx_objects,
 				    nr_objects, &opts, pack_hash);
 	if (rev_index)
-		curr_rev_index = write_rev_file(the_hash_algo, rev_index_name,
+		curr_rev_index = write_rev_file(the_repository, rev_index_name,
 						idx_objects, nr_objects,
 						pack_hash, opts.flags);
 	free(idx_objects);

@@ -2468,7 +2468,7 @@ int cmd_format_patch(int argc,
 		base = get_base_commit(&cfg, list, nr);
 		if (base) {
 			reset_revision_walk();
-			clear_object_flags(UNINTERESTING);
+			clear_object_flags(the_repository, UNINTERESTING);
 			prepare_bases(&bases, base, list, nr);
 		}
 

@@ -233,7 +233,8 @@ static void show_submodule(struct repository *superproject,
 {
 	struct repository subrepo;
 
-	if (repo_submodule_init(&subrepo, superproject, path, null_oid()))
+	if (repo_submodule_init(&subrepo, superproject, path,
+				null_oid(superproject->hash_algo)))
 		return;
 
 	if (repo_read_index(&subrepo) < 0)

@@ -675,9 +675,9 @@ int cmd_name_rev(int argc,
 	} else if (all) {
 		int i, max;
 
-		max = get_max_object_index();
+		max = get_max_object_index(the_repository);
 		for (i = 0; i < max; i++) {
-			struct object *obj = get_indexed_object(i);
+			struct object *obj = get_indexed_object(the_repository, i);
 			if (!obj || obj->type != OBJ_COMMIT)
 				continue;
 			show_name(obj, NULL,

@@ -500,7 +500,8 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
 
 	if (!usable_delta) {
 		if (oe_type(entry) == OBJ_BLOB &&
-		    oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
+		    oe_size_greater_than(&to_pack, entry,
+					 repo_settings_get_big_file_threshold(the_repository)) &&
 		    (st = open_istream(the_repository, &entry->idx.oid, &type,
 				       &size, NULL)) != NULL)
 			buf = NULL;
@@ -1312,9 +1313,10 @@ static void write_pack_file(void)
 		char *pack_tmp_name = NULL;
 
 		if (pack_to_stdout)
-			f = hashfd_throughput(1, "<stdout>", progress_state);
+			f = hashfd_throughput(the_repository->hash_algo, 1,
+					      "<stdout>", progress_state);
 		else
-			f = create_tmp_packfile(&pack_tmp_name);
+			f = create_tmp_packfile(the_repository, &pack_tmp_name);
 
 		offset = write_pack_header(f, nr_remaining);
 
@@ -1408,7 +1410,7 @@
 			if (cruft)
 				pack_idx_opts.flags |= WRITE_MTIMES;
 
-			stage_tmp_packfiles(the_hash_algo, &tmpname,
+			stage_tmp_packfiles(the_repository, &tmpname,
 					    pack_tmp_name, written_list,
 					    nr_written, &to_pack,
 					    &pack_idx_opts, hash,
@@ -2536,7 +2538,8 @@ static void get_object_details(void)
 		struct object_entry *entry = sorted_by_offset[i];
 		check_object(entry, i);
 		if (entry->type_valid &&
-		    oe_size_greater_than(&to_pack, entry, big_file_threshold))
+		    oe_size_greater_than(&to_pack, entry,
+					 repo_settings_get_big_file_threshold(the_repository)))
 			entry->no_try_delta = 1;
 		display_progress(progress_state, i + 1);
 	}
@@ -3929,7 +3932,7 @@ static void show_commit(struct commit *commit, void *data UNUSED)
 		index_commit_for_bitmap(commit);
 
 	if (use_delta_islands)
-		propagate_island_marks(commit);
+		propagate_island_marks(the_repository, commit);
 }
 
 static void show_object(struct object *obj, const char *name,
@@ -4245,7 +4248,7 @@ static int mark_bitmap_preferred_tip(const char *refname,
 	if (!peel_iterated_oid(the_repository, oid, &peeled))
 		oid = &peeled;
 
-	object = parse_object_or_die(oid, refname);
+	object = parse_object_or_die(the_repository, oid, refname);
 	if (object->type == OBJ_COMMIT)
 		object->flags |= NEEDS_BITMAP;
 

@@ -185,7 +185,7 @@ int cmd_prune(int argc,
 		const char *name = *argv++;
 
 		if (!repo_get_oid(the_repository, name, &oid)) {
-			struct object *object = parse_object_or_die(&oid,
+			struct object *object = parse_object_or_die(the_repository, &oid,
 								    name);
 			add_pending_object(&revs, object, "");
 		}

@@ -925,7 +925,7 @@ static void fill_branch_base(struct rebase_options *options,
 				     options->orig_head, &merge_bases) < 0)
 		exit(128);
 	if (!merge_bases || merge_bases->next)
-		oidcpy(branch_base, null_oid());
+		oidcpy(branch_base, null_oid(the_hash_algo));
 	else
 		oidcpy(branch_base, &merge_bases->item->object.oid);
 

@@ -363,7 +363,7 @@ static void write_head_info(void)
 	strvec_clear(&excludes_vector);
 
 	if (!sent_capabilities)
-		show_ref("capabilities^{}", null_oid());
+		show_ref("capabilities^{}", null_oid(the_hash_algo));
 
 	advertise_shallow_grafts(1);
 

@@ -78,7 +78,7 @@ static int get_default_remote_submodule(const char *module_path, char **default_
 	int ret;
 
 	if (repo_submodule_init(&subrepo, the_repository, module_path,
-				null_oid()) < 0)
+				null_oid(the_hash_algo)) < 0)
 		return die_message(_("could not get a repository handle for submodule '%s'"),
 				   module_path);
 	ret = repo_get_default_remote(&subrepo, default_remote);
@@ -308,7 +308,7 @@ static void runcommand_in_submodule_cb(const struct cache_entry *list_item,
 	displaypath = get_submodule_displaypath(path, info->prefix,
 						info->super_prefix);
 
-	sub = submodule_from_path(the_repository, null_oid(), path);
+	sub = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
 
 	if (!sub)
 		die(_("No url found for submodule path '%s' in .gitmodules"),
@@ -468,7 +468,7 @@ static void init_submodule(const char *path, const char *prefix,
 
 	displaypath = get_submodule_displaypath(path, prefix, super_prefix);
 
-	sub = submodule_from_path(the_repository, null_oid(), path);
+	sub = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
 
 	if (!sub)
 		die(_("No url found for submodule path '%s' in .gitmodules"),
@@ -645,14 +645,14 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
 	if (validate_submodule_path(path) < 0)
 		exit(128);
 
-	if (!submodule_from_path(the_repository, null_oid(), path))
+	if (!submodule_from_path(the_repository, null_oid(the_hash_algo), path))
 		die(_("no submodule mapping found in .gitmodules for path '%s'"),
 		    path);
 
 	displaypath = get_submodule_displaypath(path, prefix, super_prefix);
 
 	if ((CE_STAGEMASK & ce_flags) >> CE_STAGESHIFT) {
-		print_status(flags, 'U', path, null_oid(), displaypath);
+		print_status(flags, 'U', path, null_oid(the_hash_algo), displaypath);
 		goto cleanup;
 	}
 
@@ -912,7 +912,7 @@ static void generate_submodule_summary(struct summary_cb *info,
 	struct strbuf errmsg = STRBUF_INIT;
 	int total_commits = -1;
 
-	if (!info->cached && oideq(&p->oid_dst, null_oid())) {
+	if (!info->cached && oideq(&p->oid_dst, null_oid(the_hash_algo))) {
 		if (S_ISGITLINK(p->mod_dst)) {
 			struct ref_store *refs = repo_get_submodule_ref_store(the_repository,
 									      p->sm_path);
@@ -1051,7 +1051,7 @@ static void prepare_submodule_summary(struct summary_cb *info,
 
 		if (info->for_status && p->status != 'A' &&
 		    (sub = submodule_from_path(the_repository,
-					       null_oid(), p->sm_path))) {
+					       null_oid(the_hash_algo), p->sm_path))) {
 			char *config_key = NULL;
 			const char *value;
 			int ignore_all = 0;
@@ -1259,7 +1259,7 @@ static void sync_submodule(const char *path, const char *prefix,
 	if (validate_submodule_path(path) < 0)
 		exit(128);
 
-	sub = submodule_from_path(the_repository, null_oid(), path);
+	sub = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
 
 	if (sub && sub->url) {
 		if (starts_with_dot_dot_slash(sub->url) ||
@@ -1404,7 +1404,7 @@ static void deinit_submodule(const char *path, const char *prefix,
 	if (validate_submodule_path(path) < 0)
 		exit(128);
 
-	sub = submodule_from_path(the_repository, null_oid(), path);
+	sub = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
 
 	if (!sub || !sub->name)
 		goto cleanup;
@@ -1929,7 +1929,7 @@ static int determine_submodule_update_strategy(struct repository *r,
 					       enum submodule_update_type update,
 					       struct submodule_update_strategy *out)
 {
-	const struct submodule *sub = submodule_from_path(r, null_oid(), path);
+	const struct submodule *sub = submodule_from_path(r, null_oid(the_hash_algo), path);
 	char *key;
 	const char *val;
 	int ret;
@@ -2089,7 +2089,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
 		goto cleanup;
 	}
 
-	sub = submodule_from_path(the_repository, null_oid(), ce->name);
+	sub = submodule_from_path(the_repository, null_oid(the_hash_algo), ce->name);
 
 	if (!sub) {
 		next_submodule_warn_missing(suc, out, displaypath);
@@ -2485,7 +2485,7 @@ static int remote_submodule_branch(const char *path, const char **branch)
 	char *key;
 	*branch = NULL;
 
-	sub = submodule_from_path(the_repository, null_oid(), path);
+	sub = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
 	if (!sub)
 		return die_message(_("could not initialize submodule at path '%s'"),
 				   path);
@@ -2531,7 +2531,7 @@ static int ensure_core_worktree(const char *path)
 	const char *cw;
 	struct repository subrepo;
 
-	if (repo_submodule_init(&subrepo, the_repository, path, null_oid()))
+	if (repo_submodule_init(&subrepo, the_repository, path, null_oid(the_hash_algo)))
 		return die_message(_("could not get a repository handle for submodule '%s'"),
 				   path);
 
@@ -2644,7 +2644,7 @@ static int update_submodule(struct update_data *update_data)
 		return ret;
 
 	if (update_data->just_cloned)
-		oidcpy(&update_data->suboid, null_oid());
+		oidcpy(&update_data->suboid, null_oid(the_hash_algo));
 	else if (repo_resolve_gitlink_ref(the_repository, update_data->sm_path,
 					  "HEAD", &update_data->suboid))
 		return die_message(_("Unable to find current revision in submodule path '%s'"),
@@ -2697,8 +2697,8 @@ static int update_submodule(struct update_data *update_data)
 		struct update_data next = *update_data;
 
 		next.prefix = NULL;
-		oidcpy(&next.oid, null_oid());
-		oidcpy(&next.suboid, null_oid());
+		oidcpy(&next.oid, null_oid(the_hash_algo));
+		oidcpy(&next.suboid, null_oid(the_hash_algo));
 
 		cp.dir = update_data->sm_path;
 		cp.git_cmd = 1;
@@ -3057,7 +3057,7 @@ static int module_set_url(int argc, const char **argv, const char *prefix,
 	if (argc != 2 || !(path = argv[0]) || !(newurl = argv[1]))
 		usage_with_options(usage, options);
 
-	sub = submodule_from_path(the_repository, null_oid(), path);
+	sub = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
 
 	if (!sub)
 		die(_("no submodule mapping found in .gitmodules for path '%s'"),
@@ -3113,7 +3113,7 @@ static int module_set_branch(int argc, const char **argv, const char *prefix,
 	if (argc != 1 || !(path = argv[0]))
 		usage_with_options(usage, options);
 
-	sub = submodule_from_path(the_repository, null_oid(), path);
+	sub = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
 
 	if (!sub)
 		die(_("no submodule mapping found in .gitmodules for path '%s'"),

@@ -172,7 +172,7 @@ static int do_sign(struct strbuf *buffer, struct object_id **compat_oid,
 	if (compat) {
 		const struct git_hash_algo *algo = the_repository->hash_algo;
 
-		if (convert_object_file(&compat_buf, algo, compat,
+		if (convert_object_file(the_repository, &compat_buf, algo, compat,
 					buffer->buf, buffer->len, OBJ_TAG, 1))
 			goto out;
 		if (sign_buffer(&compat_buf, &compat_sig, keyid))

@@ -505,7 +505,7 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
 		 * has not been resolved yet.
 		 */
 		oidclr(&obj_list[nr].oid, the_repository->hash_algo);
-		add_delta_to_list(nr, null_oid(), base_offset,
+		add_delta_to_list(nr, null_oid(the_hash_algo), base_offset,
 				  delta_data, delta_size);
 		return;
 	}
@@ -553,7 +553,8 @@ static void unpack_one(unsigned nr)
 
 	switch (type) {
 	case OBJ_BLOB:
-		if (!dry_run && size > big_file_threshold) {
+		if (!dry_run &&
+		    size > repo_settings_get_big_file_threshold(the_repository)) {
 			stream_blob(size, nr);
 			return;
 		}

@@ -503,7 +503,7 @@ static void parse_cmd_symref_verify(struct ref_transaction *transaction,
 	 */
 	old_target = parse_next_refname(&next);
 	if (!old_target)
-		oidcpy(&old_oid, null_oid());
+		oidcpy(&old_oid, null_oid(the_hash_algo));
 
 	if (*next != line_termination)
 		die("symref-verify %s: extra input: %s", refname, next);

@@ -578,7 +578,7 @@ done:
 
 	strvec_pushl(&opt.env, "GIT_DIR", "GIT_WORK_TREE", NULL);
 	strvec_pushl(&opt.args,
-		     oid_to_hex(null_oid()),
+		     oid_to_hex(null_oid(the_hash_algo)),
 		     oid_to_hex(&commit->object.oid),
 		     "1",
 		     NULL);

@@ -43,7 +43,7 @@ static void finish_tmp_packfile(struct strbuf *basename,
 {
 	char *idx_tmp_name = NULL;
 
-	stage_tmp_packfiles(the_hash_algo, basename, pack_tmp_name,
+	stage_tmp_packfiles(the_repository, basename, pack_tmp_name,
 			    written_list, nr_written, NULL, pack_idx_opts, hash,
 			    &idx_tmp_name);
 	rename_tmp_packfile_idx(basename, &idx_tmp_name);
@@ -240,7 +240,7 @@ static void prepare_to_stream(struct bulk_checkin_packfile *state,
 	if (!(flags & HASH_WRITE_OBJECT) || state->f)
 		return;
 
-	state->f = create_tmp_packfile(&state->pack_tmp_name);
+	state->f = create_tmp_packfile(the_repository, &state->pack_tmp_name);
 	reset_pack_idx_option(&state->pack_idx_opts);
 
 	/* Pretend we are going to write only one object */

@@ -1066,7 +1066,7 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent,
 					      &result_size, NULL, NULL);
 	} else if (textconv) {
 		struct diff_filespec *df = alloc_filespec(elem->path);
-		fill_filespec(df, null_oid(), 0, st.st_mode);
+		fill_filespec(df, null_oid(the_hash_algo), 0, st.st_mode);
 		result_size = fill_textconv(opt->repo, textconv, df, &result);
 		free_filespec(df);
 	} else if (0 <= (fd = open(elem->path, O_RDONLY))) {
@ -2090,11 +2090,13 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
|
|||
return -1;
|
||||
}
|
||||
|
||||
f = hashfd(get_tempfile_fd(graph_layer), get_tempfile_path(graph_layer));
|
||||
f = hashfd(the_repository->hash_algo,
|
||||
get_tempfile_fd(graph_layer), get_tempfile_path(graph_layer));
|
||||
} else {
|
||||
hold_lock_file_for_update_mode(&lk, ctx->graph_name,
|
||||
LOCK_DIE_ON_ERROR, 0444);
|
||||
f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk));
|
||||
f = hashfd(the_repository->hash_algo,
|
||||
get_lock_file_fd(&lk), get_lock_file_path(&lk));
|
||||
}
|
||||
|
||||
cf = init_chunkfile(f);
|
||||
|
@ -2716,7 +2718,8 @@ static void graph_report(const char *fmt, ...)
|
|||
|
||||
static int commit_graph_checksum_valid(struct commit_graph *g)
|
||||
{
|
||||
return hashfile_checksum_valid(g->data, g->data_len);
|
||||
return hashfile_checksum_valid(the_repository->hash_algo,
|
||||
g->data, g->data_len);
|
||||
}
|
||||
|
||||
static int verify_one_commit_graph(struct repository *r,
|
||||
|
|

commit.c
@@ -1378,7 +1378,7 @@ static int convert_commit_extra_headers(const struct commit_extra_header *orig,
 		struct commit_extra_header *new;
 		CALLOC_ARRAY(new, 1);
 		if (!strcmp(orig->key, "mergetag")) {
-			if (convert_object_file(&out, algo, compat,
+			if (convert_object_file(the_repository, &out, algo, compat,
 						orig->value, orig->len,
 						OBJ_TAG, 1)) {
 				free(new);

config.c
@@ -1490,11 +1490,6 @@ static int git_default_core_config(const char *var, const char *value,
 		return 0;
 	}
 
-	if (!strcmp(var, "core.bigfilethreshold")) {
-		big_file_threshold = git_config_ulong(var, value, ctx->kvi);
-		return 0;
-	}
-
 	if (!strcmp(var, "core.autocrlf")) {
 		if (value && !strcasecmp(value, "input")) {
 			auto_crlf = AUTO_CRLF_INPUT;
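
The `core.bigfilethreshold` parsing deleted above moves behind the
repo-settings accessors seen throughout this diff. A runnable sketch of that
pattern — the struct layout and lazy-loading details are assumptions, with
the 512 MiB default taken from the initializer removed from environment.c
further below:

    #include <stdio.h>

    /* Mock repo-settings shape; Git's real struct differs. */
    struct repo_settings {
        int big_file_threshold_set;          /* has the value been loaded? */
        unsigned long big_file_threshold;
    };

    struct repository { struct repo_settings settings; };

    static unsigned long repo_settings_get_big_file_threshold(struct repository *repo)
    {
        if (!repo->settings.big_file_threshold_set) {
            /* Real code would consult the repository's config here. */
            repo->settings.big_file_threshold = 512 * 1024 * 1024;
            repo->settings.big_file_threshold_set = 1;
        }
        return repo->settings.big_file_threshold;
    }

    static void repo_settings_set_big_file_threshold(struct repository *repo,
                                                     unsigned long value)
    {
        repo->settings.big_file_threshold = value;
        repo->settings.big_file_threshold_set = 1;
    }

    int main(void)
    {
        struct repository repo = { {0, 0} };
        printf("default:  %lu\n", repo_settings_get_big_file_threshold(&repo));
        repo_settings_set_big_file_threshold(&repo, 1024);
        printf("override: %lu\n", repo_settings_get_big_file_threshold(&repo));
        return 0;
    }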

csum-file.c
@@ -8,8 +8,6 @@
  * able to verify hasn't been messed with afterwards.
  */
 
-#define USE_THE_REPOSITORY_VARIABLE
-
 #include "git-compat-util.h"
 #include "csum-file.h"
 #include "git-zlib.h"
@@ -148,21 +146,23 @@ void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
 	}
 }
 
-struct hashfile *hashfd_check(const char *name)
+struct hashfile *hashfd_check(const struct git_hash_algo *algop,
+			      const char *name)
 {
 	int sink, check;
 	struct hashfile *f;
 
 	sink = xopen("/dev/null", O_WRONLY);
 	check = xopen(name, O_RDONLY);
-	f = hashfd(sink, name);
+	f = hashfd(algop, sink, name);
 	f->check_fd = check;
 	f->check_buffer = xmalloc(f->buffer_len);
 
 	return f;
 }
 
-static struct hashfile *hashfd_internal(int fd, const char *name,
+static struct hashfile *hashfd_internal(const struct git_hash_algo *algop,
+					int fd, const char *name,
 					struct progress *tp,
 					size_t buffer_len)
 {
@@ -176,7 +176,7 @@ static struct hashfile *hashfd_internal(int fd, const char *name,
 	f->do_crc = 0;
 	f->skip_hash = 0;
 
-	f->algop = unsafe_hash_algo(the_hash_algo);
+	f->algop = unsafe_hash_algo(algop);
 	f->algop->init_fn(&f->ctx);
 
 	f->buffer_len = buffer_len;
@@ -186,17 +186,19 @@
 	return f;
 }
 
-struct hashfile *hashfd(int fd, const char *name)
+struct hashfile *hashfd(const struct git_hash_algo *algop,
+			int fd, const char *name)
 {
 	/*
 	 * Since we are not going to use a progress meter to
 	 * measure the rate of data passing through this hashfile,
 	 * use a larger buffer size to reduce fsync() calls.
 	 */
-	return hashfd_internal(fd, name, NULL, 128 * 1024);
+	return hashfd_internal(algop, fd, name, NULL, 128 * 1024);
 }
 
-struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp)
+struct hashfile *hashfd_throughput(const struct git_hash_algo *algop,
+				   int fd, const char *name, struct progress *tp)
 {
 	/*
 	 * Since we are expecting to report progress of the
@@ -204,7 +206,7 @@ struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp
 	 * size so the progress indicators arrive at a more
 	 * frequent rate.
 	 */
-	return hashfd_internal(fd, name, tp, 8 * 1024);
+	return hashfd_internal(algop, fd, name, tp, 8 * 1024);
 }
 
 void hashfile_checkpoint_init(struct hashfile *f,
@@ -246,13 +248,15 @@ uint32_t crc32_end(struct hashfile *f)
 	return f->crc32;
 }
 
-int hashfile_checksum_valid(const unsigned char *data, size_t total_len)
+int hashfile_checksum_valid(const struct git_hash_algo *algop,
+			    const unsigned char *data, size_t total_len)
 {
 	unsigned char got[GIT_MAX_RAWSZ];
 	struct git_hash_ctx ctx;
-	const struct git_hash_algo *algop = unsafe_hash_algo(the_hash_algo);
 	size_t data_len = total_len - algop->rawsz;
 
+	algop = unsafe_hash_algo(algop);
+
 	if (total_len < algop->rawsz)
 		return 0; /* say "too short"? */
 

csum-file.h
@@ -45,9 +45,12 @@ int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *);
 #define CSUM_FSYNC		2
 #define CSUM_HASH_IN_STREAM	4
 
-struct hashfile *hashfd(int fd, const char *name);
-struct hashfile *hashfd_check(const char *name);
-struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp);
+struct hashfile *hashfd(const struct git_hash_algo *algop,
+			int fd, const char *name);
+struct hashfile *hashfd_check(const struct git_hash_algo *algop,
+			      const char *name);
+struct hashfile *hashfd_throughput(const struct git_hash_algo *algop,
+				   int fd, const char *name, struct progress *tp);
 
 /*
  * Free the hashfile without flushing its contents to disk. This only
@@ -66,7 +69,8 @@ void crc32_begin(struct hashfile *);
 uint32_t crc32_end(struct hashfile *);
 
 /* Verify checksum validity while reading. Returns non-zero on success. */
-int hashfile_checksum_valid(const unsigned char *data, size_t len);
+int hashfile_checksum_valid(const struct git_hash_algo *algop,
+			    const unsigned char *data, size_t len);
 
 /*
 * Returns the total number of bytes fed to the hashfile so far (including ones
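
Per the csum-file changes above, each hashfile constructor now receives the
algorithm from its caller, and `hashfd_internal()` upgrades it via
`unsafe_hash_algo()` — defined in the new hash file below as: use the faster
non-collision-detecting variant when one exists, otherwise the algorithm
itself. A runnable model of just that fallback (mock struct, not Git's):

    #include <stdio.h>

    struct git_hash_algo {
        const char *name;
        const struct git_hash_algo *unsafe; /* faster variant, may be NULL */
    };

    static const struct git_hash_algo *unsafe_hash_algo(const struct git_hash_algo *algop)
    {
        return algop->unsafe ? algop->unsafe : algop;
    }

    int main(void)
    {
        const struct git_hash_algo sha1_unsafe = { "sha1-unsafe", NULL };
        const struct git_hash_algo sha1 = { "sha1", &sha1_unsafe };
        const struct git_hash_algo sha256 = { "sha256", NULL };

        /* csum-file picks the unsafe variant when one is available. */
        printf("%s -> %s\n", sha1.name, unsafe_hash_algo(&sha1)->name);
        printf("%s -> %s\n", sha256.name, unsafe_hash_algo(&sha256)->name);
        return 0;
    }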

@@ -1,4 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
 #define DISABLE_SIGN_COMPARE_WARNINGS
 
 #include "git-compat-util.h"
@@ -267,8 +266,7 @@ void resolve_tree_islands(struct repository *r,
 	QSORT(todo, nr, tree_depth_compare);
 
 	if (progress)
-		progress_state = start_progress(the_repository,
-						_("Propagating island marks"), nr);
+		progress_state = start_progress(r, _("Propagating island marks"), nr);
 
 	for (i = 0; i < nr; i++) {
 		struct object_entry *ent = todo[i].entry;
@@ -490,9 +488,9 @@ void load_delta_islands(struct repository *r, int progress)
 
 	island_marks = kh_init_oid_map();
 
-	git_config(island_config_callback, &ild);
+	repo_config(r, island_config_callback, &ild);
 	ild.remote_islands = kh_init_str();
-	refs_for_each_ref(get_main_ref_store(the_repository),
+	refs_for_each_ref(get_main_ref_store(r),
 			  find_island_for_ref, &ild);
 	free_config_regexes(&ild);
 	deduplicate_islands(ild.remote_islands, r);
@@ -502,7 +500,7 @@ void load_delta_islands(struct repository *r, int progress)
 		fprintf(stderr, _("Marked %d islands, done.\n"), island_counter);
 }
 
-void propagate_island_marks(struct commit *commit)
+void propagate_island_marks(struct repository *r, struct commit *commit)
 {
 	khiter_t pos = kh_get_oid_map(island_marks, commit->object.oid);
 
@@ -510,8 +508,8 @@ void propagate_island_marks(struct commit *commit)
 		struct commit_list *p;
 		struct island_bitmap *root_marks = kh_value(island_marks, pos);
 
-		repo_parse_commit(the_repository, commit);
-		set_island_marks(&repo_get_commit_tree(the_repository, commit)->object,
+		repo_parse_commit(r, commit);
+		set_island_marks(&repo_get_commit_tree(r, commit)->object,
 				 root_marks);
 		for (p = commit->parents; p; p = p->next)
 			set_island_marks(&p->item->object, root_marks);

@@ -12,7 +12,7 @@ void resolve_tree_islands(struct repository *r,
 			  int progress,
 			  struct packing_data *to_pack);
 void load_delta_islands(struct repository *r, int progress);
-void propagate_island_marks(struct commit *commit);
+void propagate_island_marks(struct repository *r, struct commit *commit);
 int compute_pack_layers(struct packing_data *to_pack);
 void free_island_marks(void);
 

diff-lib.c
@@ -172,7 +172,7 @@ void run_diff_files(struct rev_info *revs, unsigned int option)
 			 * these from (stage - 2).
 			 */
 			dpath = combine_diff_path_new(ce->name, ce_namelen(ce),
-						      wt_mode, null_oid(), 2);
+						      wt_mode, null_oid(the_hash_algo), 2);
 
 			while (i < entries) {
 				struct cache_entry *nce = istate->cache[i];
@@ -257,7 +257,7 @@ void run_diff_files(struct rev_info *revs, unsigned int option)
 		    ce_intent_to_add(ce)) {
 			newmode = ce_mode_from_stat(ce, st.st_mode);
 			diff_addremove(&revs->diffopt, '+', newmode,
-				       null_oid(), 0, ce->name, 0);
+				       null_oid(the_hash_algo), 0, ce->name, 0);
 			continue;
 		}
 
@@ -274,7 +274,7 @@ void run_diff_files(struct rev_info *revs, unsigned int option)
 		}
 		oldmode = ce->ce_mode;
 		old_oid = &ce->oid;
-		new_oid = changed ? null_oid() : &ce->oid;
+		new_oid = changed ? null_oid(the_hash_algo) : &ce->oid;
 		diff_change(&revs->diffopt, oldmode, newmode,
 			    old_oid, new_oid,
 			    !is_null_oid(old_oid),
@@ -330,7 +330,7 @@ static int get_stat_data(const struct cache_entry *ce,
 					  0, dirty_submodule);
 		if (changed) {
 			mode = ce_mode_from_stat(ce, st.st_mode);
-			oid = null_oid();
+			oid = null_oid(the_hash_algo);
 		}
 	}
 
@@ -402,7 +402,7 @@ static int show_modified(struct rev_info *revs,
 
 		p = combine_diff_path_new(new_entry->name,
 					  ce_namelen(new_entry),
-					  mode, null_oid(), 2);
+					  mode, null_oid(the_hash_algo), 2);
 		p->parent[0].status = DIFF_STATUS_MODIFIED;
 		p->parent[0].mode = new_entry->ce_mode;
 		oidcpy(&p->parent[0].oid, &new_entry->oid);

@@ -113,7 +113,8 @@ static void populate_from_stdin(struct diff_filespec *s)
 	populate_common(s, &buf);
 }
 
-static struct diff_filespec *noindex_filespec(const char *name, int mode,
+static struct diff_filespec *noindex_filespec(const struct git_hash_algo *algop,
+					      const char *name, int mode,
 					      enum special special)
 {
 	struct diff_filespec *s;
@@ -121,7 +122,7 @@ static struct diff_filespec *noindex_filespec(const char *name, int mode,
 	if (!name)
 		name = "/dev/null";
 	s = alloc_filespec(name);
-	fill_filespec(s, null_oid(), 0, mode);
+	fill_filespec(s, null_oid(algop), 0, mode);
 	if (special == SPECIAL_STDIN)
 		populate_from_stdin(s);
 	else if (special == SPECIAL_PIPE)
@@ -129,7 +130,7 @@ static struct diff_filespec *noindex_filespec(const char *name, int mode,
 	return s;
 }
 
-static int queue_diff(struct diff_options *o,
+static int queue_diff(struct diff_options *o, const struct git_hash_algo *algop,
 		      const char *name1, const char *name2, int recursing)
 {
 	int mode1 = 0, mode2 = 0;
@@ -145,14 +146,14 @@ static int queue_diff(struct diff_options *o,
 
 		if (S_ISDIR(mode1)) {
 			/* 2 is file that is created */
-			d1 = noindex_filespec(NULL, 0, SPECIAL_NONE);
-			d2 = noindex_filespec(name2, mode2, special2);
+			d1 = noindex_filespec(algop, NULL, 0, SPECIAL_NONE);
+			d2 = noindex_filespec(algop, name2, mode2, special2);
 			name2 = NULL;
 			mode2 = 0;
 		} else {
 			/* 1 is file that is deleted */
-			d1 = noindex_filespec(name1, mode1, special1);
-			d2 = noindex_filespec(NULL, 0, SPECIAL_NONE);
+			d1 = noindex_filespec(algop, name1, mode1, special1);
+			d2 = noindex_filespec(algop, NULL, 0, SPECIAL_NONE);
 			name1 = NULL;
 			mode1 = 0;
 		}
@@ -217,7 +218,7 @@ static int queue_diff(struct diff_options *o,
 			n2 = buffer2.buf;
 		}
 
-		ret = queue_diff(o, n1, n2, 1);
+		ret = queue_diff(o, algop, n1, n2, 1);
 	}
 	string_list_clear(&p1, 0);
 	string_list_clear(&p2, 0);
@@ -234,8 +235,8 @@ static int queue_diff(struct diff_options *o,
 		SWAP(special1, special2);
 	}
 
-	d1 = noindex_filespec(name1, mode1, special1);
-	d2 = noindex_filespec(name2, mode2, special2);
+	d1 = noindex_filespec(algop, name1, mode1, special1);
+	d2 = noindex_filespec(algop, name2, mode2, special2);
 	diff_queue(&diff_queued_diff, d1, d2);
 	return 0;
 }
@@ -297,9 +298,8 @@ static const char * const diff_no_index_usage[] = {
 	NULL
 };
 
-int diff_no_index(struct rev_info *revs,
-		  int implicit_no_index,
-		  int argc, const char **argv)
+int diff_no_index(struct rev_info *revs, const struct git_hash_algo *algop,
+		  int implicit_no_index, int argc, const char **argv)
 {
 	int i, no_index;
 	int ret = 1;
@@ -354,7 +354,7 @@ int diff_no_index(struct rev_info *revs,
 	setup_diff_pager(&revs->diffopt);
 	revs->diffopt.flags.exit_with_status = 1;
 
-	if (queue_diff(&revs->diffopt, paths[0], paths[1], 0))
+	if (queue_diff(&revs->diffopt, algop, paths[0], paths[1], 0))
 		goto out;
 	diff_set_mnemonic_prefix(&revs->diffopt, "1/", "2/");
 	diffcore_std(&revs->diffopt);

diff.c
@@ -4193,7 +4193,8 @@ int diff_populate_filespec(struct repository *r,
 	 * is probably fine.
 	 */
 	if (check_binary &&
-	    s->size > big_file_threshold && s->is_binary == -1) {
+	    s->size > repo_settings_get_big_file_threshold(the_repository) &&
+	    s->is_binary == -1) {
 		s->is_binary = 1;
 		return 0;
 	}
@@ -4243,7 +4244,8 @@ object_read:
 	if (size_only || check_binary) {
 		if (size_only)
 			return 0;
-		if (s->size > big_file_threshold && s->is_binary == -1) {
+		if (s->size > repo_settings_get_big_file_threshold(the_repository) &&
+		    s->is_binary == -1) {
 			s->is_binary = 1;
 			return 0;
 		}
@@ -4344,7 +4346,7 @@ static struct diff_tempfile *prepare_temp_file(struct repository *r,
 			die_errno("readlink(%s)", one->path);
 		prep_temp_blob(r->index, one->path, temp, sb.buf, sb.len,
 			       (one->oid_valid ?
-				&one->oid : null_oid()),
+				&one->oid : null_oid(the_hash_algo)),
 			       (one->oid_valid ?
 				one->mode : S_IFLNK));
 		strbuf_release(&sb);
@@ -4353,7 +4355,7 @@ static struct diff_tempfile *prepare_temp_file(struct repository *r,
 		/* we can borrow from the file in the work tree */
 		temp->name = one->path;
 		if (!one->oid_valid)
-			oid_to_hex_r(temp->hex, null_oid());
+			oid_to_hex_r(temp->hex, null_oid(the_hash_algo));
 		else
 			oid_to_hex_r(temp->hex, &one->oid);
 		/* Even though we may sometimes borrow the
@@ -6647,8 +6649,8 @@ static void create_filepairs_for_header_only_notifications(struct diff_options *
 
 		one = alloc_filespec(e->key);
 		two = alloc_filespec(e->key);
-		fill_filespec(one, null_oid(), 0, 0);
-		fill_filespec(two, null_oid(), 0, 0);
+		fill_filespec(one, null_oid(the_hash_algo), 0, 0);
+		fill_filespec(two, null_oid(the_hash_algo), 0, 0);
 		p = diff_queue(q, one, two);
 		p->status = DIFF_STATUS_MODIFIED;
 	}

diff.h
@@ -689,7 +689,7 @@ void flush_one_hunk(struct object_id *result, struct git_hash_ctx *ctx);
 
 int diff_result_code(struct rev_info *);
 
-int diff_no_index(struct rev_info *,
+int diff_no_index(struct rev_info *, const struct git_hash_algo *algop,
 		  int implicit_no_index, int, const char **);
 
 int index_differs_from(struct repository *r, const char *def,

dir.c
@@ -4035,7 +4035,7 @@ static void connect_wt_gitdir_in_nested(const char *sub_worktree,
 		 */
 		i++;
 
-		sub = submodule_from_path(&subrepo, null_oid(), ce->name);
+		sub = submodule_from_path(&subrepo, null_oid(the_hash_algo), ce->name);
 		if (!sub || !is_submodule_active(&subrepo, ce->name))
 			/* .gitmodules broken or inactive sub */
 			continue;

@@ -49,7 +49,6 @@ int fsync_object_files = -1;
 int use_fsync = -1;
 enum fsync_method fsync_method = FSYNC_METHOD_DEFAULT;
 enum fsync_component fsync_components = FSYNC_COMPONENTS_DEFAULT;
-unsigned long big_file_threshold = 512 * 1024 * 1024;
 char *editor_program;
 char *askpass_program;
 char *excludes_file;

@@ -154,7 +154,6 @@ extern int zlib_compression_level;
 extern int pack_compression_level;
 extern size_t packed_git_window_size;
 extern size_t packed_git_limit;
-extern unsigned long big_file_threshold;
 extern unsigned long pack_size_limit_cfg;
 extern int max_allowed_tree_depth;
 

grep.c
@@ -1517,7 +1517,7 @@ static int fill_textconv_grep(struct repository *r,
 		fill_filespec(df, gs->identifier, 1, 0100644);
 		break;
 	case GREP_SOURCE_FILE:
-		fill_filespec(df, null_oid(), 0, 0100644);
+		fill_filespec(df, null_oid(r->hash_algo), 0, 0100644);
 		break;
 	default:
 		BUG("attempt to textconv something without a path?");

@@ -0,0 +1,277 @@
+#include "git-compat-util.h"
+#include "hash.h"
+#include "hex.h"
+
+static const struct object_id empty_tree_oid = {
+	.hash = {
+		0x4b, 0x82, 0x5d, 0xc6, 0x42, 0xcb, 0x6e, 0xb9, 0xa0, 0x60,
+		0xe5, 0x4b, 0xf8, 0xd6, 0x92, 0x88, 0xfb, 0xee, 0x49, 0x04
+	},
+	.algo = GIT_HASH_SHA1,
+};
+static const struct object_id empty_blob_oid = {
+	.hash = {
+		0xe6, 0x9d, 0xe2, 0x9b, 0xb2, 0xd1, 0xd6, 0x43, 0x4b, 0x8b,
+		0x29, 0xae, 0x77, 0x5a, 0xd8, 0xc2, 0xe4, 0x8c, 0x53, 0x91
+	},
+	.algo = GIT_HASH_SHA1,
+};
+static const struct object_id null_oid_sha1 = {
+	.hash = {0},
+	.algo = GIT_HASH_SHA1,
+};
+static const struct object_id empty_tree_oid_sha256 = {
+	.hash = {
+		0x6e, 0xf1, 0x9b, 0x41, 0x22, 0x5c, 0x53, 0x69, 0xf1, 0xc1,
+		0x04, 0xd4, 0x5d, 0x8d, 0x85, 0xef, 0xa9, 0xb0, 0x57, 0xb5,
+		0x3b, 0x14, 0xb4, 0xb9, 0xb9, 0x39, 0xdd, 0x74, 0xde, 0xcc,
+		0x53, 0x21
+	},
+	.algo = GIT_HASH_SHA256,
+};
+static const struct object_id empty_blob_oid_sha256 = {
+	.hash = {
+		0x47, 0x3a, 0x0f, 0x4c, 0x3b, 0xe8, 0xa9, 0x36, 0x81, 0xa2,
+		0x67, 0xe3, 0xb1, 0xe9, 0xa7, 0xdc, 0xda, 0x11, 0x85, 0x43,
+		0x6f, 0xe1, 0x41, 0xf7, 0x74, 0x91, 0x20, 0xa3, 0x03, 0x72,
+		0x18, 0x13
+	},
+	.algo = GIT_HASH_SHA256,
+};
+static const struct object_id null_oid_sha256 = {
+	.hash = {0},
+	.algo = GIT_HASH_SHA256,
+};
+
+static void git_hash_sha1_init(struct git_hash_ctx *ctx)
+{
+	ctx->algop = &hash_algos[GIT_HASH_SHA1];
+	git_SHA1_Init(&ctx->state.sha1);
+}
+
+static void git_hash_sha1_clone(struct git_hash_ctx *dst, const struct git_hash_ctx *src)
+{
+	dst->algop = src->algop;
+	git_SHA1_Clone(&dst->state.sha1, &src->state.sha1);
+}
+
+static void git_hash_sha1_update(struct git_hash_ctx *ctx, const void *data, size_t len)
+{
+	git_SHA1_Update(&ctx->state.sha1, data, len);
+}
+
+static void git_hash_sha1_final(unsigned char *hash, struct git_hash_ctx *ctx)
+{
+	git_SHA1_Final(hash, &ctx->state.sha1);
+}
+
+static void git_hash_sha1_final_oid(struct object_id *oid, struct git_hash_ctx *ctx)
+{
+	git_SHA1_Final(oid->hash, &ctx->state.sha1);
+	memset(oid->hash + GIT_SHA1_RAWSZ, 0, GIT_MAX_RAWSZ - GIT_SHA1_RAWSZ);
+	oid->algo = GIT_HASH_SHA1;
+}
+
+static void git_hash_sha1_init_unsafe(struct git_hash_ctx *ctx)
+{
+	ctx->algop = unsafe_hash_algo(&hash_algos[GIT_HASH_SHA1]);
+	git_SHA1_Init_unsafe(&ctx->state.sha1_unsafe);
+}
+
+static void git_hash_sha1_clone_unsafe(struct git_hash_ctx *dst, const struct git_hash_ctx *src)
+{
+	dst->algop = src->algop;
+	git_SHA1_Clone_unsafe(&dst->state.sha1_unsafe, &src->state.sha1_unsafe);
+}
+
+static void git_hash_sha1_update_unsafe(struct git_hash_ctx *ctx, const void *data,
+					size_t len)
+{
+	git_SHA1_Update_unsafe(&ctx->state.sha1_unsafe, data, len);
+}
+
+static void git_hash_sha1_final_unsafe(unsigned char *hash, struct git_hash_ctx *ctx)
+{
+	git_SHA1_Final_unsafe(hash, &ctx->state.sha1_unsafe);
+}
+
+static void git_hash_sha1_final_oid_unsafe(struct object_id *oid, struct git_hash_ctx *ctx)
+{
+	git_SHA1_Final_unsafe(oid->hash, &ctx->state.sha1_unsafe);
+	memset(oid->hash + GIT_SHA1_RAWSZ, 0, GIT_MAX_RAWSZ - GIT_SHA1_RAWSZ);
+	oid->algo = GIT_HASH_SHA1;
+}
+
+static void git_hash_sha256_init(struct git_hash_ctx *ctx)
+{
+	ctx->algop = unsafe_hash_algo(&hash_algos[GIT_HASH_SHA256]);
+	git_SHA256_Init(&ctx->state.sha256);
+}
+
+static void git_hash_sha256_clone(struct git_hash_ctx *dst, const struct git_hash_ctx *src)
+{
+	dst->algop = src->algop;
+	git_SHA256_Clone(&dst->state.sha256, &src->state.sha256);
+}
+
+static void git_hash_sha256_update(struct git_hash_ctx *ctx, const void *data, size_t len)
+{
+	git_SHA256_Update(&ctx->state.sha256, data, len);
+}
+
+static void git_hash_sha256_final(unsigned char *hash, struct git_hash_ctx *ctx)
+{
+	git_SHA256_Final(hash, &ctx->state.sha256);
+}
+
+static void git_hash_sha256_final_oid(struct object_id *oid, struct git_hash_ctx *ctx)
+{
+	git_SHA256_Final(oid->hash, &ctx->state.sha256);
+	/*
+	 * This currently does nothing, so the compiler should optimize it out,
+	 * but keep it in case we extend the hash size again.
+	 */
+	memset(oid->hash + GIT_SHA256_RAWSZ, 0, GIT_MAX_RAWSZ - GIT_SHA256_RAWSZ);
+	oid->algo = GIT_HASH_SHA256;
+}
+
+static void git_hash_unknown_init(struct git_hash_ctx *ctx UNUSED)
+{
+	BUG("trying to init unknown hash");
+}
+
+static void git_hash_unknown_clone(struct git_hash_ctx *dst UNUSED,
+				   const struct git_hash_ctx *src UNUSED)
+{
+	BUG("trying to clone unknown hash");
+}
+
+static void git_hash_unknown_update(struct git_hash_ctx *ctx UNUSED,
+				    const void *data UNUSED,
+				    size_t len UNUSED)
+{
+	BUG("trying to update unknown hash");
+}
+
+static void git_hash_unknown_final(unsigned char *hash UNUSED,
+				   struct git_hash_ctx *ctx UNUSED)
+{
+	BUG("trying to finalize unknown hash");
+}
+
+static void git_hash_unknown_final_oid(struct object_id *oid UNUSED,
+				       struct git_hash_ctx *ctx UNUSED)
+{
+	BUG("trying to finalize unknown hash");
+}
+
+static const struct git_hash_algo sha1_unsafe_algo = {
+	.name = "sha1",
+	.format_id = GIT_SHA1_FORMAT_ID,
+	.rawsz = GIT_SHA1_RAWSZ,
+	.hexsz = GIT_SHA1_HEXSZ,
+	.blksz = GIT_SHA1_BLKSZ,
+	.init_fn = git_hash_sha1_init_unsafe,
+	.clone_fn = git_hash_sha1_clone_unsafe,
+	.update_fn = git_hash_sha1_update_unsafe,
+	.final_fn = git_hash_sha1_final_unsafe,
+	.final_oid_fn = git_hash_sha1_final_oid_unsafe,
+	.empty_tree = &empty_tree_oid,
+	.empty_blob = &empty_blob_oid,
+	.null_oid = &null_oid_sha1,
+};
+
+const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
+	{
+		.name = NULL,
+		.format_id = 0x00000000,
+		.rawsz = 0,
+		.hexsz = 0,
+		.blksz = 0,
+		.init_fn = git_hash_unknown_init,
+		.clone_fn = git_hash_unknown_clone,
+		.update_fn = git_hash_unknown_update,
+		.final_fn = git_hash_unknown_final,
+		.final_oid_fn = git_hash_unknown_final_oid,
+		.empty_tree = NULL,
+		.empty_blob = NULL,
+		.null_oid = NULL,
+	},
+	{
+		.name = "sha1",
+		.format_id = GIT_SHA1_FORMAT_ID,
+		.rawsz = GIT_SHA1_RAWSZ,
+		.hexsz = GIT_SHA1_HEXSZ,
+		.blksz = GIT_SHA1_BLKSZ,
+		.init_fn = git_hash_sha1_init,
+		.clone_fn = git_hash_sha1_clone,
+		.update_fn = git_hash_sha1_update,
+		.final_fn = git_hash_sha1_final,
+		.final_oid_fn = git_hash_sha1_final_oid,
+		.unsafe = &sha1_unsafe_algo,
+		.empty_tree = &empty_tree_oid,
+		.empty_blob = &empty_blob_oid,
+		.null_oid = &null_oid_sha1,
+	},
+	{
+		.name = "sha256",
+		.format_id = GIT_SHA256_FORMAT_ID,
+		.rawsz = GIT_SHA256_RAWSZ,
+		.hexsz = GIT_SHA256_HEXSZ,
+		.blksz = GIT_SHA256_BLKSZ,
+		.init_fn = git_hash_sha256_init,
+		.clone_fn = git_hash_sha256_clone,
+		.update_fn = git_hash_sha256_update,
+		.final_fn = git_hash_sha256_final,
+		.final_oid_fn = git_hash_sha256_final_oid,
+		.empty_tree = &empty_tree_oid_sha256,
+		.empty_blob = &empty_blob_oid_sha256,
+		.null_oid = &null_oid_sha256,
+	}
+};
+
+const struct object_id *null_oid(const struct git_hash_algo *algop)
+{
+	return algop->null_oid;
+}
+
+const char *empty_tree_oid_hex(const struct git_hash_algo *algop)
+{
+	static char buf[GIT_MAX_HEXSZ + 1];
+	return oid_to_hex_r(buf, algop->empty_tree);
+}
+
+int hash_algo_by_name(const char *name)
+{
+	if (!name)
+		return GIT_HASH_UNKNOWN;
+	for (size_t i = 1; i < GIT_HASH_NALGOS; i++)
+		if (!strcmp(name, hash_algos[i].name))
+			return i;
+	return GIT_HASH_UNKNOWN;
+}
+
+int hash_algo_by_id(uint32_t format_id)
+{
+	for (size_t i = 1; i < GIT_HASH_NALGOS; i++)
+		if (format_id == hash_algos[i].format_id)
+			return i;
+	return GIT_HASH_UNKNOWN;
+}
+
+int hash_algo_by_length(size_t len)
+{
+	for (size_t i = 1; i < GIT_HASH_NALGOS; i++)
+		if (len == hash_algos[i].rawsz)
+			return i;
+	return GIT_HASH_UNKNOWN;
+}
+
+const struct git_hash_algo *unsafe_hash_algo(const struct git_hash_algo *algop)
+{
+	/* If we have a faster "unsafe" implementation, use that. */
+	if (algop->unsafe)
+		return algop->unsafe;
+	/* Otherwise use the default one. */
+	return algop;
+}

hash.h
@@ -326,7 +326,7 @@ int hash_algo_by_name(const char *name);
 /* Identical, except based on the format ID. */
 int hash_algo_by_id(uint32_t format_id);
 /* Identical, except based on the length. */
-int hash_algo_by_length(int len);
+int hash_algo_by_length(size_t len);
 /* Identical, except for a pointer to struct git_hash_algo. */
 static inline int hash_algo_by_ptr(const struct git_hash_algo *p)
 {
@@ -341,7 +341,7 @@ static inline int hash_algo_by_ptr(const struct git_hash_algo *p)
 
 const struct git_hash_algo *unsafe_hash_algo(const struct git_hash_algo *algop);
 
-const struct object_id *null_oid(void);
+const struct object_id *null_oid(const struct git_hash_algo *algop);
 
 static inline int hashcmp(const unsigned char *sha1, const unsigned char *sha2, const struct git_hash_algo *algop)
 {
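
The lookup helpers in the new hash file share one scan over `hash_algos[]`
that skips the reserved index 0, and `hash_algo_by_length()` now takes
`size_t`, matching the "-Wsign-compare" fix named in the merge. A
self-contained extract of the idiom with a stub two-algorithm table:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define GIT_HASH_UNKNOWN 0
    #define GIT_HASH_NALGOS  3

    struct git_hash_algo { const char *name; size_t rawsz; };

    static const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
        { NULL, 0 },       /* index 0 reserved for "unknown" */
        { "sha1", 20 },
        { "sha256", 32 },
    };

    static int hash_algo_by_name(const char *name)
    {
        if (!name)
            return GIT_HASH_UNKNOWN;
        for (size_t i = 1; i < GIT_HASH_NALGOS; i++)
            if (!strcmp(name, hash_algos[i].name))
                return (int)i;
        return GIT_HASH_UNKNOWN;
    }

    static int hash_algo_by_length(size_t len) /* size_t, not int */
    {
        for (size_t i = 1; i < GIT_HASH_NALGOS; i++)
            if (len == hash_algos[i].rawsz)
                return (int)i;
        return GIT_HASH_UNKNOWN;
    }

    int main(void)
    {
        printf("sha256  -> %d\n", hash_algo_by_name("sha256"));
        printf("rawsz20 -> %d\n", hash_algo_by_length(20));
        return 0;
    }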

@@ -499,7 +499,7 @@ void log_write_email_headers(struct rev_info *opt, struct commit *commit,
 {
 	struct strbuf headers = STRBUF_INIT;
 	const char *name = oid_to_hex(opt->zero_commit ?
-				      null_oid() : &commit->object.oid);
+				      null_oid(the_hash_algo) : &commit->object.oid);
 
 	*need_8bit_cte_p = 0; /* unknown */
 
merge-ort.c | 26
@@ -1817,7 +1817,7 @@ static int merge_submodule(struct merge_options *opt,
 		BUG("submodule deleted on one side; this should be handled outside of merge_submodule()");
 
 	if ((sub_not_initialized = repo_submodule_init(&subrepo,
-		opt->repo, path, null_oid()))) {
+		opt->repo, path, null_oid(the_hash_algo)))) {
 		path_msg(opt, CONFLICT_SUBMODULE_NOT_INITIALIZED, 0,
 			 path, NULL, NULL, NULL,
 			 _("Failed to merge submodule %s (not checked out)"),
@@ -2199,7 +2199,7 @@ static int handle_content_merge(struct merge_options *opt,
 		two_way = ((S_IFMT & o->mode) != (S_IFMT & a->mode));
 
 		merge_status = merge_3way(opt, path,
-					  two_way ? null_oid() : &o->oid,
+					  two_way ? null_oid(the_hash_algo) : &o->oid,
 					  &a->oid, &b->oid,
 					  pathnames, extra_marker_size,
 					  &result_buf);
@@ -2231,7 +2231,7 @@ static int handle_content_merge(struct merge_options *opt,
 	} else if (S_ISGITLINK(a->mode)) {
 		int two_way = ((S_IFMT & o->mode) != (S_IFMT & a->mode));
 		clean = merge_submodule(opt, pathnames[0],
-					two_way ? null_oid() : &o->oid,
+					two_way ? null_oid(the_hash_algo) : &o->oid,
 					&a->oid, &b->oid, &result->oid);
 		if (clean < 0)
 			return -1;
@@ -2739,7 +2739,7 @@ static void apply_directory_rename_modifications(struct merge_options *opt,
 		assert(!new_ci->match_mask);
 		new_ci->dirmask = 0;
 		new_ci->stages[1].mode = 0;
-		oidcpy(&new_ci->stages[1].oid, null_oid());
+		oidcpy(&new_ci->stages[1].oid, null_oid(the_hash_algo));
 
 		/*
 		 * Now that we have the file information in new_ci, make sure
@@ -2752,7 +2752,7 @@ static void apply_directory_rename_modifications(struct merge_options *opt,
 				continue;
 			/* zero out any entries related to files */
 			ci->stages[i].mode = 0;
-			oidcpy(&ci->stages[i].oid, null_oid());
+			oidcpy(&ci->stages[i].oid, null_oid(the_hash_algo));
 		}
 
 		/* Now we want to focus on new_ci, so reassign ci to it. */
@@ -3123,7 +3123,7 @@ static int process_renames(struct merge_options *opt,
 			if (type_changed) {
 				/* rename vs. typechange */
 				/* Mark the original as resolved by removal */
-				memcpy(&oldinfo->stages[0].oid, null_oid(),
+				memcpy(&oldinfo->stages[0].oid, null_oid(the_hash_algo),
 				       sizeof(oldinfo->stages[0].oid));
 				oldinfo->stages[0].mode = 0;
 				oldinfo->filemask &= 0x06;
@@ -4006,7 +4006,7 @@ static int process_entry(struct merge_options *opt,
 			if (ci->filemask & (1 << i))
 				continue;
 			ci->stages[i].mode = 0;
-			oidcpy(&ci->stages[i].oid, null_oid());
+			oidcpy(&ci->stages[i].oid, null_oid(the_hash_algo));
 		}
 	} else if (ci->df_conflict && ci->merged.result.mode != 0) {
 		/*
@@ -4053,7 +4053,7 @@ static int process_entry(struct merge_options *opt,
 				continue;
 			/* zero out any entries related to directories */
 			new_ci->stages[i].mode = 0;
-			oidcpy(&new_ci->stages[i].oid, null_oid());
+			oidcpy(&new_ci->stages[i].oid, null_oid(the_hash_algo));
 		}
 
 		/*
@@ -4175,11 +4175,11 @@ static int process_entry(struct merge_options *opt,
 			new_ci->merged.result.mode = ci->stages[2].mode;
 			oidcpy(&new_ci->merged.result.oid, &ci->stages[2].oid);
 			new_ci->stages[1].mode = 0;
-			oidcpy(&new_ci->stages[1].oid, null_oid());
+			oidcpy(&new_ci->stages[1].oid, null_oid(the_hash_algo));
 			new_ci->filemask = 5;
 			if ((S_IFMT & b_mode) != (S_IFMT & o_mode)) {
 				new_ci->stages[0].mode = 0;
-				oidcpy(&new_ci->stages[0].oid, null_oid());
+				oidcpy(&new_ci->stages[0].oid, null_oid(the_hash_algo));
 				new_ci->filemask = 4;
 			}
 
@@ -4187,11 +4187,11 @@ static int process_entry(struct merge_options *opt,
 			ci->merged.result.mode = ci->stages[1].mode;
 			oidcpy(&ci->merged.result.oid, &ci->stages[1].oid);
 			ci->stages[2].mode = 0;
-			oidcpy(&ci->stages[2].oid, null_oid());
+			oidcpy(&ci->stages[2].oid, null_oid(the_hash_algo));
 			ci->filemask = 3;
 			if ((S_IFMT & a_mode) != (S_IFMT & o_mode)) {
 				ci->stages[0].mode = 0;
-				oidcpy(&ci->stages[0].oid, null_oid());
+				oidcpy(&ci->stages[0].oid, null_oid(the_hash_algo));
 				ci->filemask = 2;
 			}
 
@@ -4316,7 +4316,7 @@ static int process_entry(struct merge_options *opt,
 		/* Deleted on both sides */
 		ci->merged.is_null = 1;
 		ci->merged.result.mode = 0;
-		oidcpy(&ci->merged.result.oid, null_oid());
+		oidcpy(&ci->merged.result.oid, null_oid(the_hash_algo));
 		assert(!ci->df_conflict);
 		ci->merged.clean = !ci->path_conflict;
 	}
merge-recursive.c
@@ -502,7 +502,7 @@ static int get_tree_entry_if_blob(struct repository *r,
 
 	ret = get_tree_entry(r, tree, path, &dfs->oid, &dfs->mode);
 	if (S_ISDIR(dfs->mode)) {
-		oidcpy(&dfs->oid, null_oid());
+		oidcpy(&dfs->oid, null_oid(the_hash_algo));
 		dfs->mode = 0;
 	}
 	return ret;
@@ -1238,7 +1238,7 @@ static int merge_submodule(struct merge_options *opt,
 	if (is_null_oid(b))
 		return 0;
 
-	if (repo_submodule_init(&subrepo, opt->repo, path, null_oid())) {
+	if (repo_submodule_init(&subrepo, opt->repo, path, null_oid(the_hash_algo))) {
 		output(opt, 1, _("Failed to merge submodule %s (not checked out)"), path);
 		return 0;
 	}
@@ -1698,7 +1698,7 @@ static int handle_file_collision(struct merge_options *opt,
 
 	/* Store things in diff_filespecs for functions that need it */
 	null.path = (char *)collide_path;
-	oidcpy(&null.oid, null_oid());
+	oidcpy(&null.oid, null_oid(the_hash_algo));
 	null.mode = 0;
 
 	if (merge_mode_and_contents(opt, &null, a, b, collide_path,
@@ -2897,14 +2897,14 @@ static int process_renames(struct merge_options *opt,
 			dst_other.mode = ren1->dst_entry->stages[other_stage].mode;
 			try_merge = 0;
 
-			if (oideq(&src_other.oid, null_oid()) &&
+			if (oideq(&src_other.oid, null_oid(the_hash_algo)) &&
 			    ren1->dir_rename_original_type == 'A') {
 				setup_rename_conflict_info(RENAME_VIA_DIR,
 							   opt, ren1, NULL);
 			} else if (renamed_to_self) {
 				setup_rename_conflict_info(RENAME_NORMAL,
 							   opt, ren1, NULL);
-			} else if (oideq(&src_other.oid, null_oid())) {
+			} else if (oideq(&src_other.oid, null_oid(the_hash_algo))) {
 				setup_rename_conflict_info(RENAME_DELETE,
 							   opt, ren1, NULL);
 			} else if ((dst_other.mode == ren1->pair->two->mode) &&
@@ -2923,7 +2923,7 @@ static int process_renames(struct merge_options *opt,
 						  1, /* update_cache */
 						  0  /* update_wd    */))
 					clean_merge = -1;
-			} else if (!oideq(&dst_other.oid, null_oid())) {
+			} else if (!oideq(&dst_other.oid, null_oid(the_hash_algo))) {
 				/*
 				 * Probably not a clean merge, but it's
 				 * premature to set clean_merge to 0 here,
meson.build
@@ -311,6 +311,7 @@ libgit_sources = [
   'graph.c',
   'grep.c',
   'hash-lookup.c',
+  'hash.c',
   'hashmap.c',
   'help.c',
   'hex.c',
midx-write.c | 12
@@ -664,7 +664,7 @@ static void write_midx_reverse_index(struct write_midx_context *ctx,
 	get_midx_filename_ext(ctx->repo->hash_algo, &buf, object_dir,
 			      midx_hash, MIDX_EXT_REV);
 
-	tmp_file = write_rev_file_order(ctx->repo->hash_algo, NULL, ctx->pack_order,
+	tmp_file = write_rev_file_order(ctx->repo, NULL, ctx->pack_order,
 					ctx->entries_nr, midx_hash, WRITE_REV);
 
 	if (finalize_object_file(tmp_file, buf.buf))
@@ -714,7 +714,7 @@ static int add_ref_to_pending(const char *refname, const char *referent UNUSED,
 	if (!peel_iterated_oid(revs->repo, oid, &peeled))
 		oid = &peeled;
 
-	object = parse_object_or_die(oid, refname);
+	object = parse_object_or_die(revs->repo, oid, refname);
 	if (object->type != OBJ_COMMIT)
 		return 0;
 
@@ -774,7 +774,7 @@ static int read_refs_snapshot(const char *refs_snapshot,
 		if (*end)
 			die(_("malformed line: %s"), buf.buf);
 
-		object = parse_object_or_die(&oid, NULL);
+		object = parse_object_or_die(revs->repo, &oid, NULL);
 		if (preferred)
 			object->flags |= NEEDS_BITMAP;
 
@@ -1361,10 +1361,12 @@ static int write_midx_internal(struct repository *r, const char *object_dir,
 			return -1;
 		}
 
-		f = hashfd(get_tempfile_fd(incr), get_tempfile_path(incr));
+		f = hashfd(r->hash_algo, get_tempfile_fd(incr),
+			   get_tempfile_path(incr));
 	} else {
 		hold_lock_file_for_update(&lk, midx_name.buf, LOCK_DIE_ON_ERROR);
-		f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk));
+		f = hashfd(r->hash_algo, get_lock_file_fd(&lk),
+			   get_lock_file_path(&lk));
 	}
 
 	cf = init_chunkfile(f);
midx.c | 3
@@ -747,7 +747,8 @@ int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, i
 
 int midx_checksum_valid(struct multi_pack_index *m)
 {
-	return hashfile_checksum_valid(m->data, m->data_len);
+	return hashfile_checksum_valid(m->repo->hash_algo,
+				       m->data, m->data_len);
 }
 
 struct clear_midx_data {
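A hedged sketch of the new call shape (the wrapper name below is invented for illustration): code that validates a hash-trailed file must now say which algorithm wrote the trailer instead of implicitly using the_repository:

#include "csum-file.h"
#include "hash.h"

/* Recompute and compare the trailing checksum of an mmapped file. */
static int trailer_checksum_ok(const struct git_hash_algo *algop,
			       const unsigned char *data, size_t len)
{
	return hashfile_checksum_valid(algop, data, len);
}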
notes-merge.c
@@ -617,7 +617,7 @@ int notes_merge(struct notes_merge_options *o,
 	if (repo_get_merge_bases(the_repository, local, remote, &bases) < 0)
 		exit(128);
 	if (!bases) {
-		base_oid = null_oid();
+		base_oid = null_oid(the_hash_algo);
 		base_tree_oid = the_hash_algo->empty_tree;
 		if (o->verbosity >= 4)
 			printf("No merge base found; doing history-less merge\n");
notes.c | 2
@@ -1353,7 +1353,7 @@ int copy_note(struct notes_tree *t,
 	if (note)
 		return add_note(t, to_obj, note, combine_notes);
 	else if (existing_note)
-		return add_note(t, to_obj, null_oid(), combine_notes);
+		return add_note(t, to_obj, null_oid(the_hash_algo), combine_notes);
 
 	return 0;
 }
object-file-convert.c
@@ -1,4 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
 #define DISABLE_SIGN_COMPARE_WARNINGS
 
 #include "git-compat-util.h"
@@ -63,7 +62,8 @@ static int decode_tree_entry_raw(struct object_id *oid, const char **path,
 	return 0;
 }
 
-static int convert_tree_object(struct strbuf *out,
+static int convert_tree_object(struct repository *repo,
+			       struct strbuf *out,
 			       const struct git_hash_algo *from,
 			       const struct git_hash_algo *to,
 			       const char *buffer, size_t size)
@@ -78,7 +78,7 @@ static int convert_tree_object(struct strbuf *out,
 		if (decode_tree_entry_raw(&entry_oid, &path, &pathlen, from, p,
 					  end - p))
 			return error(_("failed to decode tree entry"));
-		if (repo_oid_to_algop(the_repository, &entry_oid, to, &mapped_oid))
+		if (repo_oid_to_algop(repo, &entry_oid, to, &mapped_oid))
 			return error(_("failed to map tree entry for %s"), oid_to_hex(&entry_oid));
 		strbuf_add(out, p, path - p);
 		strbuf_add(out, path, pathlen);
@@ -88,7 +88,8 @@ static int convert_tree_object(struct strbuf *out,
 	return 0;
 }
 
-static int convert_tag_object(struct strbuf *out,
+static int convert_tag_object(struct repository *repo,
+			      struct strbuf *out,
 			      const struct git_hash_algo *from,
 			      const struct git_hash_algo *to,
 			      const char *buffer, size_t size)
@@ -105,7 +106,7 @@ static int convert_tag_object(struct strbuf *out,
 		return error("bogus tag object");
 	if (parse_oid_hex_algop(buffer + 7, &oid, &p, from) < 0)
 		return error("bad tag object ID");
-	if (repo_oid_to_algop(the_repository, &oid, to, &mapped_oid))
+	if (repo_oid_to_algop(repo, &oid, to, &mapped_oid))
 		return error("unable to map tree %s in tag object",
 			     oid_to_hex(&oid));
 	size -= ((p + 1) - buffer);
@@ -139,7 +140,8 @@ static int convert_tag_object(struct strbuf *out,
 	return 0;
 }
 
-static int convert_commit_object(struct strbuf *out,
+static int convert_commit_object(struct repository *repo,
+				 struct strbuf *out,
 				 const struct git_hash_algo *from,
 				 const struct git_hash_algo *to,
 				 const char *buffer, size_t size)
@@ -165,7 +167,7 @@ static int convert_commit_object(struct strbuf *out,
 			    (p != eol))
 				return error(_("bad %s in commit"), "tree");
 
-			if (repo_oid_to_algop(the_repository, &oid, to, &mapped_oid))
+			if (repo_oid_to_algop(repo, &oid, to, &mapped_oid))
 				return error(_("unable to map %s %s in commit object"),
 					     "tree", oid_to_hex(&oid));
 			strbuf_addf(out, "tree %s\n", oid_to_hex(&mapped_oid));
@@ -177,7 +179,7 @@ static int convert_commit_object(struct strbuf *out,
 			    (p != eol))
 				return error(_("bad %s in commit"), "parent");
 
-			if (repo_oid_to_algop(the_repository, &oid, to, &mapped_oid))
+			if (repo_oid_to_algop(repo, &oid, to, &mapped_oid))
 				return error(_("unable to map %s %s in commit object"),
 					     "parent", oid_to_hex(&oid));
 
@@ -202,7 +204,7 @@ static int convert_commit_object(struct strbuf *out,
 		}
 
 		/* Compute the new tag object */
-		if (convert_tag_object(&new_tag, from, to, tag.buf, tag.len)) {
+		if (convert_tag_object(repo, &new_tag, from, to, tag.buf, tag.len)) {
 			strbuf_release(&tag);
 			strbuf_release(&new_tag);
 			return -1;
@@ -241,7 +243,8 @@ static int convert_commit_object(struct strbuf *out,
 	return 0;
 }
 
-int convert_object_file(struct strbuf *outbuf,
+int convert_object_file(struct repository *repo,
+			struct strbuf *outbuf,
 			const struct git_hash_algo *from,
 			const struct git_hash_algo *to,
 			const void *buf, size_t len,
@@ -256,13 +259,13 @@ int convert_object_file(struct strbuf *outbuf,
 
 	switch (type) {
 	case OBJ_COMMIT:
-		ret = convert_commit_object(outbuf, from, to, buf, len);
+		ret = convert_commit_object(repo, outbuf, from, to, buf, len);
 		break;
 	case OBJ_TREE:
-		ret = convert_tree_object(outbuf, from, to, buf, len);
+		ret = convert_tree_object(repo, outbuf, from, to, buf, len);
 		break;
 	case OBJ_TAG:
-		ret = convert_tag_object(outbuf, from, to, buf, len);
+		ret = convert_tag_object(repo, outbuf, from, to, buf, len);
 		break;
 	default:
 		/* Not implemented yet, so fail. */
object-file-convert.h
@@ -14,7 +14,8 @@ int repo_oid_to_algop(struct repository *repo, const struct object_id *src,
  * Convert an object file from one hash algorithm to another algorithm.
  * Return -1 on failure, 0 on success.
  */
-int convert_object_file(struct strbuf *outbuf,
+int convert_object_file(struct repository *repo,
+			struct strbuf *outbuf,
 			const struct git_hash_algo *from,
 			const struct git_hash_algo *to,
 			const void *buf, size_t len,
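Sketch of a caller under the new interface (the helper is hypothetical, and the compat_hash_algo/gentle-flag usage is an assumption drawn from the call sites in this diff): converting an object between a repository's main and compat algorithms now passes the repository explicitly, so OID mapping no longer reaches for the_repository:

#include "object-file-convert.h"
#include "object.h"
#include "repository.h"
#include "strbuf.h"

/* Convert a commit's raw bytes into the repository's compat encoding. */
static int commit_to_compat(struct repository *repo,
			    const void *buf, size_t len,
			    struct strbuf *out)
{
	/* Final argument is the "gentle" flag: report errors, don't die. */
	return convert_object_file(repo, out,
				   repo->hash_algo, repo->compat_hash_algo,
				   buf, len, OBJ_COMMIT, 1);
}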
object-file.c | 292
@@ -45,283 +45,6 @@
 /* The maximum size for an object header. */
 #define MAX_HEADER_LEN 32
 
-static const struct object_id empty_tree_oid = {
-	.hash = {
-		0x4b, 0x82, 0x5d, 0xc6, 0x42, 0xcb, 0x6e, 0xb9, 0xa0, 0x60,
-		0xe5, 0x4b, 0xf8, 0xd6, 0x92, 0x88, 0xfb, 0xee, 0x49, 0x04
-	},
-	.algo = GIT_HASH_SHA1,
-};
-static const struct object_id empty_blob_oid = {
-	.hash = {
-		0xe6, 0x9d, 0xe2, 0x9b, 0xb2, 0xd1, 0xd6, 0x43, 0x4b, 0x8b,
-		0x29, 0xae, 0x77, 0x5a, 0xd8, 0xc2, 0xe4, 0x8c, 0x53, 0x91
-	},
-	.algo = GIT_HASH_SHA1,
-};
-static const struct object_id null_oid_sha1 = {
-	.hash = {0},
-	.algo = GIT_HASH_SHA1,
-};
-static const struct object_id empty_tree_oid_sha256 = {
-	.hash = {
-		0x6e, 0xf1, 0x9b, 0x41, 0x22, 0x5c, 0x53, 0x69, 0xf1, 0xc1,
-		0x04, 0xd4, 0x5d, 0x8d, 0x85, 0xef, 0xa9, 0xb0, 0x57, 0xb5,
-		0x3b, 0x14, 0xb4, 0xb9, 0xb9, 0x39, 0xdd, 0x74, 0xde, 0xcc,
-		0x53, 0x21
-	},
-	.algo = GIT_HASH_SHA256,
-};
-static const struct object_id empty_blob_oid_sha256 = {
-	.hash = {
-		0x47, 0x3a, 0x0f, 0x4c, 0x3b, 0xe8, 0xa9, 0x36, 0x81, 0xa2,
-		0x67, 0xe3, 0xb1, 0xe9, 0xa7, 0xdc, 0xda, 0x11, 0x85, 0x43,
-		0x6f, 0xe1, 0x41, 0xf7, 0x74, 0x91, 0x20, 0xa3, 0x03, 0x72,
-		0x18, 0x13
-	},
-	.algo = GIT_HASH_SHA256,
-};
-static const struct object_id null_oid_sha256 = {
-	.hash = {0},
-	.algo = GIT_HASH_SHA256,
-};
-
-static void git_hash_sha1_init(struct git_hash_ctx *ctx)
-{
-	ctx->algop = &hash_algos[GIT_HASH_SHA1];
-	git_SHA1_Init(&ctx->state.sha1);
-}
-
-static void git_hash_sha1_clone(struct git_hash_ctx *dst, const struct git_hash_ctx *src)
-{
-	dst->algop = src->algop;
-	git_SHA1_Clone(&dst->state.sha1, &src->state.sha1);
-}
-
-static void git_hash_sha1_update(struct git_hash_ctx *ctx, const void *data, size_t len)
-{
-	git_SHA1_Update(&ctx->state.sha1, data, len);
-}
-
-static void git_hash_sha1_final(unsigned char *hash, struct git_hash_ctx *ctx)
-{
-	git_SHA1_Final(hash, &ctx->state.sha1);
-}
-
-static void git_hash_sha1_final_oid(struct object_id *oid, struct git_hash_ctx *ctx)
-{
-	git_SHA1_Final(oid->hash, &ctx->state.sha1);
-	memset(oid->hash + GIT_SHA1_RAWSZ, 0, GIT_MAX_RAWSZ - GIT_SHA1_RAWSZ);
-	oid->algo = GIT_HASH_SHA1;
-}
-
-static void git_hash_sha1_init_unsafe(struct git_hash_ctx *ctx)
-{
-	ctx->algop = unsafe_hash_algo(&hash_algos[GIT_HASH_SHA1]);
-	git_SHA1_Init_unsafe(&ctx->state.sha1_unsafe);
-}
-
-static void git_hash_sha1_clone_unsafe(struct git_hash_ctx *dst, const struct git_hash_ctx *src)
-{
-	dst->algop = src->algop;
-	git_SHA1_Clone_unsafe(&dst->state.sha1_unsafe, &src->state.sha1_unsafe);
-}
-
-static void git_hash_sha1_update_unsafe(struct git_hash_ctx *ctx, const void *data,
-					size_t len)
-{
-	git_SHA1_Update_unsafe(&ctx->state.sha1_unsafe, data, len);
-}
-
-static void git_hash_sha1_final_unsafe(unsigned char *hash, struct git_hash_ctx *ctx)
-{
-	git_SHA1_Final_unsafe(hash, &ctx->state.sha1_unsafe);
-}
-
-static void git_hash_sha1_final_oid_unsafe(struct object_id *oid, struct git_hash_ctx *ctx)
-{
-	git_SHA1_Final_unsafe(oid->hash, &ctx->state.sha1_unsafe);
-	memset(oid->hash + GIT_SHA1_RAWSZ, 0, GIT_MAX_RAWSZ - GIT_SHA1_RAWSZ);
-	oid->algo = GIT_HASH_SHA1;
-}
-
-static void git_hash_sha256_init(struct git_hash_ctx *ctx)
-{
-	ctx->algop = unsafe_hash_algo(&hash_algos[GIT_HASH_SHA256]);
-	git_SHA256_Init(&ctx->state.sha256);
-}
-
-static void git_hash_sha256_clone(struct git_hash_ctx *dst, const struct git_hash_ctx *src)
-{
-	dst->algop = src->algop;
-	git_SHA256_Clone(&dst->state.sha256, &src->state.sha256);
-}
-
-static void git_hash_sha256_update(struct git_hash_ctx *ctx, const void *data, size_t len)
-{
-	git_SHA256_Update(&ctx->state.sha256, data, len);
-}
-
-static void git_hash_sha256_final(unsigned char *hash, struct git_hash_ctx *ctx)
-{
-	git_SHA256_Final(hash, &ctx->state.sha256);
-}
-
-static void git_hash_sha256_final_oid(struct object_id *oid, struct git_hash_ctx *ctx)
-{
-	git_SHA256_Final(oid->hash, &ctx->state.sha256);
-	/*
-	 * This currently does nothing, so the compiler should optimize it out,
-	 * but keep it in case we extend the hash size again.
-	 */
-	memset(oid->hash + GIT_SHA256_RAWSZ, 0, GIT_MAX_RAWSZ - GIT_SHA256_RAWSZ);
-	oid->algo = GIT_HASH_SHA256;
-}
-
-static void git_hash_unknown_init(struct git_hash_ctx *ctx UNUSED)
-{
-	BUG("trying to init unknown hash");
-}
-
-static void git_hash_unknown_clone(struct git_hash_ctx *dst UNUSED,
-				   const struct git_hash_ctx *src UNUSED)
-{
-	BUG("trying to clone unknown hash");
-}
-
-static void git_hash_unknown_update(struct git_hash_ctx *ctx UNUSED,
-				    const void *data UNUSED,
-				    size_t len UNUSED)
-{
-	BUG("trying to update unknown hash");
-}
-
-static void git_hash_unknown_final(unsigned char *hash UNUSED,
-				   struct git_hash_ctx *ctx UNUSED)
-{
-	BUG("trying to finalize unknown hash");
-}
-
-static void git_hash_unknown_final_oid(struct object_id *oid UNUSED,
-				       struct git_hash_ctx *ctx UNUSED)
-{
-	BUG("trying to finalize unknown hash");
-}
-
-static const struct git_hash_algo sha1_unsafe_algo = {
-	.name = "sha1",
-	.format_id = GIT_SHA1_FORMAT_ID,
-	.rawsz = GIT_SHA1_RAWSZ,
-	.hexsz = GIT_SHA1_HEXSZ,
-	.blksz = GIT_SHA1_BLKSZ,
-	.init_fn = git_hash_sha1_init_unsafe,
-	.clone_fn = git_hash_sha1_clone_unsafe,
-	.update_fn = git_hash_sha1_update_unsafe,
-	.final_fn = git_hash_sha1_final_unsafe,
-	.final_oid_fn = git_hash_sha1_final_oid_unsafe,
-	.empty_tree = &empty_tree_oid,
-	.empty_blob = &empty_blob_oid,
-	.null_oid = &null_oid_sha1,
-};
-
-const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
-	{
-		.name = NULL,
-		.format_id = 0x00000000,
-		.rawsz = 0,
-		.hexsz = 0,
-		.blksz = 0,
-		.init_fn = git_hash_unknown_init,
-		.clone_fn = git_hash_unknown_clone,
-		.update_fn = git_hash_unknown_update,
-		.final_fn = git_hash_unknown_final,
-		.final_oid_fn = git_hash_unknown_final_oid,
-		.empty_tree = NULL,
-		.empty_blob = NULL,
-		.null_oid = NULL,
-	},
-	{
-		.name = "sha1",
-		.format_id = GIT_SHA1_FORMAT_ID,
-		.rawsz = GIT_SHA1_RAWSZ,
-		.hexsz = GIT_SHA1_HEXSZ,
-		.blksz = GIT_SHA1_BLKSZ,
-		.init_fn = git_hash_sha1_init,
-		.clone_fn = git_hash_sha1_clone,
-		.update_fn = git_hash_sha1_update,
-		.final_fn = git_hash_sha1_final,
-		.final_oid_fn = git_hash_sha1_final_oid,
-		.unsafe = &sha1_unsafe_algo,
-		.empty_tree = &empty_tree_oid,
-		.empty_blob = &empty_blob_oid,
-		.null_oid = &null_oid_sha1,
-	},
-	{
-		.name = "sha256",
-		.format_id = GIT_SHA256_FORMAT_ID,
-		.rawsz = GIT_SHA256_RAWSZ,
-		.hexsz = GIT_SHA256_HEXSZ,
-		.blksz = GIT_SHA256_BLKSZ,
-		.init_fn = git_hash_sha256_init,
-		.clone_fn = git_hash_sha256_clone,
-		.update_fn = git_hash_sha256_update,
-		.final_fn = git_hash_sha256_final,
-		.final_oid_fn = git_hash_sha256_final_oid,
-		.empty_tree = &empty_tree_oid_sha256,
-		.empty_blob = &empty_blob_oid_sha256,
-		.null_oid = &null_oid_sha256,
-	}
-};
-
-const struct object_id *null_oid(void)
-{
-	return the_hash_algo->null_oid;
-}
-
-const char *empty_tree_oid_hex(const struct git_hash_algo *algop)
-{
-	static char buf[GIT_MAX_HEXSZ + 1];
-	return oid_to_hex_r(buf, algop->empty_tree);
-}
-
-int hash_algo_by_name(const char *name)
-{
-	int i;
-	if (!name)
-		return GIT_HASH_UNKNOWN;
-	for (i = 1; i < GIT_HASH_NALGOS; i++)
-		if (!strcmp(name, hash_algos[i].name))
-			return i;
-	return GIT_HASH_UNKNOWN;
-}
-
-int hash_algo_by_id(uint32_t format_id)
-{
-	int i;
-	for (i = 1; i < GIT_HASH_NALGOS; i++)
-		if (format_id == hash_algos[i].format_id)
-			return i;
-	return GIT_HASH_UNKNOWN;
-}
-
-int hash_algo_by_length(int len)
-{
-	int i;
-	for (i = 1; i < GIT_HASH_NALGOS; i++)
-		if (len == hash_algos[i].rawsz)
-			return i;
-	return GIT_HASH_UNKNOWN;
-}
-
-const struct git_hash_algo *unsafe_hash_algo(const struct git_hash_algo *algop)
-{
-	/* If we have a faster "unsafe" implementation, use that. */
-	if (algop->unsafe)
-		return algop->unsafe;
-	/* Otherwise use the default one. */
-	return algop;
-}
-
 /*
  * This is meant to hold a *small* number of objects that you would
  * want repo_read_object_file() to be able to return, but yet you do not want
@@ -1793,7 +1516,7 @@ static int oid_object_info_convert(struct repository *r,
 	if (type == -1)
 		return -1;
 	if (type != OBJ_BLOB) {
-		ret = convert_object_file(&outbuf,
+		ret = convert_object_file(the_repository, &outbuf,
 					  the_hash_algo, input_algo,
 					  content, size, type, !do_die);
 		free(content);
@@ -2510,7 +2233,7 @@ int write_object_file_flags(const void *buf, unsigned long len,
 		hash_object_file(compat, buf, len, type, &compat_oid);
 	else {
 		struct strbuf converted = STRBUF_INIT;
-		convert_object_file(&converted, algo, compat,
+		convert_object_file(the_repository, &converted, algo, compat,
 				    buf, len, type, 0);
 		hash_object_file(compat, converted.buf, converted.len,
 				 type, &compat_oid);
@@ -2550,7 +2273,8 @@ int write_object_file_literally(const void *buf, unsigned long len,
 				 &compat_oid);
 	else if (compat_type != -1) {
 		struct strbuf converted = STRBUF_INIT;
-		convert_object_file(&converted, algo, compat,
+		convert_object_file(the_repository,
+				    &converted, algo, compat,
 				    buf, len, compat_type, 0);
 		hash_object_file(compat, converted.buf, converted.len,
 				 compat_type, &compat_oid);
@@ -2681,7 +2405,7 @@ static int index_mem(struct index_state *istate,
 
 		opts.strict = 1;
 		opts.error_func = hash_format_check_report;
-		if (fsck_buffer(null_oid(), type, buf, size, &opts))
+		if (fsck_buffer(null_oid(the_hash_algo), type, buf, size, &opts))
 			die(_("refusing to create malformed object"));
 		fsck_finish(&opts);
 	}
@@ -2803,7 +2527,8 @@ int index_fd(struct index_state *istate, struct object_id *oid,
 		ret = index_stream_convert_blob(istate, oid, fd, path, flags);
 	else if (!S_ISREG(st->st_mode))
 		ret = index_pipe(istate, oid, fd, type, path, flags);
-	else if (st->st_size <= big_file_threshold || type != OBJ_BLOB ||
+	else if (st->st_size <= repo_settings_get_big_file_threshold(the_repository) ||
+		 type != OBJ_BLOB ||
 		 (path && would_convert_to_git(istate, path)))
 		ret = index_core(istate, oid, fd, xsize_t(st->st_size),
 				 type, path, flags);
@@ -3134,7 +2859,8 @@ int read_loose_object(const char *path,
 		goto out_inflate;
 	}
 
-	if (*oi->typep == OBJ_BLOB && *size > big_file_threshold) {
+	if (*oi->typep == OBJ_BLOB &&
+	    *size > repo_settings_get_big_file_threshold(the_repository)) {
 		if (check_stream_oid(&stream, hdr, *size, path, expected_oid) < 0)
 			goto out_inflate;
 	} else {
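As a side note, the big-file cutoff is now read through the repository's settings rather than a global. A minimal sketch of the pattern (the helper name is invented, and the repo-settings.h header location is an assumption):

#include "repository.h"
#include "repo-settings.h"	/* assumed home of the settings accessor */

/* Decide whether a blob should be streamed rather than slurped whole. */
static int blob_wants_streaming(struct repository *repo, unsigned long size)
{
	return size > repo_settings_get_big_file_threshold(repo);
}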
object.c | 21
@@ -1,4 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
 #define DISABLE_SIGN_COMPARE_WARNINGS
 
 #include "git-compat-util.h"
@@ -18,14 +17,15 @@
 #include "commit-graph.h"
 #include "loose.h"
 
-unsigned int get_max_object_index(void)
+unsigned int get_max_object_index(const struct repository *repo)
 {
-	return the_repository->parsed_objects->obj_hash_size;
+	return repo->parsed_objects->obj_hash_size;
 }
 
-struct object *get_indexed_object(unsigned int idx)
+struct object *get_indexed_object(const struct repository *repo,
+				  unsigned int idx)
 {
-	return the_repository->parsed_objects->obj_hash[idx];
+	return repo->parsed_objects->obj_hash[idx];
 }
 
 static const char *object_type_strings[] = {
@@ -283,10 +283,11 @@ struct object *parse_object_buffer(struct repository *r, const struct object_id
 	return obj;
 }
 
-struct object *parse_object_or_die(const struct object_id *oid,
+struct object *parse_object_or_die(struct repository *repo,
+				   const struct object_id *oid,
 				   const char *name)
 {
-	struct object *o = parse_object(the_repository, oid);
+	struct object *o = parse_object(repo, oid);
 	if (o)
 		return o;
 
@@ -524,12 +525,12 @@ void object_array_remove_duplicates(struct object_array *array)
 	}
 }
 
-void clear_object_flags(unsigned flags)
+void clear_object_flags(struct repository *repo, unsigned flags)
 {
 	int i;
 
-	for (i=0; i < the_repository->parsed_objects->obj_hash_size; i++) {
-		struct object *obj = the_repository->parsed_objects->obj_hash[i];
+	for (i=0; i < repo->parsed_objects->obj_hash_size; i++) {
+		struct object *obj = repo->parsed_objects->obj_hash[i];
 		if (obj)
 			obj->flags &= ~flags;
 	}
object.h | 10
@@ -169,12 +169,13 @@ int type_from_string_gently(const char *str, ssize_t, int gentle);
 /*
  * Return the current number of buckets in the object hashmap.
  */
-unsigned int get_max_object_index(void);
+unsigned int get_max_object_index(const struct repository *repo);
 
 /*
  * Return the object from the specified bucket in the object hashmap.
  */
-struct object *get_indexed_object(unsigned int);
+struct object *get_indexed_object(const struct repository *repo,
+				  unsigned int);
 
 /*
  * This can be used to see if we have heard of the object before, but
@@ -231,7 +232,8 @@ struct object *parse_object_with_flags(struct repository *r,
  * "name" parameter is not NULL, it is included in the error message
  * (otherwise, the hex object ID is given).
  */
-struct object *parse_object_or_die(const struct object_id *oid, const char *name);
+struct object *parse_object_or_die(struct repository *repo, const struct object_id *oid,
				   const char *name);
 
 /* Given the result of read_sha1_file(), returns the object after
  * parsing it. eaten_p indicates if the object has a borrowed copy
@@ -336,7 +338,7 @@ void object_array_remove_duplicates(struct object_array *array);
  */
 void object_array_clear(struct object_array *array);
 
-void clear_object_flags(unsigned flags);
+void clear_object_flags(struct repository *repo, unsigned flags);
 
 /*
  * Clear the specified object flags from all in-core commit objects from
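Illustrative only: the object-hash accessors and flag clearing are now scoped to a repository, so a walker can reset its state without touching globals. A sketch under the assumption that the flag constant comes from revision.h, as in the pack-bitmap callers:

#include "object.h"
#include "repository.h"
#include "revision.h"	/* for UNINTERESTING */

/* Parse a tip object, then clear walk flags across this repo's objects. */
static void finish_walk(struct repository *repo, const struct object_id *tip)
{
	struct object *obj = parse_object_or_die(repo, tip, NULL);
	obj->flags &= ~UNINTERESTING;
	clear_object_flags(repo, UNINTERESTING);
}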
pack-bitmap-write.c
@@ -1,4 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
 #define DISABLE_SIGN_COMPARE_WARNINGS
 
 #include "git-compat-util.h"
@@ -51,6 +50,7 @@ void bitmap_writer_init(struct bitmap_writer *writer, struct repository *r,
 	memset(writer, 0, sizeof(struct bitmap_writer));
 	if (writer->bitmaps)
 		BUG("bitmap writer already initialized");
+	writer->repo = r;
 	writer->bitmaps = kh_init_oid_map();
 	writer->pseudo_merge_commits = kh_init_oid_map();
 	writer->to_pack = pdata;
@@ -442,9 +442,9 @@ next:
 		bb->commits[bb->commits_nr++] = r->item;
 	}
 
-	trace2_data_intmax("pack-bitmap-write", the_repository,
+	trace2_data_intmax("pack-bitmap-write", writer->repo,
 			   "num_selected_commits", writer->selected_nr);
-	trace2_data_intmax("pack-bitmap-write", the_repository,
+	trace2_data_intmax("pack-bitmap-write", writer->repo,
 			   "num_maximal_commits", num_maximal);
 
 	release_revisions(&revs);
@@ -487,7 +487,7 @@ static int fill_bitmap_tree(struct bitmap_writer *writer,
 		switch (object_type(entry.mode)) {
 		case OBJ_TREE:
 			if (fill_bitmap_tree(writer, bitmap,
-					     lookup_tree(the_repository, &entry.oid)) < 0)
+					     lookup_tree(writer->repo, &entry.oid)) < 0)
 				return -1;
 			break;
 		case OBJ_BLOB:
@@ -563,7 +563,7 @@ static int fill_bitmap_commit(struct bitmap_writer *writer,
 				return -1;
 			bitmap_set(ent->bitmap, pos);
 			prio_queue_put(tree_queue,
-				       repo_get_commit_tree(the_repository, c));
+				       repo_get_commit_tree(writer->repo, c));
 		}
 
 		for (p = c->parents; p; p = p->next) {
@@ -617,11 +617,11 @@ int bitmap_writer_build(struct bitmap_writer *writer)
 	int closed = 1; /* until proven otherwise */
 
 	if (writer->show_progress)
-		writer->progress = start_progress(the_repository,
+		writer->progress = start_progress(writer->repo,
 						  "Building bitmaps",
 						  writer->selected_nr);
 	trace2_region_enter("pack-bitmap-write", "building_bitmaps_total",
-			    the_repository);
+			    writer->repo);
 
 	old_bitmap = prepare_bitmap_git(writer->to_pack->repo);
 	if (old_bitmap)
@@ -672,10 +672,10 @@ int bitmap_writer_build(struct bitmap_writer *writer)
 	free(mapping);
 
 	trace2_region_leave("pack-bitmap-write", "building_bitmaps_total",
-			    the_repository);
-	trace2_data_intmax("pack-bitmap-write", the_repository,
+			    writer->repo);
+	trace2_data_intmax("pack-bitmap-write", writer->repo,
 			   "building_bitmaps_reused", reused_bitmaps_nr);
-	trace2_data_intmax("pack-bitmap-write", the_repository,
+	trace2_data_intmax("pack-bitmap-write", writer->repo,
 			   "building_bitmaps_pseudo_merge_reused",
 			   reused_pseudo_merge_bitmaps_nr);
 
@@ -738,7 +738,7 @@ void bitmap_writer_select_commits(struct bitmap_writer *writer,
 	}
 
 	if (writer->show_progress)
-		writer->progress = start_progress(the_repository,
+		writer->progress = start_progress(writer->repo,
 						  "Selecting bitmap commits", 0);
 
 	for (;;) {
@@ -987,7 +987,7 @@ static void write_lookup_table(struct bitmap_writer *writer, struct hashfile *f,
 	for (i = 0; i < bitmap_writer_nr_selected_commits(writer); i++)
 		table_inv[table[i]] = i;
 
-	trace2_region_enter("pack-bitmap-write", "writing_lookup_table", the_repository);
+	trace2_region_enter("pack-bitmap-write", "writing_lookup_table", writer->repo);
 	for (i = 0; i < bitmap_writer_nr_selected_commits(writer); i++) {
 		struct bitmapped_commit *selected = &writer->selected[table[i]];
 		uint32_t xor_offset = selected->xor_offset;
@@ -1014,7 +1014,7 @@ static void write_lookup_table(struct bitmap_writer *writer, struct hashfile *f,
 		hashwrite_be64(f, (uint64_t)offsets[table[i]]);
 		hashwrite_be32(f, xor_row);
 	}
-	trace2_region_leave("pack-bitmap-write", "writing_lookup_table", the_repository);
+	trace2_region_leave("pack-bitmap-write", "writing_lookup_table", writer->repo);
 
 	free(table);
 	free(table_inv);
@@ -1035,7 +1035,7 @@ static void write_hash_cache(struct hashfile *f,
 void bitmap_writer_set_checksum(struct bitmap_writer *writer,
 				const unsigned char *sha1)
 {
-	hashcpy(writer->pack_checksum, sha1, the_repository->hash_algo);
+	hashcpy(writer->pack_checksum, sha1, writer->repo->hash_algo);
 }
 
 void bitmap_writer_finish(struct bitmap_writer *writer,
@@ -1057,15 +1057,15 @@ void bitmap_writer_finish(struct bitmap_writer *writer,
 	if (writer->pseudo_merges_nr)
 		options |= BITMAP_OPT_PSEUDO_MERGES;
 
-	f = hashfd(fd, tmp_file.buf);
+	f = hashfd(writer->repo->hash_algo, fd, tmp_file.buf);
 
 	memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE));
 	header.version = htons(default_version);
 	header.options = htons(flags | options);
 	header.entry_count = htonl(bitmap_writer_nr_selected_commits(writer));
-	hashcpy(header.checksum, writer->pack_checksum, the_repository->hash_algo);
+	hashcpy(header.checksum, writer->pack_checksum, writer->repo->hash_algo);
 
-	hashwrite(f, &header, sizeof(header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz);
+	hashwrite(f, &header, sizeof(header) - GIT_MAX_RAWSZ + writer->repo->hash_algo->rawsz);
 	dump_bitmap(f, writer->commits);
 	dump_bitmap(f, writer->trees);
 	dump_bitmap(f, writer->blobs);
@@ -1105,7 +1105,7 @@ void bitmap_writer_finish(struct bitmap_writer *writer,
 	finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA,
 			  CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
 
-	if (adjust_shared_perm(the_repository, tmp_file.buf))
+	if (adjust_shared_perm(writer->repo, tmp_file.buf))
 		die_errno("unable to make temporary bitmap file readable");
 
 	if (rename(tmp_file.buf, filename))
pack-bitmap.c
@@ -1409,7 +1409,7 @@ static struct bitmap *find_boundary_objects(struct bitmap_index *bitmap_git,
 	revs->tag_objects = tmp_tags;
 
 	reset_revision_walk();
-	clear_object_flags(UNINTERESTING);
+	clear_object_flags(repo, UNINTERESTING);
 
 	/*
 	 * Then add the boundary commit(s) as fill-in traversal tips.
@@ -2060,7 +2060,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
 		struct object *object = revs->pending.objects[i].item;
 
 		if (object->type == OBJ_NONE)
-			parse_object_or_die(&object->oid, NULL);
+			parse_object_or_die(revs->repo, &object->oid, NULL);
 
 		while (object->type == OBJ_TAG) {
 			struct tag *tag = (struct tag *) object;
@@ -2070,7 +2070,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
 			else
 				object_list_insert(object, &wants);
 
-			object = parse_object_or_die(get_tagged_oid(tag), NULL);
+			object = parse_object_or_die(revs->repo, get_tagged_oid(tag), NULL);
 			object->flags |= (tag->object.flags & UNINTERESTING);
 		}
 
@@ -3216,7 +3216,8 @@ int bitmap_is_preferred_refname(struct repository *r, const char *refname)
 	return 0;
 }
 
-static int verify_bitmap_file(const char *name)
+static int verify_bitmap_file(const struct git_hash_algo *algop,
+			      const char *name)
 {
 	struct stat st;
 	unsigned char *data;
@@ -3232,7 +3233,7 @@ static int verify_bitmap_file(const char *name)
 
 	data = xmmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 	close(fd);
-	if (!hashfile_checksum_valid(data, st.st_size))
+	if (!hashfile_checksum_valid(algop, data, st.st_size))
 		res = error(_("bitmap file '%s' has invalid checksum"),
 			    name);
 
@@ -3247,14 +3248,14 @@ int verify_bitmap_files(struct repository *r)
 	for (struct multi_pack_index *m = get_multi_pack_index(r);
 	     m; m = m->next) {
 		char *midx_bitmap_name = midx_bitmap_filename(m);
-		res |= verify_bitmap_file(midx_bitmap_name);
+		res |= verify_bitmap_file(r->hash_algo, midx_bitmap_name);
 		free(midx_bitmap_name);
 	}
 
 	for (struct packed_git *p = get_all_packs(r);
 	     p; p = p->next) {
 		char *pack_bitmap_name = pack_bitmap_filename(p);
-		res |= verify_bitmap_file(pack_bitmap_name);
+		res |= verify_bitmap_file(r->hash_algo, pack_bitmap_name);
 		free(pack_bitmap_name);
 	}
pack-bitmap.h
@@ -104,6 +104,7 @@ int bitmap_has_oid_in_uninteresting(struct bitmap_index *, const struct object_i
 off_t get_disk_usage_from_bitmap(struct bitmap_index *, struct rev_info *);
 
 struct bitmap_writer {
+	struct repository *repo;
 	struct ewah_bitmap *commits;
 	struct ewah_bitmap *trees;
 	struct ewah_bitmap *blobs;
pack-check.c | 12
@@ -1,4 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
 #define DISABLE_SIGN_COMPARE_WARNINGS
 
 #include "git-compat-util.h"
@@ -44,7 +43,7 @@ int check_pack_crc(struct packed_git *p, struct pack_window **w_curs,
 	} while (len);
 
 	index_crc = p->index_data;
-	index_crc += 2 + 256 + (size_t)p->num_objects * (the_hash_algo->rawsz/4) + nr;
+	index_crc += 2 + 256 + (size_t)p->num_objects * (p->repo->hash_algo->rawsz/4) + nr;
 
 	return data_crc != ntohl(*index_crc);
 }
@@ -81,11 +80,11 @@ static int verify_packfile(struct repository *r,
 	} while (offset < pack_sig_ofs);
 	git_hash_final(hash, &ctx);
 	pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
-	if (!hasheq(hash, pack_sig, the_repository->hash_algo))
+	if (!hasheq(hash, pack_sig, r->hash_algo))
 		err = error("%s pack checksum mismatch",
 			    p->pack_name);
 	if (!hasheq(index_base + index_size - r->hash_algo->hexsz, pack_sig,
-		    the_repository->hash_algo))
+		    r->hash_algo))
 		err = error("%s pack checksum does not match its index",
 			    p->pack_name);
 	unuse_pack(w_curs);
@@ -131,7 +130,8 @@ static int verify_packfile(struct repository *r,
 		type = unpack_object_header(p, w_curs, &curpos, &size);
 		unuse_pack(w_curs);
 
-		if (type == OBJ_BLOB && big_file_threshold <= size) {
+		if (type == OBJ_BLOB &&
+		    repo_settings_get_big_file_threshold(r) <= size) {
 			/*
 			 * Let stream_object_signature() check it with
 			 * the streaming interface; no point slurping
@@ -180,7 +180,7 @@ int verify_pack_index(struct packed_git *p)
 		return error("packfile %s index not opened", p->pack_name);
 
 	/* Verify SHA1 sum of the index file */
-	if (!hashfile_checksum_valid(p->index_data, p->index_size))
+	if (!hashfile_checksum_valid(p->repo->hash_algo, p->index_data, p->index_size))
 		err = error("Packfile index for %s hash mismatch",
 			    p->pack_name);
 	return err;
pack-revindex.c
@@ -1,5 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
-
 #include "git-compat-util.h"
 #include "gettext.h"
 #include "pack-revindex.h"
@@ -9,6 +7,7 @@
 #include "strbuf.h"
 #include "trace2.h"
 #include "parse.h"
+#include "repository.h"
 #include "midx.h"
 #include "csum-file.h"
 
@@ -137,7 +136,7 @@ static void create_pack_revindex(struct packed_git *p)
 	const unsigned num_ent = p->num_objects;
 	unsigned i;
 	const char *index = p->index_data;
-	const unsigned hashsz = the_hash_algo->rawsz;
+	const unsigned hashsz = p->repo->hash_algo->rawsz;
 
 	ALLOC_ARRAY(p->revindex, num_ent + 1);
 	index += 4 * 256;
@@ -193,7 +192,11 @@ static char *pack_revindex_filename(struct packed_git *p)
 }
 
 #define RIDX_HEADER_SIZE (12)
-#define RIDX_MIN_SIZE (RIDX_HEADER_SIZE + (2 * the_hash_algo->rawsz))
+
+static size_t ridx_min_size(const struct git_hash_algo *algo)
+{
+	return RIDX_HEADER_SIZE + (2 * algo->rawsz);
+}
 
 struct revindex_header {
 	uint32_t signature;
@@ -201,7 +204,8 @@ struct revindex_header {
 	uint32_t hash_id;
 };
 
-static int load_revindex_from_disk(char *revindex_name,
+static int load_revindex_from_disk(const struct git_hash_algo *algo,
+				   char *revindex_name,
 				   uint32_t num_objects,
 				   const uint32_t **data_p, size_t *len_p)
 {
@@ -228,12 +232,12 @@ static int load_revindex_from_disk(char *revindex_name,
 
 	revindex_size = xsize_t(st.st_size);
 
-	if (revindex_size < RIDX_MIN_SIZE) {
+	if (revindex_size < ridx_min_size(algo)) {
 		ret = error(_("reverse-index file %s is too small"), revindex_name);
 		goto cleanup;
 	}
 
-	if (revindex_size - RIDX_MIN_SIZE != st_mult(sizeof(uint32_t), num_objects)) {
+	if (revindex_size - ridx_min_size(algo) != st_mult(sizeof(uint32_t), num_objects)) {
 		ret = error(_("reverse-index file %s is corrupt"), revindex_name);
 		goto cleanup;
 	}
@@ -279,7 +283,8 @@ int load_pack_revindex_from_disk(struct packed_git *p)
 
 	revindex_name = pack_revindex_filename(p);
 
-	ret = load_revindex_from_disk(revindex_name,
+	ret = load_revindex_from_disk(p->repo->hash_algo,
+				      revindex_name,
 				      p->num_objects,
 				      &p->revindex_map,
 				      &p->revindex_size);
@@ -322,7 +327,8 @@ int verify_pack_revindex(struct packed_git *p)
 	if (!p->revindex_map || !p->revindex_data)
 		return res;
 
-	if (!hashfile_checksum_valid((const unsigned char *)p->revindex_map, p->revindex_size)) {
+	if (!hashfile_checksum_valid(p->repo->hash_algo,
+				     (const unsigned char *)p->revindex_map, p->revindex_size)) {
 		error(_("invalid checksum"));
 		res = -1;
 	}
@@ -374,13 +380,13 @@ int load_midx_revindex(struct multi_pack_index *m)
 		 * not want to accidentally call munmap() in the middle of the
 		 * MIDX.
 		 */
-		trace2_data_string("load_midx_revindex", the_repository,
+		trace2_data_string("load_midx_revindex", m->repo,
 				   "source", "midx");
 		m->revindex_data = (const uint32_t *)m->chunk_revindex;
 		return 0;
 	}
 
-	trace2_data_string("load_midx_revindex", the_repository,
+	trace2_data_string("load_midx_revindex", m->repo,
 			   "source", "rev");
 
 	if (m->has_chain)
@@ -392,7 +398,8 @@ int load_midx_revindex(struct multi_pack_index *m)
 			     m->object_dir, get_midx_checksum(m),
 			     MIDX_EXT_REV);
 
-	ret = load_revindex_from_disk(revindex_name.buf,
+	ret = load_revindex_from_disk(m->repo->hash_algo,
+				      revindex_name.buf,
 				      m->num_objects,
 				      &m->revindex_map,
 				      &m->revindex_len);
@@ -424,7 +431,7 @@ int offset_to_pack_pos(struct packed_git *p, off_t ofs, uint32_t *pos)
 {
 	unsigned lo, hi;
 
-	if (load_pack_revindex(the_repository, p) < 0)
+	if (load_pack_revindex(p->repo, p) < 0)
 		return -1;
 
 	lo = 0;
@@ -470,7 +477,7 @@ off_t pack_pos_to_offset(struct packed_git *p, uint32_t pos)
 	if (p->revindex)
 		return p->revindex[pos].offset;
 	else if (pos == p->num_objects)
-		return p->pack_size - the_hash_algo->rawsz;
+		return p->pack_size - p->repo->hash_algo->rawsz;
 	else
 		return nth_packed_object_offset(p, pack_pos_to_index(p, pos));
 }
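The old RIDX_MIN_SIZE macro baked the_hash_algo into its expansion; as a function of the algorithm, the minimum file size can be computed per repository. For concreteness, a sketch of what the floor works out to (the numbers are derived from the rawsz constants, not stated in the diff):

#include "hash.h"

static void ridx_floor_examples(void)
{
	/* 12-byte header + 2 trailing hashes: 12 + 2 * 20 = 52 for SHA-1 */
	size_t sha1_min = 12 + 2 * hash_algos[GIT_HASH_SHA1].rawsz;
	/* 12 + 2 * 32 = 76 for SHA-256 */
	size_t sha256_min = 12 + 2 * hash_algos[GIT_HASH_SHA256].rawsz;
	(void)sha1_min;
	(void)sha256_min;
}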
pack-write.c | 55
@@ -1,5 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
-
 #include "git-compat-util.h"
 #include "environment.h"
 #include "gettext.h"
@@ -56,7 +54,7 @@ static int need_large_offset(off_t offset, const struct pack_idx_option *opts)
  * The *sha1 contains the pack content SHA1 hash.
  * The objects array passed in will be sorted by SHA1 on exit.
  */
-const char *write_idx_file(const struct git_hash_algo *hash_algo,
+const char *write_idx_file(struct repository *repo,
 			   const char *index_name, struct pack_idx_entry **objects,
 			   int nr_objects, const struct pack_idx_option *opts,
 			   const unsigned char *sha1)
@@ -82,7 +80,7 @@ const char *write_idx_file(const struct git_hash_algo *hash_algo,
 
 	if (opts->flags & WRITE_IDX_VERIFY) {
 		assert(index_name);
-		f = hashfd_check(index_name);
+		f = hashfd_check(repo->hash_algo, index_name);
 	} else {
 		if (!index_name) {
 			struct strbuf tmp_file = STRBUF_INIT;
@@ -92,7 +90,7 @@ const char *write_idx_file(const struct git_hash_algo *hash_algo,
 			unlink(index_name);
 			fd = xopen(index_name, O_CREAT|O_EXCL|O_WRONLY, 0600);
 		}
-		f = hashfd(fd, index_name);
+		f = hashfd(repo->hash_algo, fd, index_name);
 	}
 
 	/* if last object's offset is >= 2^31 we should use index V2 */
@@ -131,7 +129,7 @@ const char *write_idx_file(const struct git_hash_algo *hash_algo,
 		struct pack_idx_entry *obj = *list++;
 		if (index_version < 2)
 			hashwrite_be32(f, obj->offset);
-		hashwrite(f, obj->oid.hash, hash_algo->rawsz);
+		hashwrite(f, obj->oid.hash, repo->hash_algo->rawsz);
 		if ((opts->flags & WRITE_IDX_STRICT) &&
 		    (i && oideq(&list[-2]->oid, &obj->oid)))
 			die("The same object %s appears twice in the pack",
@@ -173,7 +171,7 @@ const char *write_idx_file(const struct git_hash_algo *hash_algo,
 		}
 	}
 
-	hashwrite(f, sha1, hash_algo->rawsz);
+	hashwrite(f, sha1, repo->hash_algo->rawsz);
 	finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA,
 			  CSUM_HASH_IN_STREAM | CSUM_CLOSE |
 			  ((opts->flags & WRITE_IDX_VERIFY) ? 0 : CSUM_FSYNC));
@@ -217,7 +215,7 @@ static void write_rev_trailer(const struct git_hash_algo *hash_algo,
 	hashwrite(f, hash, hash_algo->rawsz);
 }
 
-char *write_rev_file(const struct git_hash_algo *hash_algo,
+char *write_rev_file(struct repository *repo,
 		     const char *rev_name,
 		     struct pack_idx_entry **objects,
 		     uint32_t nr_objects,
@@ -236,7 +234,7 @@ char *write_rev_file(const struct git_hash_algo *hash_algo,
 		pack_order[i] = i;
 	QSORT_S(pack_order, nr_objects, pack_order_cmp, objects);
 
-	ret = write_rev_file_order(hash_algo, rev_name, pack_order, nr_objects,
+	ret = write_rev_file_order(repo, rev_name, pack_order, nr_objects,
 				   hash, flags);
 
 	free(pack_order);
@@ -244,7 +242,7 @@ char *write_rev_file(const struct git_hash_algo *hash_algo,
 	return ret;
 }
 
-char *write_rev_file_order(const struct git_hash_algo *hash_algo,
+char *write_rev_file_order(struct repository *repo,
 			   const char *rev_name,
 			   uint32_t *pack_order,
 			   uint32_t nr_objects,
@@ -268,7 +266,7 @@ char *write_rev_file_order(const struct git_hash_algo *hash_algo,
 			fd = xopen(rev_name, O_CREAT|O_EXCL|O_WRONLY, 0600);
 			path = xstrdup(rev_name);
 		}
-		f = hashfd(fd, path);
+		f = hashfd(repo->hash_algo, fd, path);
 	} else if (flags & WRITE_REV_VERIFY) {
 		struct stat statbuf;
 		if (stat(rev_name, &statbuf)) {
@@ -278,18 +276,18 @@ char *write_rev_file_order(const struct git_hash_algo *hash_algo,
 			} else
 				die_errno(_("could not stat: %s"), rev_name);
 		}
-		f = hashfd_check(rev_name);
+		f = hashfd_check(repo->hash_algo, rev_name);
 		path = xstrdup(rev_name);
 	} else {
 		return NULL;
 	}
 
-	write_rev_header(hash_algo, f);
+	write_rev_header(repo->hash_algo, f);
 
 	write_rev_index_positions(f, pack_order, nr_objects);
-	write_rev_trailer(hash_algo, f, hash);
+	write_rev_trailer(repo->hash_algo, f, hash);
 
-	if (adjust_shared_perm(the_repository, path) < 0)
+	if (adjust_shared_perm(repo, path) < 0)
 		die(_("failed to make %s readable"), path);
 
 	finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA,
@@ -330,7 +328,7 @@ static void write_mtimes_trailer(const struct git_hash_algo *hash_algo,
 	hashwrite(f, hash, hash_algo->rawsz);
 }
 
-static char *write_mtimes_file(const struct git_hash_algo *hash_algo,
+static char *write_mtimes_file(struct repository *repo,
 			       struct packing_data *to_pack,
 			       struct pack_idx_entry **objects,
 			       uint32_t nr_objects,
@@ -346,13 +344,13 @@ static char *write_mtimes_file(const struct git_hash_algo *hash_algo,
 
 	fd = odb_mkstemp(&tmp_file, "pack/tmp_mtimes_XXXXXX");
 	mtimes_name = strbuf_detach(&tmp_file, NULL);
-	f = hashfd(fd, mtimes_name);
+	f = hashfd(repo->hash_algo, fd, mtimes_name);
 
-	write_mtimes_header(hash_algo, f);
+	write_mtimes_header(repo->hash_algo, f);
 	write_mtimes_objects(f, to_pack, objects, nr_objects);
-	write_mtimes_trailer(hash_algo, f, hash);
+	write_mtimes_trailer(repo->hash_algo, f, hash);
 
-	if (adjust_shared_perm(the_repository, mtimes_name) < 0)
+	if (adjust_shared_perm(repo, mtimes_name) < 0)
 		die(_("failed to make %s readable"), mtimes_name);
 
 	finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA,
@@ -527,14 +525,15 @@ int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
 	return n;
 }
 
-struct hashfile *create_tmp_packfile(char **pack_tmp_name)
+struct hashfile *create_tmp_packfile(struct repository *repo,
+				     char **pack_tmp_name)
 {
 	struct strbuf tmpname = STRBUF_INIT;
 	int fd;
 
 	fd = odb_mkstemp(&tmpname, "pack/tmp_pack_XXXXXX");
 	*pack_tmp_name = strbuf_detach(&tmpname, NULL);
-	return hashfd(fd, *pack_tmp_name);
+	return hashfd(repo->hash_algo, fd, *pack_tmp_name);
 }
 
 static void rename_tmp_packfile(struct strbuf *name_prefix, const char *source,
@@ -555,7 +554,7 @@ void rename_tmp_packfile_idx(struct strbuf *name_buffer,
 	rename_tmp_packfile(name_buffer, *idx_tmp_name, "idx");
 }
 
-void stage_tmp_packfiles(const struct git_hash_algo *hash_algo,
+void stage_tmp_packfiles(struct repository *repo,
 			 struct strbuf *name_buffer,
 			 const char *pack_tmp_name,
 			 struct pack_idx_entry **written_list,
@@ -568,19 +567,19 @@ void stage_tmp_packfiles(const struct git_hash_algo *hash_algo,
 	char *rev_tmp_name = NULL;
 	char *mtimes_tmp_name = NULL;
 
-	if (adjust_shared_perm(the_repository, pack_tmp_name))
+	if (adjust_shared_perm(repo, pack_tmp_name))
 		die_errno("unable to make temporary pack file readable");
 
-	*idx_tmp_name = (char *)write_idx_file(hash_algo, NULL, written_list,
+	*idx_tmp_name = (char *)write_idx_file(repo, NULL, written_list,
 					       nr_written, pack_idx_opts, hash);
-	if (adjust_shared_perm(the_repository, *idx_tmp_name))
+	if (adjust_shared_perm(repo, *idx_tmp_name))
 		die_errno("unable to make temporary index file readable");
 
-	rev_tmp_name = write_rev_file(hash_algo, NULL, written_list, nr_written,
+	rev_tmp_name = write_rev_file(repo, NULL, written_list, nr_written,
 				      hash, pack_idx_opts->flags);
 
 	if (pack_idx_opts->flags & WRITE_MTIMES) {
-		mtimes_tmp_name = write_mtimes_file(hash_algo, to_pack,
+		mtimes_tmp_name = write_mtimes_file(repo, to_pack,
 						    written_list, nr_written,
 						    hash);
 	}
pack.h | 11
@@ -87,7 +87,7 @@ struct progress;
 /* Note, the data argument could be NULL if object type is blob */
 typedef int (*verify_fn)(const struct object_id *, enum object_type, unsigned long, void*, int*);
 
-const char *write_idx_file(const struct git_hash_algo *hash_algo,
+const char *write_idx_file(struct repository *repo,
 			   const char *index_name,
 			   struct pack_idx_entry **objects,
 			   int nr_objects,
@@ -106,13 +106,13 @@ struct ref;
 
 void write_promisor_file(const char *promisor_name, struct ref **sought, int nr_sought);
 
-char *write_rev_file(const struct git_hash_algo *hash_algo,
+char *write_rev_file(struct repository *repo,
 		     const char *rev_name,
 		     struct pack_idx_entry **objects,
 		     uint32_t nr_objects,
 		     const unsigned char *hash,
 		     unsigned flags);
-char *write_rev_file_order(const struct git_hash_algo *hash_algo,
+char *write_rev_file_order(struct repository *repo,
 			   const char *rev_name,
 			   uint32_t *pack_order,
 			   uint32_t nr_objects,
@@ -134,8 +134,9 @@ int read_pack_header(int fd, struct pack_header *);
 
 struct packing_data;
 
-struct hashfile *create_tmp_packfile(char **pack_tmp_name);
-void stage_tmp_packfiles(const struct git_hash_algo *hash_algo,
+struct hashfile *create_tmp_packfile(struct repository *repo,
+				     char **pack_tmp_name);
+void stage_tmp_packfiles(struct repository *repo,
 			 struct strbuf *name_buffer,
 			 const char *pack_tmp_name,
 			 struct pack_idx_entry **written_list,
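A sketch of the write path under the new pack.h signatures (all names local to this example; the pack data itself is elided): the repository is threaded through once and each helper derives repo->hash_algo itself:

#include "pack.h"
#include "repository.h"

/* Open a temporary packfile and, once written, produce its .idx. */
static const char *make_pack_and_idx(struct repository *repo,
				     struct pack_idx_entry **objects,
				     int nr_objects,
				     const struct pack_idx_option *opts,
				     const unsigned char *pack_hash)
{
	char *pack_tmp_name = NULL;
	struct hashfile *f = create_tmp_packfile(repo, &pack_tmp_name);

	/* ... pack data would be written to f and finalized here ... */
	(void)f;
	return write_idx_file(repo, NULL, objects, nr_objects, opts,
			      pack_hash);
}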
parse-options-cb.c
@@ -145,7 +145,7 @@ int parse_opt_object_id(const struct option *opt, const char *arg, int unset)
 	struct object_id *target = opt->value;
 
 	if (unset) {
-		oidcpy(target, null_oid());
+		oidcpy(target, null_oid(the_hash_algo));
 		return 0;
 	}
 	if (!arg)
range-diff.c
@@ -467,7 +467,7 @@ static struct diff_filespec *get_filespec(const char *name, const char *p)
 {
 	struct diff_filespec *spec = alloc_filespec(name);
 
-	fill_filespec(spec, null_oid(), 0, 0100644);
+	fill_filespec(spec, null_oid(the_hash_algo), 0, 0100644);
 	spec->data = (char *)p;
 	spec->size = strlen(p);
 	spec->should_munmap = 0;
@@ -45,7 +45,7 @@ static void add_one_file(const char *path, struct rev_info *revs)
 	}
 	strbuf_trim(&buf);
 	if (!get_oid_hex(buf.buf, &oid)) {
-		object = parse_object_or_die(&oid, buf.buf);
+		object = parse_object_or_die(the_repository, &oid, buf.buf);
 		add_pending_object(revs, object, "");
 	}
 	strbuf_release(&buf);
@@ -94,7 +94,7 @@ static int add_one_ref(const char *path, const char *referent UNUSED, const stru
 		return 0;
 	}
 
-	object = parse_object_or_die(oid, path);
+	object = parse_object_or_die(the_repository, oid, path);
 	add_pending_object(revs, object, "");
 
 	return 0;
@@ -218,7 +218,7 @@ static void add_recent_object(const struct object_id *oid,
 	switch (type) {
 	case OBJ_TAG:
 	case OBJ_COMMIT:
-		obj = parse_object_or_die(oid, NULL);
+		obj = parse_object_or_die(the_repository, oid, NULL);
 		break;
 	case OBJ_TREE:
 		obj = (struct object *)lookup_tree(the_repository, oid);
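parse_object_or_die() gets the same treatment, taking the repository to parse from as its first argument. A hedged sketch (wrapper name assumed):

#include "git-compat-util.h"
#include "object.h"
#include "repository.h"

/* Hypothetical wrapper: resolve an OID in an arbitrary repository,
 * dying with "name" in the error message on failure. */
static struct object *must_parse(struct repository *repo,
				 const struct object_id *oid,
				 const char *name)
{
	return parse_object_or_die(repo, oid, name); /* was: (oid, name) */
}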
@@ -1735,7 +1735,7 @@ static int verify_hdr(const struct cache_header *hdr, unsigned long size)
 	end = (unsigned char *)hdr + size;
 	start = end - the_hash_algo->rawsz;
 	oidread(&oid, start, the_repository->hash_algo);
-	if (oideq(&oid, null_oid()))
+	if (oideq(&oid, null_oid(the_hash_algo)))
 		return 0;
 
 	the_hash_algo->init_fn(&c);
@@ -2848,7 +2848,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
 	struct strbuf sb = STRBUF_INIT;
 	int nr, nr_threads, ret;
 
-	f = hashfd(tempfile->fd, tempfile->filename.buf);
+	f = hashfd(the_repository->hash_algo, tempfile->fd, tempfile->filename.buf);
 
 	prepare_repo_settings(r);
 	f->skip_hash = r->settings.index_skip_hash;
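The hashfd() change comes from the csum-file patch in this series: the checksumming function is now chosen by an explicit hash-algorithm argument rather than by the global the_hash_algo. A small sketch, assuming the same usage as the index-writer hunk above:

#include "git-compat-util.h"
#include "csum-file.h"
#include "hash.h"

/*
 * Hypothetical: wrap an open descriptor in a hashfile that checksums
 * with the given algorithm; previously hashfd(fd, name) picked the
 * algorithm from the_hash_algo behind the caller's back.
 */
static struct hashfile *open_checksummed(const struct git_hash_algo *algop,
					 int fd, const char *name)
{
	return hashfd(algop, fd, name);
}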
diff --git a/refs.c b/refs.c
@@ -1377,7 +1377,7 @@ int ref_transaction_create(struct ref_transaction *transaction,
 		return 1;
 	}
 	return ref_transaction_update(transaction, refname, new_oid,
-				      null_oid(), new_target, NULL, flags,
+				      null_oid(the_hash_algo), new_target, NULL, flags,
 				      msg, err);
 }
 
@@ -1396,7 +1396,7 @@ int ref_transaction_delete(struct ref_transaction *transaction,
 	if (old_target && !(flags & REF_NO_DEREF))
 		BUG("delete cannot operate on symrefs with deref mode");
 	return ref_transaction_update(transaction, refname,
-				      null_oid(), old_oid,
+				      null_oid(the_hash_algo), old_oid,
 				      NULL, old_target, flags,
 				      msg, err);
 }
@@ -2180,7 +2180,7 @@ struct ref_store *repo_get_submodule_ref_store(struct repository *repo,
 	subrepo = xmalloc(sizeof(*subrepo));
 
 	if (repo_submodule_init(subrepo, repo, submodule,
-				null_oid())) {
+				null_oid(the_hash_algo))) {
 		free(subrepo);
 		goto done;
 	}
@@ -2365,14 +2365,14 @@ static int run_transaction_hook(struct ref_transaction *transaction,
 		strbuf_reset(&buf);
 
 		if (!(update->flags & REF_HAVE_OLD))
-			strbuf_addf(&buf, "%s ", oid_to_hex(null_oid()));
+			strbuf_addf(&buf, "%s ", oid_to_hex(null_oid(the_hash_algo)));
 		else if (update->old_target)
 			strbuf_addf(&buf, "ref:%s ", update->old_target);
 		else
 			strbuf_addf(&buf, "%s ", oid_to_hex(&update->old_oid));
 
 		if (!(update->flags & REF_HAVE_NEW))
-			strbuf_addf(&buf, "%s ", oid_to_hex(null_oid()));
+			strbuf_addf(&buf, "%s ", oid_to_hex(null_oid(the_hash_algo)));
 		else if (update->new_target)
 			strbuf_addf(&buf, "ref:%s ", update->new_target);
 		else
@@ -2857,7 +2857,7 @@ static int migrate_one_ref(const char *refname, const char *referent UNUSED, con
 	if (ret < 0)
 		goto done;
 
-	ret = ref_transaction_update(data->transaction, refname, NULL, null_oid(),
+	ret = ref_transaction_update(data->transaction, refname, NULL, null_oid(the_hash_algo),
 				     symref_target.buf, NULL,
 				     REF_SKIP_CREATE_REFLOG | REF_NO_DEREF, NULL, data->errbuf);
 	if (ret < 0)
@@ -227,7 +227,7 @@ static int debug_read_raw_ref(struct ref_store *ref_store, const char *refname,
 	struct debug_ref_store *drefs = (struct debug_ref_store *)ref_store;
 	int res = 0;
 
-	oidcpy(oid, null_oid());
+	oidcpy(oid, null_oid(ref_store->repo->hash_algo));
 	res = drefs->refs->be->read_raw_ref(drefs->refs, refname, oid, referent,
 					    type, failure_errno);
 
@@ -1265,7 +1265,7 @@ static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
 	ref_transaction_add_update(
 			transaction, r->name,
 			REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD | REF_IS_PRUNING,
-			null_oid(), &r->oid, NULL, NULL, NULL, NULL);
+			null_oid(the_hash_algo), &r->oid, NULL, NULL, NULL, NULL);
 	if (ref_transaction_commit(transaction, &err))
 		goto cleanup;
 
@@ -20,6 +20,13 @@ static void repo_cfg_int(struct repository *r, const char *key, int *dest,
 		*dest = def;
 }
 
+static void repo_cfg_ulong(struct repository *r, const char *key, unsigned long *dest,
+			   unsigned long def)
+{
+	if (repo_config_get_ulong(r, key, dest))
+		*dest = def;
+}
+
 void prepare_repo_settings(struct repository *r)
 {
 	int experimental;
@@ -151,6 +158,19 @@ void repo_settings_clear(struct repository *r)
 	r->settings = empty;
 }
 
+unsigned long repo_settings_get_big_file_threshold(struct repository *repo)
+{
+	if (!repo->settings.big_file_threshold)
+		repo_cfg_ulong(repo, "core.bigfilethreshold",
+			       &repo->settings.big_file_threshold, 512 * 1024 * 1024);
+	return repo->settings.big_file_threshold;
+}
+
+void repo_settings_set_big_file_threshold(struct repository *repo, unsigned long value)
+{
+	repo->settings.big_file_threshold = value;
+}
+
 enum log_refs_config repo_settings_get_log_all_ref_updates(struct repository *repo)
 {
 	const char *value;
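The getter reads core.bigfilethreshold lazily on first use, caches it in the settings struct, and falls back to the 512 MiB default; the setter exists for callers that override the threshold at runtime. A sketch of the intended call pattern, per the hunks above (both helpers are hypothetical):

#include "git-compat-util.h"
#include "repository.h"
#include "repo-settings.h"

/* Hypothetical: stream blobs above the per-repository threshold
 * instead of loading them whole, as the archive and streaming
 * call sites in this commit now do. */
static int should_stream(struct repository *repo, unsigned long size)
{
	return size > repo_settings_get_big_file_threshold(repo);
}

/* Hypothetical: a runtime override replacing direct writes to the
 * old big_file_threshold global. */
static void force_threshold(struct repository *repo, unsigned long bytes)
{
	repo_settings_set_big_file_threshold(repo, bytes);
}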
@@ -64,6 +64,7 @@ struct repo_settings {
 	size_t delta_base_cache_limit;
 	size_t packed_git_window_size;
 	size_t packed_git_limit;
+	unsigned long big_file_threshold;
 
 	char *hooks_path;
 };
@@ -88,6 +89,10 @@ int repo_settings_get_warn_ambiguous_refs(struct repository *repo);
 /* Read the value for "core.hooksPath". */
 const char *repo_settings_get_hooks_path(struct repository *repo);
 
+/* Read and set the value for "core.bigFileThreshold". */
+unsigned long repo_settings_get_big_file_threshold(struct repository *repo);
+void repo_settings_set_big_file_threshold(struct repository *repo, unsigned long value);
+
 /* Read, set or reset the value for "core.sharedRepository". */
 int repo_settings_get_shared_repository(struct repository *repo);
 void repo_settings_set_shared_repository(struct repository *repo, int value);
diff --git a/reset.c b/reset.c
@@ -80,7 +80,7 @@ static int update_refs(const struct reset_head_opts *opts,
 	}
 	if (!ret && run_hook)
 		run_hooks_l(the_repository, "post-checkout",
-			    oid_to_hex(head ? head : null_oid()),
+			    oid_to_hex(head ? head : null_oid(the_hash_algo)),
 			    oid_to_hex(oid), "1", NULL);
 	strbuf_release(&msg);
 	return ret;
@@ -3612,7 +3612,8 @@ static void set_children(struct rev_info *revs)
 
 void reset_revision_walk(void)
 {
-	clear_object_flags(SEEN | ADDED | SHOWN | TOPO_WALK_EXPLORED | TOPO_WALK_INDEGREE);
+	clear_object_flags(the_repository,
+			   SEEN | ADDED | SHOWN | TOPO_WALK_EXPLORED | TOPO_WALK_INDEGREE);
 }
 
 static int mark_uninteresting(const struct object_id *oid,
diff --git a/sequencer.c b/sequencer.c
@@ -265,8 +265,8 @@ static struct update_ref_record *init_update_ref_record(const char *ref)
 
 	CALLOC_ARRAY(rec, 1);
 
-	oidcpy(&rec->before, null_oid());
-	oidcpy(&rec->after, null_oid());
+	oidcpy(&rec->before, null_oid(the_hash_algo));
+	oidcpy(&rec->after, null_oid(the_hash_algo));
 
 	/* This may fail, but that's fine, we will keep the null OID. */
 	refs_read_ref(get_main_ref_store(the_repository), ref, &rec->before);
@@ -667,7 +667,7 @@ static int fast_forward_to(struct repository *r,
 	if (!transaction ||
 	    ref_transaction_update(transaction, "HEAD",
 				   to, unborn && !is_rebase_i(opts) ?
-				   null_oid() : from, NULL, NULL,
+				   null_oid(the_hash_algo) : from, NULL, NULL,
 				   0, sb.buf, &err) ||
 	    ref_transaction_commit(transaction, &err)) {
 		ref_transaction_free(transaction);
@@ -1301,7 +1301,7 @@ int update_head_with_reflog(const struct commit *old_head,
 				   0, err);
 	if (!transaction ||
 	    ref_transaction_update(transaction, "HEAD", new_head,
-				   old_head ? &old_head->object.oid : null_oid(),
+				   old_head ? &old_head->object.oid : null_oid(the_hash_algo),
 				   NULL, NULL, 0, sb.buf, err) ||
 	    ref_transaction_commit(transaction, err)) {
 		ret = -1;
@@ -4683,7 +4683,7 @@ static void create_autostash_internal(struct repository *r,
 		write_file(path, "%s", oid_to_hex(&oid));
 	} else {
 		refs_update_ref(get_main_ref_store(r), "", refname,
				&oid, null_oid(the_hash_algo), 0, UPDATE_REFS_DIE_ON_ERR);
 	}
 
 	printf(_("Created autostash: %s\n"), buf.buf);
diff --git a/shallow.c b/shallow.c
@@ -226,7 +226,7 @@ struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
 	 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
 	 * set at this point. But better be safe than sorry.
 	 */
-	clear_object_flags(both_flags);
+	clear_object_flags(the_repository, both_flags);
 
 	is_repository_shallow(the_repository); /* make sure shallows are read */
 
@@ -613,9 +613,9 @@ static void paint_down(struct paint_info *info, const struct object_id *oid,
 		}
 	}
 
-	nr = get_max_object_index();
+	nr = get_max_object_index(the_repository);
 	for (i = 0; i < nr; i++) {
-		struct object *o = get_indexed_object(i);
+		struct object *o = get_indexed_object(the_repository, i);
 		if (o && o->type == OBJ_COMMIT)
 			o->flags &= ~SEEN;
 	}
@@ -675,9 +675,9 @@ void assign_shallow_commits_to_refs(struct shallow_info *info,
 	 * Prepare the commit graph to track what refs can reach what
 	 * (new) shallow commits.
 	 */
-	nr = get_max_object_index();
+	nr = get_max_object_index(the_repository);
 	for (i = 0; i < nr; i++) {
-		struct object *o = get_indexed_object(i);
+		struct object *o = get_indexed_object(the_repository, i);
 		if (!o || o->type != OBJ_COMMIT)
 			continue;
 
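get_max_object_index() and get_indexed_object() now name the repository whose parsed-object table they walk; the loop shape is otherwise unchanged. A sketch in the spirit of the paint_down() hunk above (the helper name is hypothetical):

#include "git-compat-util.h"
#include "object.h"
#include "repository.h"

/* Hypothetical: clear a flag from every parsed commit in "repo",
 * using the same iteration pattern as the shallow.c hunks above. */
static void clear_commit_flag(struct repository *repo, unsigned flag)
{
	unsigned int i, nr = get_max_object_index(repo);

	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(repo, i);
		if (o && o->type == OBJ_COMMIT)
			o->flags &= ~flag;
	}
}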
@@ -431,7 +431,8 @@ static int istream_source(struct git_istream *st,
 		st->open = open_istream_loose;
 		return 0;
 	case OI_PACKED:
-		if (!oi.u.packed.is_delta && big_file_threshold < size) {
+		if (!oi.u.packed.is_delta &&
+		    repo_settings_get_big_file_threshold(the_repository) < size) {
 			st->u.in_pack.pack = oi.u.packed.pack;
 			st->u.in_pack.pos = oi.u.packed.offset;
 			st->open = open_istream_pack_non_delta;
@@ -831,7 +831,7 @@ static int gitmodules_cb(const char *var, const char *value,
 
 	parameter.cache = repo->submodule_cache;
 	parameter.treeish_name = NULL;
-	parameter.gitmodules_oid = null_oid();
+	parameter.gitmodules_oid = null_oid(the_hash_algo);
 	parameter.overwrite = 1;
 
 	return parse_config(var, value, ctx, &parameter);
diff --git a/submodule.c b/submodule.c
@@ -124,7 +124,7 @@ int update_path_in_gitmodules(const char *oldpath, const char *newpath)
 	if (is_gitmodules_unmerged(the_repository->index))
 		die(_("Cannot change unmerged .gitmodules, resolve merge conflicts first"));
 
-	submodule = submodule_from_path(the_repository, null_oid(), oldpath);
+	submodule = submodule_from_path(the_repository, null_oid(the_hash_algo), oldpath);
 	if (!submodule || !submodule->name) {
 		warning(_("Could not find section in .gitmodules where path=%s"), oldpath);
 		return -1;
@@ -153,7 +153,7 @@ int remove_path_from_gitmodules(const char *path)
 	if (is_gitmodules_unmerged(the_repository->index))
 		die(_("Cannot change unmerged .gitmodules, resolve merge conflicts first"));
 
-	submodule = submodule_from_path(the_repository, null_oid(), path);
+	submodule = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
 	if (!submodule || !submodule->name) {
 		warning(_("Could not find section in .gitmodules where path=%s"), path);
 		return -1;
@@ -204,7 +204,7 @@ void set_diffopt_flags_from_submodule_config(struct diff_options *diffopt,
 					     const char *path)
 {
 	const struct submodule *submodule = submodule_from_path(the_repository,
-								null_oid(),
+								null_oid(the_hash_algo),
 								path);
 	if (submodule) {
 		const char *ignore;
@@ -312,7 +312,7 @@ int is_tree_submodule_active(struct repository *repo,
 
 int is_submodule_active(struct repository *repo, const char *path)
 {
-	return is_tree_submodule_active(repo, null_oid(), path);
+	return is_tree_submodule_active(repo, null_oid(the_hash_algo), path);
 }
 
 int is_submodule_populated_gently(const char *path, int *return_error_code)
@@ -778,7 +778,7 @@ const struct submodule *submodule_from_ce(const struct cache_entry *ce)
 	if (!should_update_submodules())
 		return NULL;
 
-	return submodule_from_path(the_repository, null_oid(), ce->name);
+	return submodule_from_path(the_repository, null_oid(the_hash_algo), ce->name);
 }
 
 
@@ -1062,7 +1062,7 @@ static int submodule_needs_pushing(struct repository *r,
 				   const char *path,
 				   struct oid_array *commits)
 {
-	if (!submodule_has_commits(r, path, null_oid(), commits))
+	if (!submodule_has_commits(r, path, null_oid(the_hash_algo), commits))
 		/*
 		 * NOTE: We do consider it safe to return "no" here. The
 		 * correct answer would be "We do not know" instead of
@@ -1126,7 +1126,7 @@ int find_unpushed_submodules(struct repository *r,
 		const struct submodule *submodule;
 		const char *path = NULL;
 
-		submodule = submodule_from_name(r, null_oid(), name->string);
+		submodule = submodule_from_name(r, null_oid(the_hash_algo), name->string);
 		if (submodule)
 			path = submodule->path;
 		else
@@ -1351,7 +1351,7 @@ static void calculate_changed_submodule_paths(struct repository *r,
 		const struct submodule *submodule;
 		const char *path = NULL;
 
-		submodule = submodule_from_name(r, null_oid(), name->string);
+		submodule = submodule_from_name(r, null_oid(the_hash_algo), name->string);
 		if (submodule)
 			path = submodule->path;
 		else
@@ -1360,7 +1360,7 @@ static void calculate_changed_submodule_paths(struct repository *r,
 		if (!path)
 			continue;
 
-		if (submodule_has_commits(r, path, null_oid(), &cs_data->new_commits)) {
+		if (submodule_has_commits(r, path, null_oid(the_hash_algo), &cs_data->new_commits)) {
 			changed_submodule_data_clear(cs_data);
 			*name->string = '\0';
 		}
@@ -1602,7 +1602,7 @@ get_fetch_task_from_index(struct submodule_parallel_fetch *spf,
 		if (!S_ISGITLINK(ce->ce_mode))
 			continue;
 
-		task = fetch_task_create(spf, ce->name, null_oid());
+		task = fetch_task_create(spf, ce->name, null_oid(the_hash_algo));
 		if (!task)
 			continue;
 
@@ -2166,7 +2166,7 @@ int submodule_move_head(const char *path, const char *super_prefix,
 	if (old_head && !is_submodule_populated_gently(path, error_code_ptr))
 		return 0;
 
-	sub = submodule_from_path(the_repository, null_oid(), path);
+	sub = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
 
 	if (!sub)
 		BUG("could not get submodule information for '%s'", path);
@@ -2376,7 +2376,7 @@ static void relocate_single_git_dir_into_superproject(const char *path,
 
 	real_old_git_dir = real_pathdup(old_git_dir, 1);
 
-	sub = submodule_from_path(the_repository, null_oid(), path);
+	sub = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
 	if (!sub)
 		die(_("could not lookup name for submodule '%s'"), path);
 
@@ -2462,7 +2462,7 @@ void absorb_git_dir_into_superproject(const char *path,
 	 * superproject did not rewrite the git file links yet,
 	 * fix it now.
 	 */
-	sub = submodule_from_path(the_repository, null_oid(), path);
+	sub = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
 	if (!sub)
 		die(_("could not lookup name for submodule '%s'"), path);
 	submodule_name_to_gitdir(&sub_gitdir, the_repository, sub->name);
@@ -2594,7 +2594,7 @@ int submodule_to_gitdir(struct repository *repo,
 		strbuf_addstr(buf, git_dir);
 	}
 	if (!is_git_directory(buf->buf)) {
-		sub = submodule_from_path(repo, null_oid(), submodule);
+		sub = submodule_from_path(repo, null_oid(the_hash_algo), submodule);
 		if (!sub) {
 			ret = -1;
 			goto cleanup;
@@ -179,7 +179,7 @@ static int cmd_for_each_ref__exclude(struct ref_store *refs, const char **argv)
 
 static int cmd_resolve_ref(struct ref_store *refs, const char **argv)
 {
-	struct object_id oid = *null_oid();
+	struct object_id oid = *null_oid(the_hash_algo);
 	const char *refname = notnull(*argv++, "refname");
 	int resolve_flags = arg_flags(*argv++, "resolve-flags", empty_flags);
 	int flags;
@@ -21,7 +21,7 @@ int cmd__submodule_nested_repo_config(int argc, const char **argv)
 
 	setup_git_directory();
 
-	if (repo_submodule_init(&subrepo, the_repository, argv[1], null_oid())) {
+	if (repo_submodule_init(&subrepo, the_repository, argv[1], null_oid(the_hash_algo))) {
 		die_usage(argv, "Submodule not found.");
 	}
 
@@ -6,7 +6,8 @@ test_description='adding and checking out large blobs'
 . ./test-lib.sh
 
 test_expect_success 'core.bigFileThreshold must be non-negative' '
-	test_must_fail git -c core.bigFileThreshold=-1 rev-parse >out 2>err &&
+	: >input &&
+	test_must_fail git -c core.bigFileThreshold=-1 hash-object input >out 2>err &&
 	grep "bad numeric config value" err &&
 	test_must_be_empty out
 '
@@ -181,7 +181,7 @@ static void emit_path(struct combine_diff_path ***tail,
 
 		strbuf_add(base, path, pathlen);
 		p = combine_diff_path_new(base->buf, base->len, mode,
-					  oid ? oid : null_oid(),
+					  oid ? oid : null_oid(the_hash_algo),
 					  nparent);
 		strbuf_setlen(base, old_baselen);
 
@@ -206,7 +206,7 @@ static void emit_path(struct combine_diff_path ***tail,
 				mode_i = tp[i].entry.mode;
 			}
 			else {
-				oid_i = null_oid();
+				oid_i = null_oid(the_hash_algo);
 				mode_i = 0;
 			}
 
@@ -665,8 +665,8 @@ static int do_reachable_revlist(struct child_process *cmd,
 
 	cmd_in = xfdopen(cmd->in, "w");
 
-	for (i = get_max_object_index(); 0 < i; ) {
-		o = get_indexed_object(--i);
+	for (i = get_max_object_index(the_repository); 0 < i; ) {
+		o = get_indexed_object(the_repository, --i);
 		if (!o)
 			continue;
 		if (reachable && o->type == OBJ_COMMIT)
@@ -734,8 +734,8 @@ static int get_reachable_list(struct upload_pack_data *data,
 			o->flags &= ~TMP_MARK;
 		}
 	}
-	for (i = get_max_object_index(); 0 < i; i--) {
-		o = get_indexed_object(i - 1);
+	for (i = get_max_object_index(the_repository); 0 < i; i--) {
+		o = get_indexed_object(the_repository, i - 1);
 		if (o && o->type == OBJ_COMMIT &&
 		    (o->flags & TMP_MARK)) {
 			add_object_array(o, NULL, reachable);
@@ -1449,7 +1449,7 @@ void upload_pack(const int advertise_refs, const int stateless_rpc,
 		for_each_namespaced_ref_1(send_ref, &data);
 		if (!data.sent_capabilities) {
 			const char *refname = "capabilities^{}";
-			write_v0_ref(&data, refname, refname, null_oid());
+			write_v0_ref(&data, refname, refname, null_oid(the_hash_algo));
 		}
 		/*
 		 * fflush stdout before calling advertise_shallow_grafts because send_ref
@@ -1557,7 +1557,7 @@ static int parse_want_ref(struct packet_writer *writer, const char *line,
 	}
 
 	if (!o)
-		o = parse_object_or_die(&oid, refname_nons);
+		o = parse_object_or_die(the_repository, &oid, refname_nons);
 
 	if (!(o->flags & WANTED)) {
 		o->flags |= WANTED;
@@ -1793,7 +1793,7 @@ int upload_pack_v2(struct repository *r, struct packet_reader *request)
 	enum fetch_state state = FETCH_PROCESS_ARGS;
 	struct upload_pack_data data;
 
-	clear_object_flags(ALL_FLAGS);
+	clear_object_flags(the_repository, ALL_FLAGS);
 
 	upload_pack_data_init(&data);
 	data.use_sideband = LARGE_PACKET_MAX;
@@ -1824,10 +1824,10 @@ void wt_status_get_state(struct repository *r,
 	if (!sequencer_get_last_command(r, &action)) {
 		if (action == REPLAY_PICK && !state->cherry_pick_in_progress) {
 			state->cherry_pick_in_progress = 1;
-			oidcpy(&state->cherry_pick_head_oid, null_oid());
+			oidcpy(&state->cherry_pick_head_oid, null_oid(the_hash_algo));
 		} else if (action == REPLAY_REVERT && !state->revert_in_progress) {
 			state->revert_in_progress = 1;
-			oidcpy(&state->revert_head_oid, null_oid());
+			oidcpy(&state->revert_head_oid, null_oid(the_hash_algo));
 		}
 	}
 	if (get_detached_from)
@@ -181,7 +181,7 @@ void read_mmblob(mmfile_t *ptr, const struct object_id *oid)
 	unsigned long size;
 	enum object_type type;
 
-	if (oideq(oid, null_oid())) {
+	if (oideq(oid, null_oid(the_hash_algo))) {
 		ptr->ptr = xstrdup("");
 		ptr->size = 0;
 		return;