Merge branch 'ps/object-file-wo-the-repository'

commit 4ce0caa7cc

Reduce implicit assumption and dependence on the_repository in the
object-file subsystem.

* ps/object-file-wo-the-repository:
  object-file: get rid of `the_repository` in index-related functions
  object-file: get rid of `the_repository` in `force_object_loose()`
  object-file: get rid of `the_repository` in `read_loose_object()`
  object-file: get rid of `the_repository` in loose object iterators
  object-file: remove declaration for `for_each_file_in_obj_subdir()`
  object-file: inline `for_each_loose_file_in_objdir_buf()`
  object-file: get rid of `the_repository` when writing objects
  odb: introduce `odb_write_object()`
  loose: write loose objects map via their source
  object-file: get rid of `the_repository` in `finalize_object_file()`
  object-file: get rid of `the_repository` in `loose_object_info()`
  object-file: get rid of `the_repository` when freshening objects
  object-file: inline `check_and_freshen()` functions
  object-file: get rid of `the_repository` in `has_loose_object()`
  object-file: stop using `the_hash_algo`
  object-file: fix -Wsign-compare warnings
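To make the direction of the series concrete, here is a minimal caller-side sketch distilled from the hunks below: writers that used to key off the global the_repository now pass the object database (or one of its sources) explicitly. The includes and the wrapper function are illustrative assumptions, not code taken from the series.

    #include "git-compat-util.h"
    #include "odb.h"          /* odb_write_object(); assumed header name */
    #include "repository.h"   /* the_repository */

    static void write_blob_sketch(const char *buf, unsigned long len)
    {
    	struct object_id oid;

    	/* Before: write_object_file(buf, len, OBJ_BLOB, &oid); */

    	/* After: the object database is named explicitly at the call site. */
    	if (odb_write_object(the_repository->objects, buf, len, OBJ_BLOB, &oid) < 0)
    		die("failed to write object");
    }

The per-source entry points follow the same pattern: has_loose_object(), force_object_loose() and for_each_loose_file_in_source() now take a struct odb_source, and callers that need to consult every alternate iterate odb->sources themselves, as the pack-objects.c hunks below show.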
apply.c | 11
							|  | @ -3621,7 +3621,7 @@ static int try_threeway(struct apply_state *state, | |||
|  | ||||
| 	/* Preimage the patch was prepared for */ | ||||
| 	if (patch->is_new) | ||||
| 		write_object_file("", 0, OBJ_BLOB, &pre_oid); | ||||
| 		odb_write_object(the_repository->objects, "", 0, OBJ_BLOB, &pre_oid); | ||||
| 	else if (repo_get_oid(the_repository, patch->old_oid_prefix, &pre_oid) || | ||||
| 		 read_blob_object(&buf, &pre_oid, patch->old_mode)) | ||||
| 		return error(_("repository lacks the necessary blob to perform 3-way merge.")); | ||||
|  | @ -3637,7 +3637,8 @@ static int try_threeway(struct apply_state *state, | |||
| 		return -1; | ||||
| 	} | ||||
| 	/* post_oid is theirs */ | ||||
| 	write_object_file(tmp_image.buf.buf, tmp_image.buf.len, OBJ_BLOB, &post_oid); | ||||
| 	odb_write_object(the_repository->objects, tmp_image.buf.buf, | ||||
| 			 tmp_image.buf.len, OBJ_BLOB, &post_oid); | ||||
| 	image_clear(&tmp_image); | ||||
|  | ||||
| 	/* our_oid is ours */ | ||||
|  | @ -3650,7 +3651,8 @@ static int try_threeway(struct apply_state *state, | |||
| 			return error(_("cannot read the current contents of '%s'"), | ||||
| 				     patch->old_name); | ||||
| 	} | ||||
| 	write_object_file(tmp_image.buf.buf, tmp_image.buf.len, OBJ_BLOB, &our_oid); | ||||
| 	odb_write_object(the_repository->objects, tmp_image.buf.buf, | ||||
| 			 tmp_image.buf.len, OBJ_BLOB, &our_oid); | ||||
| 	image_clear(&tmp_image); | ||||
|  | ||||
| 	/* in-core three-way merge between post and our using pre as base */ | ||||
|  | @ -4360,7 +4362,8 @@ static int add_index_file(struct apply_state *state, | |||
| 			} | ||||
| 			fill_stat_cache_info(state->repo->index, ce, &st); | ||||
| 		} | ||||
| 		if (write_object_file(buf, size, OBJ_BLOB, &ce->oid) < 0) { | ||||
| 		if (odb_write_object(the_repository->objects, buf, size, | ||||
| 				     OBJ_BLOB, &ce->oid) < 0) { | ||||
| 			discard_cache_entry(ce); | ||||
| 			return error(_("unable to create backing store " | ||||
| 				       "for newly created file %s"), path); | ||||
|  |  | |||
|  | @ -848,7 +848,7 @@ static void batch_each_object(struct batch_options *opt, | |||
| 	}; | ||||
| 	struct bitmap_index *bitmap = prepare_bitmap_git(the_repository); | ||||
|  | ||||
| 	for_each_loose_object(batch_one_object_loose, &payload, 0); | ||||
| 	for_each_loose_object(the_repository->objects, batch_one_object_loose, &payload, 0); | ||||
|  | ||||
| 	if (bitmap && !for_each_bitmapped_object(bitmap, &opt->objects_filter, | ||||
| 						 batch_one_object_bitmapped, &payload)) { | ||||
|  |  | |||
|  | @ -327,7 +327,7 @@ static int checkout_merged(int pos, const struct checkout *state, | |||
| 	 * (it also writes the merge result to the object database even | ||||
| 	 * when it may contain conflicts). | ||||
| 	 */ | ||||
| 	if (write_object_file(result_buf.ptr, result_buf.size, OBJ_BLOB, &oid)) | ||||
| 	if (odb_write_object(the_repository->objects, result_buf.ptr, result_buf.size, OBJ_BLOB, &oid)) | ||||
| 		die(_("Unable to add merge result for '%s'"), path); | ||||
| 	free(result_buf.ptr); | ||||
| 	ce = make_transient_cache_entry(mode, &oid, path, 2, ce_mem_pool); | ||||
|  |  | |||
|  | @ -118,7 +118,7 @@ int cmd_count_objects(int argc, | |||
| 		report_linked_checkout_garbage(the_repository); | ||||
| 	} | ||||
|  | ||||
| 	for_each_loose_file_in_objdir(repo_get_object_directory(the_repository), | ||||
| 	for_each_loose_file_in_source(the_repository->objects->sources, | ||||
| 				      count_loose, count_cruft, NULL, NULL); | ||||
|  | ||||
| 	if (verbose) { | ||||
|  |  | |||
|  | @ -822,11 +822,11 @@ static char *keep_pack(const char *curr_index_name) | |||
| 		die_errno("failed to write keep file"); | ||||
|  | ||||
| 	odb_pack_name(pack_data->repo, &name, pack_data->hash, "pack"); | ||||
| 	if (finalize_object_file(pack_data->pack_name, name.buf)) | ||||
| 	if (finalize_object_file(pack_data->repo, pack_data->pack_name, name.buf)) | ||||
| 		die("cannot store pack file"); | ||||
|  | ||||
| 	odb_pack_name(pack_data->repo, &name, pack_data->hash, "idx"); | ||||
| 	if (finalize_object_file(curr_index_name, name.buf)) | ||||
| 	if (finalize_object_file(pack_data->repo, curr_index_name, name.buf)) | ||||
| 		die("cannot store index file"); | ||||
| 	free((void *)curr_index_name); | ||||
| 	return strbuf_detach(&name, NULL); | ||||
|  |  | |||
|  | @ -393,7 +393,8 @@ static void check_connectivity(void) | |||
| 		 * and ignore any that weren't present in our earlier | ||||
| 		 * traversal. | ||||
| 		 */ | ||||
| 		for_each_loose_object(mark_loose_unreachable_referents, NULL, 0); | ||||
| 		for_each_loose_object(the_repository->objects, | ||||
| 				      mark_loose_unreachable_referents, NULL, 0); | ||||
| 		for_each_packed_object(the_repository, | ||||
| 				       mark_packed_unreachable_referents, | ||||
| 				       NULL, | ||||
|  | @ -632,7 +633,7 @@ static int fsck_loose(const struct object_id *oid, const char *path, | |||
| 	oi.sizep = &size; | ||||
| 	oi.typep = &type; | ||||
|  | ||||
| 	if (read_loose_object(path, oid, &real_oid, &contents, &oi) < 0) { | ||||
| 	if (read_loose_object(the_repository, path, oid, &real_oid, &contents, &oi) < 0) { | ||||
| 		if (contents && !oideq(&real_oid, oid)) | ||||
| 			err = error(_("%s: hash-path mismatch, found at: %s"), | ||||
| 				    oid_to_hex(&real_oid), path); | ||||
|  | @ -687,7 +688,7 @@ static int fsck_subdir(unsigned int nr, const char *path UNUSED, void *data) | |||
| 	return 0; | ||||
| } | ||||
|  | ||||
| static void fsck_object_dir(const char *path) | ||||
| static void fsck_source(struct odb_source *source) | ||||
| { | ||||
| 	struct progress *progress = NULL; | ||||
| 	struct for_each_loose_cb cb_data = { | ||||
|  | @ -701,8 +702,8 @@ static void fsck_object_dir(const char *path) | |||
| 		progress = start_progress(the_repository, | ||||
| 					  _("Checking object directories"), 256); | ||||
|  | ||||
| 	for_each_loose_file_in_objdir(path, fsck_loose, fsck_cruft, fsck_subdir, | ||||
| 				      &cb_data); | ||||
| 	for_each_loose_file_in_source(source, fsck_loose, | ||||
| 				      fsck_cruft, fsck_subdir, &cb_data); | ||||
| 	display_progress(progress, 256); | ||||
| 	stop_progress(&progress); | ||||
| } | ||||
|  | @ -994,13 +995,14 @@ int cmd_fsck(int argc, | |||
| 		fsck_refs(the_repository); | ||||
|  | ||||
| 	if (connectivity_only) { | ||||
| 		for_each_loose_object(mark_loose_for_connectivity, NULL, 0); | ||||
| 		for_each_loose_object(the_repository->objects, | ||||
| 				      mark_loose_for_connectivity, NULL, 0); | ||||
| 		for_each_packed_object(the_repository, | ||||
| 				       mark_packed_for_connectivity, NULL, 0); | ||||
| 	} else { | ||||
| 		odb_prepare_alternates(the_repository->objects); | ||||
| 		for (source = the_repository->objects->sources; source; source = source->next) | ||||
| 			fsck_object_dir(source->path); | ||||
| 			fsck_source(source); | ||||
|  | ||||
| 		if (check_full) { | ||||
| 			struct packed_git *p; | ||||
|  |  | |||
builtin/gc.c | 10
|  | @ -1309,7 +1309,7 @@ static int loose_object_auto_condition(struct gc_config *cfg UNUSED) | |||
| 	if (loose_object_auto_limit < 0) | ||||
| 		return 1; | ||||
|  | ||||
| 	return for_each_loose_file_in_objdir(the_repository->objects->sources->path, | ||||
| 	return for_each_loose_file_in_source(the_repository->objects->sources, | ||||
| 					     loose_object_count, | ||||
| 					     NULL, NULL, &count); | ||||
| } | ||||
|  | @ -1344,7 +1344,7 @@ static int pack_loose(struct maintenance_run_opts *opts) | |||
| 	 * Do not start pack-objects process | ||||
| 	 * if there are no loose objects. | ||||
| 	 */ | ||||
| 	if (!for_each_loose_file_in_objdir(r->objects->sources->path, | ||||
| 	if (!for_each_loose_file_in_source(r->objects->sources, | ||||
| 					   bail_on_loose, | ||||
| 					   NULL, NULL, NULL)) | ||||
| 		return 0; | ||||
|  | @ -1384,11 +1384,9 @@ static int pack_loose(struct maintenance_run_opts *opts) | |||
| 	else if (data.batch_size > 0) | ||||
| 		data.batch_size--; /* Decrease for equality on limit. */ | ||||
|  | ||||
| 	for_each_loose_file_in_objdir(r->objects->sources->path, | ||||
| 	for_each_loose_file_in_source(r->objects->sources, | ||||
| 				      write_loose_object_to_stdin, | ||||
| 				      NULL, | ||||
| 				      NULL, | ||||
| 				      &data); | ||||
| 				      NULL, NULL, &data); | ||||
|  | ||||
| 	fclose(data.in); | ||||
|  | ||||
|  |  | |||
|  | @ -1598,7 +1598,7 @@ static void rename_tmp_packfile(const char **final_name, | |||
| 	if (!*final_name || strcmp(*final_name, curr_name)) { | ||||
| 		if (!*final_name) | ||||
| 			*final_name = odb_pack_name(the_repository, name, hash, ext); | ||||
| 		if (finalize_object_file(curr_name, *final_name)) | ||||
| 		if (finalize_object_file(the_repository, curr_name, *final_name)) | ||||
| 			die(_("unable to rename temporary '*.%s' file to '%s'"), | ||||
| 			    ext, *final_name); | ||||
| 	} else if (make_read_only_if_same) { | ||||
|  |  | |||
|  | @ -155,7 +155,8 @@ int cmd_merge_file(int argc, | |||
| 		if (object_id && !to_stdout) { | ||||
| 			struct object_id oid; | ||||
| 			if (result.size) { | ||||
| 				if (write_object_file(result.ptr, result.size, OBJ_BLOB, &oid) < 0) | ||||
| 				if (odb_write_object(the_repository->objects, result.ptr, | ||||
| 						     result.size, OBJ_BLOB, &oid) < 0) | ||||
| 					ret = error(_("Could not write object file")); | ||||
| 			} else { | ||||
| 				oidcpy(&oid, the_hash_algo->empty_blob); | ||||
|  |  | |||
|  | @ -106,7 +106,7 @@ int cmd_mktag(int argc, | |||
| 	if (verify_object_in_tag(&tagged_oid, &tagged_type) < 0) | ||||
| 		die(_("tag on stdin did not refer to a valid object")); | ||||
|  | ||||
| 	if (write_object_file(buf.buf, buf.len, OBJ_TAG, &result) < 0) | ||||
| 	if (odb_write_object(the_repository->objects, buf.buf, buf.len, OBJ_TAG, &result) < 0) | ||||
| 		die(_("unable to write tag file")); | ||||
|  | ||||
| 	strbuf_release(&buf); | ||||
|  |  | |||
|  | @ -63,7 +63,7 @@ static void write_tree(struct object_id *oid) | |||
| 		strbuf_add(&buf, ent->oid.hash, the_hash_algo->rawsz); | ||||
| 	} | ||||
|  | ||||
| 	write_object_file(buf.buf, buf.len, OBJ_TREE, oid); | ||||
| 	odb_write_object(the_repository->objects, buf.buf, buf.len, OBJ_TREE, oid); | ||||
| 	strbuf_release(&buf); | ||||
| } | ||||
|  | ||||
|  |  | |||
|  | @ -229,7 +229,8 @@ static void prepare_note_data(const struct object_id *object, struct note_data * | |||
|  | ||||
| static void write_note_data(struct note_data *d, struct object_id *oid) | ||||
| { | ||||
| 	if (write_object_file(d->buf.buf, d->buf.len, OBJ_BLOB, oid)) { | ||||
| 	if (odb_write_object(the_repository->objects, d->buf.buf, | ||||
| 			     d->buf.len, OBJ_BLOB, oid)) { | ||||
| 		int status = die_message(_("unable to write note object")); | ||||
|  | ||||
| 		if (d->edit_path) | ||||
|  |  | |||
|  | @ -1455,7 +1455,7 @@ static void write_pack_file(void) | |||
| 				strbuf_setlen(&tmpname, tmpname_len); | ||||
| 			} | ||||
|  | ||||
| 			rename_tmp_packfile_idx(&tmpname, &idx_tmp_name); | ||||
| 			rename_tmp_packfile_idx(the_repository, &tmpname, &idx_tmp_name); | ||||
|  | ||||
| 			free(idx_tmp_name); | ||||
| 			strbuf_release(&tmpname); | ||||
|  | @ -1709,8 +1709,16 @@ static int want_object_in_pack_mtime(const struct object_id *oid, | |||
| 	struct odb_source *source; | ||||
| 	struct list_head *pos; | ||||
|  | ||||
| 	if (!exclude && local && has_loose_object_nonlocal(oid)) | ||||
| 	if (!exclude && local) { | ||||
| 		/* | ||||
| 		 * Note that we start iterating at `sources->next` so that we | ||||
| 		 * skip the local object source. | ||||
| 		 */ | ||||
| 		struct odb_source *source = the_repository->objects->sources->next; | ||||
| 		for (; source; source = source->next) | ||||
| 			if (has_loose_object(source, oid)) | ||||
| 				return 0; | ||||
| 	} | ||||
|  | ||||
| 	/* | ||||
| 	 * If we already know the pack object lives in, start checks from that | ||||
|  | @ -3966,7 +3974,14 @@ static void add_cruft_object_entry(const struct object_id *oid, enum object_type | |||
| 	} else { | ||||
| 		if (!want_object_in_pack_mtime(oid, 0, &pack, &offset, mtime)) | ||||
| 			return; | ||||
| 		if (!pack && type == OBJ_BLOB && !has_loose_object(oid)) { | ||||
| 		if (!pack && type == OBJ_BLOB) { | ||||
| 			struct odb_source *source = the_repository->objects->sources; | ||||
| 			int found = 0; | ||||
|  | ||||
| 			for (; !found && source; source = source->next) | ||||
| 				if (has_loose_object(source, oid)) | ||||
| 					found = 1; | ||||
|  | ||||
| 			/* | ||||
| 			 * If a traversed tree has a missing blob then we want | ||||
| 			 * to avoid adding that missing object to our pack. | ||||
|  | @ -3980,6 +3995,7 @@ static void add_cruft_object_entry(const struct object_id *oid, enum object_type | |||
| 			 * limited to "ensure non-tip blobs which don't exist in | ||||
| 			 * packs do exist via loose objects". Confused? | ||||
| 			 */ | ||||
| 			if (!found) | ||||
| 				return; | ||||
| 		} | ||||
|  | ||||
|  | @ -4368,7 +4384,7 @@ static int add_loose_object(const struct object_id *oid, const char *path, | |||
|  */ | ||||
| static void add_unreachable_loose_objects(struct rev_info *revs) | ||||
| { | ||||
| 	for_each_loose_file_in_objdir(repo_get_object_directory(the_repository), | ||||
| 	for_each_loose_file_in_source(the_repository->objects->sources, | ||||
| 				      add_loose_object, NULL, NULL, revs); | ||||
| } | ||||
|  | ||||
|  | @ -4437,7 +4453,8 @@ static void loosen_unused_packed_objects(void) | |||
| 			if (!packlist_find(&to_pack, &oid) && | ||||
| 			    !has_sha1_pack_kept_or_nonlocal(&oid) && | ||||
| 			    !loosened_object_can_be_discarded(&oid, p->mtime)) { | ||||
| 				if (force_object_loose(&oid, p->mtime)) | ||||
| 				if (force_object_loose(the_repository->objects->sources, | ||||
| 						       &oid, p->mtime)) | ||||
| 					die(_("unable to force loose object")); | ||||
| 				loosened_objects_nr++; | ||||
| 			} | ||||
|  |  | |||
|  | @ -198,7 +198,7 @@ int cmd_prune(int argc, | |||
| 		revs.exclude_promisor_objects = 1; | ||||
| 	} | ||||
|  | ||||
| 	for_each_loose_file_in_objdir(repo_get_object_directory(repo), | ||||
| 	for_each_loose_file_in_source(repo->objects->sources, | ||||
| 				      prune_object, prune_cruft, prune_subdir, &revs); | ||||
|  | ||||
| 	prune_packed_objects(show_only ? PRUNE_PACKED_DRY_RUN : 0); | ||||
|  |  | |||
|  | @ -760,8 +760,8 @@ static void prepare_push_cert_sha1(struct child_process *proc) | |||
| 		int bogs /* beginning_of_gpg_sig */; | ||||
|  | ||||
| 		already_done = 1; | ||||
| 		if (write_object_file(push_cert.buf, push_cert.len, OBJ_BLOB, | ||||
| 				      &push_cert_oid)) | ||||
| 		if (odb_write_object(the_repository->objects, push_cert.buf, | ||||
| 				     push_cert.len, OBJ_BLOB, &push_cert_oid)) | ||||
| 			oidclr(&push_cert_oid, the_repository->hash_algo); | ||||
|  | ||||
| 		memset(&sigcheck, '\0', sizeof(sigcheck)); | ||||
|  |  | |||
|  | @ -489,7 +489,8 @@ static int create_graft(int argc, const char **argv, int force, int gentle) | |||
| 		return -1; | ||||
| 	} | ||||
|  | ||||
| 	if (write_object_file(buf.buf, buf.len, OBJ_COMMIT, &new_oid)) { | ||||
| 	if (odb_write_object(the_repository->objects, buf.buf, | ||||
| 			     buf.len, OBJ_COMMIT, &new_oid)) { | ||||
| 		strbuf_release(&buf); | ||||
| 		return error(_("could not write replacement commit for: '%s'"), | ||||
| 			     old_ref); | ||||
|  |  | |||
|  | @ -271,8 +271,8 @@ static int build_tag_object(struct strbuf *buf, int sign, struct object_id *resu | |||
| 	struct object_id *compat_oid = NULL, compat_oid_buf; | ||||
| 	if (sign && do_sign(buf, &compat_oid, &compat_oid_buf) < 0) | ||||
| 		return error(_("unable to sign the tag")); | ||||
| 	if (write_object_file_flags(buf->buf, buf->len, OBJ_TAG, result, | ||||
| 				    compat_oid, 0) < 0) | ||||
| 	if (odb_write_object_ext(the_repository->objects, buf->buf, | ||||
| 				 buf->len, OBJ_TAG, result, compat_oid, 0) < 0) | ||||
| 		return error(_("unable to write tag file")); | ||||
| 	return 0; | ||||
| } | ||||
|  |  | |||
|  | @ -204,7 +204,7 @@ static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf) | |||
| { | ||||
| 	struct object_id oid; | ||||
|  | ||||
| 	if (write_object_file(obj_buf->buffer, obj_buf->size, | ||||
| 	if (odb_write_object(the_repository->objects, obj_buf->buffer, obj_buf->size, | ||||
| 			     obj->type, &oid) < 0) | ||||
| 		die("failed to write object %s", oid_to_hex(&obj->oid)); | ||||
| 	obj->flags |= FLAG_WRITTEN; | ||||
|  | @ -272,7 +272,7 @@ static void write_object(unsigned nr, enum object_type type, | |||
| 			 void *buf, unsigned long size) | ||||
| { | ||||
| 	if (!strict) { | ||||
| 		if (write_object_file(buf, size, type, | ||||
| 		if (odb_write_object(the_repository->objects, buf, size, type, | ||||
| 				     &obj_list[nr].oid) < 0) | ||||
| 			die("failed to write object"); | ||||
| 		added_object(nr, type, buf, size); | ||||
|  | @ -280,7 +280,7 @@ static void write_object(unsigned nr, enum object_type type, | |||
| 		obj_list[nr].obj = NULL; | ||||
| 	} else if (type == OBJ_BLOB) { | ||||
| 		struct blob *blob; | ||||
| 		if (write_object_file(buf, size, type, | ||||
| 		if (odb_write_object(the_repository->objects, buf, size, type, | ||||
| 				     &obj_list[nr].oid) < 0) | ||||
| 			die("failed to write object"); | ||||
| 		added_object(nr, type, buf, size); | ||||
|  | @ -403,7 +403,8 @@ static void stream_blob(unsigned long size, unsigned nr) | |||
| 	data.zstream = &zstream; | ||||
| 	git_inflate_init(&zstream); | ||||
|  | ||||
| 	if (stream_loose_object(&in_stream, size, &info->oid)) | ||||
| 	if (stream_loose_object(the_repository->objects->sources, | ||||
| 				&in_stream, size, &info->oid)) | ||||
| 		die(_("failed to write object in stream")); | ||||
|  | ||||
| 	if (data.status != Z_STREAM_END) | ||||
|  |  | |||
|  | @ -46,7 +46,7 @@ static void finish_tmp_packfile(struct strbuf *basename, | |||
| 	stage_tmp_packfiles(the_repository, basename, pack_tmp_name, | ||||
| 			    written_list, nr_written, NULL, pack_idx_opts, hash, | ||||
| 			    &idx_tmp_name); | ||||
| 	rename_tmp_packfile_idx(basename, &idx_tmp_name); | ||||
| 	rename_tmp_packfile_idx(the_repository, basename, &idx_tmp_name); | ||||
|  | ||||
| 	free(idx_tmp_name); | ||||
| } | ||||
|  |  | |||
|  | @ -456,9 +456,8 @@ static int update_one(struct cache_tree *it, | |||
| 	} else if (dryrun) { | ||||
| 		hash_object_file(the_hash_algo, buffer.buf, buffer.len, | ||||
| 				 OBJ_TREE, &it->oid); | ||||
| 	} else if (write_object_file_flags(buffer.buf, buffer.len, OBJ_TREE, | ||||
| 					   &it->oid, NULL, flags & WRITE_TREE_SILENT | ||||
| 					   ? WRITE_OBJECT_FILE_SILENT : 0)) { | ||||
| 	} else if (odb_write_object_ext(the_repository->objects, buffer.buf, buffer.len, OBJ_TREE, | ||||
| 					&it->oid, NULL, flags & WRITE_TREE_SILENT ? WRITE_OBJECT_SILENT : 0)) { | ||||
| 		strbuf_release(&buffer); | ||||
| 		return -1; | ||||
| 	} | ||||
|  |  | |||
commit.c | 4
|  | @ -1805,8 +1805,8 @@ int commit_tree_extended(const char *msg, size_t msg_len, | |||
| 		compat_oid = &compat_oid_buf; | ||||
| 	} | ||||
|  | ||||
| 	result = write_object_file_flags(buffer.buf, buffer.len, OBJ_COMMIT, | ||||
| 					 ret, compat_oid, 0); | ||||
| 	result = odb_write_object_ext(the_repository->objects, buffer.buf, buffer.len, | ||||
| 				      OBJ_COMMIT, ret, compat_oid, 0); | ||||
| out: | ||||
| 	free(parent_buf); | ||||
| 	strbuf_release(&buffer); | ||||
|  |  | |||
http.c | 4
|  | @ -2332,7 +2332,7 @@ int http_get_file(const char *url, const char *filename, | |||
| 	ret = http_request_reauth(url, result, HTTP_REQUEST_FILE, options); | ||||
| 	fclose(result); | ||||
|  | ||||
| 	if (ret == HTTP_OK && finalize_object_file(tmpfile.buf, filename)) | ||||
| 	if (ret == HTTP_OK && finalize_object_file(the_repository, tmpfile.buf, filename)) | ||||
| 		ret = HTTP_ERROR; | ||||
| cleanup: | ||||
| 	strbuf_release(&tmpfile); | ||||
|  | @ -2816,7 +2816,7 @@ int finish_http_object_request(struct http_object_request *freq) | |||
| 		return -1; | ||||
| 	} | ||||
| 	odb_loose_path(the_repository->objects->sources, &filename, &freq->oid); | ||||
| 	freq->rename = finalize_object_file(freq->tmpfile.buf, filename.buf); | ||||
| 	freq->rename = finalize_object_file(the_repository, freq->tmpfile.buf, filename.buf); | ||||
| 	strbuf_release(&filename); | ||||
|  | ||||
| 	return freq->rename; | ||||
|  |  | |||
loose.c | 16
|  | @ -166,7 +166,8 @@ errout: | |||
| 	return -1; | ||||
| } | ||||
|  | ||||
| static int write_one_object(struct repository *repo, const struct object_id *oid, | ||||
| static int write_one_object(struct odb_source *source, | ||||
| 			    const struct object_id *oid, | ||||
| 			    const struct object_id *compat_oid) | ||||
| { | ||||
| 	struct lock_file lock; | ||||
|  | @ -174,7 +175,7 @@ static int write_one_object(struct repository *repo, const struct object_id *oid | |||
| 	struct stat st; | ||||
| 	struct strbuf buf = STRBUF_INIT, path = STRBUF_INIT; | ||||
|  | ||||
| 	repo_common_path_replace(repo, &path, "objects/loose-object-idx"); | ||||
| 	strbuf_addf(&path, "%s/loose-object-idx", source->path); | ||||
| 	hold_lock_file_for_update_timeout(&lock, path.buf, LOCK_DIE_ON_ERROR, -1); | ||||
|  | ||||
| 	fd = open(path.buf, O_WRONLY | O_CREAT | O_APPEND, 0666); | ||||
|  | @ -190,7 +191,7 @@ static int write_one_object(struct repository *repo, const struct object_id *oid | |||
| 		goto errout; | ||||
| 	if (close(fd)) | ||||
| 		goto errout; | ||||
| 	adjust_shared_perm(repo, path.buf); | ||||
| 	adjust_shared_perm(source->odb->repo, path.buf); | ||||
| 	rollback_lock_file(&lock); | ||||
| 	strbuf_release(&buf); | ||||
| 	strbuf_release(&path); | ||||
|  | @ -204,17 +205,18 @@ errout: | |||
| 	return -1; | ||||
| } | ||||
|  | ||||
| int repo_add_loose_object_map(struct repository *repo, const struct object_id *oid, | ||||
| int repo_add_loose_object_map(struct odb_source *source, | ||||
| 			      const struct object_id *oid, | ||||
| 			      const struct object_id *compat_oid) | ||||
| { | ||||
| 	int inserted = 0; | ||||
|  | ||||
| 	if (!should_use_loose_object_map(repo)) | ||||
| 	if (!should_use_loose_object_map(source->odb->repo)) | ||||
| 		return 0; | ||||
|  | ||||
| 	inserted = insert_loose_map(repo->objects->sources, oid, compat_oid); | ||||
| 	inserted = insert_loose_map(source, oid, compat_oid); | ||||
| 	if (inserted) | ||||
| 		return write_one_object(repo, oid, compat_oid); | ||||
| 		return write_one_object(source, oid, compat_oid); | ||||
| 	return 0; | ||||
| } | ||||
|  | ||||
|  |  | |||
loose.h | 4
|  | @ -4,6 +4,7 @@ | |||
| #include "khash.h" | ||||
|  | ||||
| struct repository; | ||||
| struct odb_source; | ||||
|  | ||||
| struct loose_object_map { | ||||
| 	kh_oid_map_t *to_compat; | ||||
|  | @ -16,7 +17,8 @@ int repo_loose_object_map_oid(struct repository *repo, | |||
| 			      const struct object_id *src, | ||||
| 			      const struct git_hash_algo *dest_algo, | ||||
| 			      struct object_id *dest); | ||||
| int repo_add_loose_object_map(struct repository *repo, const struct object_id *oid, | ||||
| int repo_add_loose_object_map(struct odb_source *source, | ||||
| 			      const struct object_id *oid, | ||||
| 			      const struct object_id *compat_oid); | ||||
| int repo_read_loose_object_map(struct repository *repo); | ||||
| int repo_write_loose_object_map(struct repository *repo); | ||||
|  |  | |||
|  | @ -246,7 +246,7 @@ static int splice_tree(struct repository *r, | |||
| 		rewrite_with = oid2; | ||||
| 	} | ||||
| 	hashcpy(rewrite_here, rewrite_with->hash, r->hash_algo); | ||||
| 	status = write_object_file(buf, sz, OBJ_TREE, result); | ||||
| 	status = odb_write_object(r->objects, buf, sz, OBJ_TREE, result); | ||||
| 	free(buf); | ||||
| 	return status; | ||||
| } | ||||
|  |  | |||
|  | @ -2216,7 +2216,7 @@ static int handle_content_merge(struct merge_options *opt, | |||
| 		} | ||||
|  | ||||
| 		if (!ret && record_object && | ||||
| 		    write_object_file(result_buf.ptr, result_buf.size, | ||||
| 		    odb_write_object(the_repository->objects, result_buf.ptr, result_buf.size, | ||||
| 				     OBJ_BLOB, &result->oid)) { | ||||
| 			path_msg(opt, ERROR_OBJECT_WRITE_FAILED, 0, | ||||
| 				 pathnames[0], pathnames[1], pathnames[2], NULL, | ||||
|  | @ -3772,7 +3772,8 @@ static int write_tree(struct object_id *result_oid, | |||
| 	} | ||||
|  | ||||
| 	/* Write this object file out, and record in result_oid */ | ||||
| 	if (write_object_file(buf.buf, buf.len, OBJ_TREE, result_oid)) | ||||
| 	if (odb_write_object(the_repository->objects, buf.buf, | ||||
| 			     buf.len, OBJ_TREE, result_oid)) | ||||
| 		ret = -1; | ||||
| 	strbuf_release(&buf); | ||||
| 	return ret; | ||||
|  |  | |||
|  | @ -667,7 +667,7 @@ static void write_midx_reverse_index(struct write_midx_context *ctx, | |||
| 	tmp_file = write_rev_file_order(ctx->repo, NULL, ctx->pack_order, | ||||
| 					ctx->entries_nr, midx_hash, WRITE_REV); | ||||
|  | ||||
| 	if (finalize_object_file(tmp_file, buf.buf)) | ||||
| 	if (finalize_object_file(ctx->repo, tmp_file, buf.buf)) | ||||
| 		die(_("cannot store reverse index file")); | ||||
|  | ||||
| 	strbuf_release(&buf); | ||||
|  |  | |||
|  | @ -98,7 +98,8 @@ int notes_cache_put(struct notes_cache *c, struct object_id *key_oid, | |||
| { | ||||
| 	struct object_id value_oid; | ||||
|  | ||||
| 	if (write_object_file(data, size, OBJ_BLOB, &value_oid) < 0) | ||||
| 	if (odb_write_object(the_repository->objects, data, | ||||
| 			     size, OBJ_BLOB, &value_oid) < 0) | ||||
| 		return -1; | ||||
| 	return add_note(&c->tree, key_oid, &value_oid, NULL); | ||||
| } | ||||
|  |  | |||
notes.c | 12
|  | @ -682,7 +682,8 @@ static int tree_write_stack_finish_subtree(struct tree_write_stack *tws) | |||
| 		ret = tree_write_stack_finish_subtree(n); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 		ret = write_object_file(n->buf.buf, n->buf.len, OBJ_TREE, &s); | ||||
| 		ret = odb_write_object(the_repository->objects, n->buf.buf, | ||||
| 				       n->buf.len, OBJ_TREE, &s); | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 		strbuf_release(&n->buf); | ||||
|  | @ -847,7 +848,8 @@ int combine_notes_concatenate(struct object_id *cur_oid, | |||
| 	free(new_msg); | ||||
|  | ||||
| 	/* create a new blob object from buf */ | ||||
| 	ret = write_object_file(buf, buf_len, OBJ_BLOB, cur_oid); | ||||
| 	ret = odb_write_object(the_repository->objects, buf, | ||||
| 			       buf_len, OBJ_BLOB, cur_oid); | ||||
| 	free(buf); | ||||
| 	return ret; | ||||
| } | ||||
|  | @ -927,7 +929,8 @@ int combine_notes_cat_sort_uniq(struct object_id *cur_oid, | |||
| 				 string_list_join_lines_helper, &buf)) | ||||
| 		goto out; | ||||
|  | ||||
| 	ret = write_object_file(buf.buf, buf.len, OBJ_BLOB, cur_oid); | ||||
| 	ret = odb_write_object(the_repository->objects, buf.buf, | ||||
| 			       buf.len, OBJ_BLOB, cur_oid); | ||||
|  | ||||
| out: | ||||
| 	strbuf_release(&buf); | ||||
|  | @ -1215,7 +1218,8 @@ int write_notes_tree(struct notes_tree *t, struct object_id *result) | |||
| 	ret = for_each_note(t, flags, write_each_note, &cb_data) || | ||||
| 	      write_each_non_note_until(NULL, &cb_data) || | ||||
| 	      tree_write_stack_finish_subtree(&root) || | ||||
| 	      write_object_file(root.buf.buf, root.buf.len, OBJ_TREE, result); | ||||
| 	      odb_write_object(the_repository->objects, root.buf.buf, | ||||
| 			       root.buf.len, OBJ_TREE, result); | ||||
| 	strbuf_release(&root.buf); | ||||
| 	return ret; | ||||
| } | ||||
|  |  | |||
object-file.c | 284
|  | @ -8,7 +8,6 @@ | |||
|  */ | ||||
|  | ||||
| #define USE_THE_REPOSITORY_VARIABLE | ||||
| #define DISABLE_SIGN_COMPARE_WARNINGS | ||||
|  | ||||
| #include "git-compat-util.h" | ||||
| #include "bulk-checkin.h" | ||||
|  | @ -26,6 +25,7 @@ | |||
| #include "pack.h" | ||||
| #include "packfile.h" | ||||
| #include "path.h" | ||||
| #include "read-cache-ll.h" | ||||
| #include "setup.h" | ||||
| #include "streaming.h" | ||||
|  | ||||
|  | @ -42,10 +42,11 @@ static int get_conv_flags(unsigned flags) | |||
| 		return 0; | ||||
| } | ||||
|  | ||||
| static void fill_loose_path(struct strbuf *buf, const struct object_id *oid) | ||||
| static void fill_loose_path(struct strbuf *buf, | ||||
| 			    const struct object_id *oid, | ||||
| 			    const struct git_hash_algo *algop) | ||||
| { | ||||
| 	int i; | ||||
| 	for (i = 0; i < the_hash_algo->rawsz; i++) { | ||||
| 	for (size_t i = 0; i < algop->rawsz; i++) { | ||||
| 		static char hex[] = "0123456789abcdef"; | ||||
| 		unsigned int val = oid->hash[i]; | ||||
| 		strbuf_addch(buf, hex[val >> 4]); | ||||
|  | @ -62,7 +63,7 @@ const char *odb_loose_path(struct odb_source *source, | |||
| 	strbuf_reset(buf); | ||||
| 	strbuf_addstr(buf, source->path); | ||||
| 	strbuf_addch(buf, '/'); | ||||
| 	fill_loose_path(buf, oid); | ||||
| 	fill_loose_path(buf, oid, source->odb->repo->hash_algo); | ||||
| 	return buf->buf; | ||||
| } | ||||
|  | ||||
|  | @ -88,7 +89,7 @@ int check_and_freshen_file(const char *fn, int freshen) | |||
| 	return 1; | ||||
| } | ||||
|  | ||||
| static int check_and_freshen_odb(struct odb_source *source, | ||||
| static int check_and_freshen_source(struct odb_source *source, | ||||
| 				    const struct object_id *oid, | ||||
| 				    int freshen) | ||||
| { | ||||
|  | @ -97,37 +98,10 @@ static int check_and_freshen_odb(struct odb_source *source, | |||
| 	return check_and_freshen_file(path.buf, freshen); | ||||
| } | ||||
|  | ||||
| static int check_and_freshen_local(const struct object_id *oid, int freshen) | ||||
| int has_loose_object(struct odb_source *source, | ||||
| 		     const struct object_id *oid) | ||||
| { | ||||
| 	return check_and_freshen_odb(the_repository->objects->sources, oid, freshen); | ||||
| } | ||||
|  | ||||
| static int check_and_freshen_nonlocal(const struct object_id *oid, int freshen) | ||||
| { | ||||
| 	struct odb_source *source; | ||||
|  | ||||
| 	odb_prepare_alternates(the_repository->objects); | ||||
| 	for (source = the_repository->objects->sources->next; source; source = source->next) { | ||||
| 		if (check_and_freshen_odb(source, oid, freshen)) | ||||
| 			return 1; | ||||
| 	} | ||||
| 	return 0; | ||||
| } | ||||
|  | ||||
| static int check_and_freshen(const struct object_id *oid, int freshen) | ||||
| { | ||||
| 	return check_and_freshen_local(oid, freshen) || | ||||
| 	       check_and_freshen_nonlocal(oid, freshen); | ||||
| } | ||||
|  | ||||
| int has_loose_object_nonlocal(const struct object_id *oid) | ||||
| { | ||||
| 	return check_and_freshen_nonlocal(oid, 0); | ||||
| } | ||||
|  | ||||
| int has_loose_object(const struct object_id *oid) | ||||
| { | ||||
| 	return check_and_freshen(oid, 0); | ||||
| 	return check_and_freshen_source(source, oid, 0); | ||||
| } | ||||
|  | ||||
| int format_object_header(char *str, size_t size, enum object_type type, | ||||
|  | @ -327,9 +301,8 @@ static void *unpack_loose_rest(git_zstream *stream, | |||
| 			       void *buffer, unsigned long size, | ||||
| 			       const struct object_id *oid) | ||||
| { | ||||
| 	int bytes = strlen(buffer) + 1; | ||||
| 	size_t bytes = strlen(buffer) + 1, n; | ||||
| 	unsigned char *buf = xmallocz(size); | ||||
| 	unsigned long n; | ||||
| 	int status = Z_OK; | ||||
|  | ||||
| 	n = stream->total_out - bytes; | ||||
|  | @ -448,7 +421,7 @@ int loose_object_info(struct repository *r, | |||
| 	enum object_type type_scratch; | ||||
|  | ||||
| 	if (oi->delta_base_oid) | ||||
| 		oidclr(oi->delta_base_oid, the_repository->hash_algo); | ||||
| 		oidclr(oi->delta_base_oid, r->hash_algo); | ||||
|  | ||||
| 	/* | ||||
| 	 * If we don't care about type or size, then we don't | ||||
|  | @ -596,7 +569,7 @@ static int check_collision(const char *source, const char *dest) | |||
| 			goto out; | ||||
| 		} | ||||
|  | ||||
| 		if (sz_a < sizeof(buf_source)) | ||||
| 		if ((size_t) sz_a < sizeof(buf_source)) | ||||
| 			break; | ||||
| 	} | ||||
|  | ||||
|  | @ -611,12 +584,14 @@ out: | |||
| /* | ||||
|  * Move the just written object into its final resting place. | ||||
|  */ | ||||
| int finalize_object_file(const char *tmpfile, const char *filename) | ||||
| int finalize_object_file(struct repository *repo, | ||||
| 			 const char *tmpfile, const char *filename) | ||||
| { | ||||
| 	return finalize_object_file_flags(tmpfile, filename, 0); | ||||
| 	return finalize_object_file_flags(repo, tmpfile, filename, 0); | ||||
| } | ||||
|  | ||||
| int finalize_object_file_flags(const char *tmpfile, const char *filename, | ||||
| int finalize_object_file_flags(struct repository *repo, | ||||
| 			       const char *tmpfile, const char *filename, | ||||
| 			       enum finalize_object_file_flags flags) | ||||
| { | ||||
| 	unsigned retries = 0; | ||||
|  | @ -676,7 +651,7 @@ retry: | |||
| 	} | ||||
|  | ||||
| out: | ||||
| 	if (adjust_shared_perm(the_repository, filename)) | ||||
| 	if (adjust_shared_perm(repo, filename)) | ||||
| 		return error(_("unable to set permission to '%s'"), filename); | ||||
| 	return 0; | ||||
| } | ||||
|  | @ -692,9 +667,10 @@ void hash_object_file(const struct git_hash_algo *algo, const void *buf, | |||
| } | ||||
|  | ||||
| /* Finalize a file on disk, and close it. */ | ||||
| static void close_loose_object(int fd, const char *filename) | ||||
| static void close_loose_object(struct odb_source *source, | ||||
| 			       int fd, const char *filename) | ||||
| { | ||||
| 	if (the_repository->objects->sources->will_destroy) | ||||
| 	if (source->will_destroy) | ||||
| 		goto out; | ||||
|  | ||||
| 	if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT)) | ||||
|  | @ -726,7 +702,8 @@ static inline int directory_size(const char *filename) | |||
|  * We want to avoid cross-directory filename renames, because those | ||||
|  * can have problems on various filesystems (FAT, NFS, Coda). | ||||
|  */ | ||||
| static int create_tmpfile(struct strbuf *tmp, const char *filename) | ||||
| static int create_tmpfile(struct repository *repo, | ||||
| 			  struct strbuf *tmp, const char *filename) | ||||
| { | ||||
| 	int fd, dirlen = directory_size(filename); | ||||
|  | ||||
|  | @ -745,7 +722,7 @@ static int create_tmpfile(struct strbuf *tmp, const char *filename) | |||
| 		strbuf_add(tmp, filename, dirlen - 1); | ||||
| 		if (mkdir(tmp->buf, 0777) && errno != EEXIST) | ||||
| 			return -1; | ||||
| 		if (adjust_shared_perm(the_repository, tmp->buf)) | ||||
| 		if (adjust_shared_perm(repo, tmp->buf)) | ||||
| 			return -1; | ||||
|  | ||||
| 		/* Try again */ | ||||
|  | @ -766,26 +743,26 @@ static int create_tmpfile(struct strbuf *tmp, const char *filename) | |||
|  * Returns a "fd", which should later be provided to | ||||
|  * end_loose_object_common(). | ||||
|  */ | ||||
| static int start_loose_object_common(struct strbuf *tmp_file, | ||||
| static int start_loose_object_common(struct odb_source *source, | ||||
| 				     struct strbuf *tmp_file, | ||||
| 				     const char *filename, unsigned flags, | ||||
| 				     git_zstream *stream, | ||||
| 				     unsigned char *buf, size_t buflen, | ||||
| 				     struct git_hash_ctx *c, struct git_hash_ctx *compat_c, | ||||
| 				     char *hdr, int hdrlen) | ||||
| { | ||||
| 	struct repository *repo = the_repository; | ||||
| 	const struct git_hash_algo *algo = repo->hash_algo; | ||||
| 	const struct git_hash_algo *compat = repo->compat_hash_algo; | ||||
| 	const struct git_hash_algo *algo = source->odb->repo->hash_algo; | ||||
| 	const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo; | ||||
| 	int fd; | ||||
|  | ||||
| 	fd = create_tmpfile(tmp_file, filename); | ||||
| 	fd = create_tmpfile(source->odb->repo, tmp_file, filename); | ||||
| 	if (fd < 0) { | ||||
| 		if (flags & WRITE_OBJECT_FILE_SILENT) | ||||
| 		if (flags & WRITE_OBJECT_SILENT) | ||||
| 			return -1; | ||||
| 		else if (errno == EACCES) | ||||
| 			return error(_("insufficient permission for adding " | ||||
| 				       "an object to repository database %s"), | ||||
| 				     repo_get_object_directory(the_repository)); | ||||
| 				     source->path); | ||||
| 		else | ||||
| 			return error_errno( | ||||
| 				_("unable to create temporary file")); | ||||
|  | @ -815,14 +792,14 @@ static int start_loose_object_common(struct strbuf *tmp_file, | |||
|  * Common steps for the inner git_deflate() loop for writing loose | ||||
|  * objects. Returns what git_deflate() returns. | ||||
|  */ | ||||
| static int write_loose_object_common(struct git_hash_ctx *c, struct git_hash_ctx *compat_c, | ||||
| static int write_loose_object_common(struct odb_source *source, | ||||
| 				     struct git_hash_ctx *c, struct git_hash_ctx *compat_c, | ||||
| 				     git_zstream *stream, const int flush, | ||||
| 				     unsigned char *in0, const int fd, | ||||
| 				     unsigned char *compressed, | ||||
| 				     const size_t compressed_len) | ||||
| { | ||||
| 	struct repository *repo = the_repository; | ||||
| 	const struct git_hash_algo *compat = repo->compat_hash_algo; | ||||
| 	const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo; | ||||
| 	int ret; | ||||
|  | ||||
| 	ret = git_deflate(stream, flush ? Z_FINISH : 0); | ||||
|  | @ -843,12 +820,12 @@ static int write_loose_object_common(struct git_hash_ctx *c, struct git_hash_ctx | |||
|  * - End the compression of zlib stream. | ||||
|  * - Get the calculated oid to "oid". | ||||
|  */ | ||||
| static int end_loose_object_common(struct git_hash_ctx *c, struct git_hash_ctx *compat_c, | ||||
| static int end_loose_object_common(struct odb_source *source, | ||||
| 				   struct git_hash_ctx *c, struct git_hash_ctx *compat_c, | ||||
| 				   git_zstream *stream, struct object_id *oid, | ||||
| 				   struct object_id *compat_oid) | ||||
| { | ||||
| 	struct repository *repo = the_repository; | ||||
| 	const struct git_hash_algo *compat = repo->compat_hash_algo; | ||||
| 	const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo; | ||||
| 	int ret; | ||||
|  | ||||
| 	ret = git_deflate_end_gently(stream); | ||||
|  | @ -861,7 +838,8 @@ static int end_loose_object_common(struct git_hash_ctx *c, struct git_hash_ctx * | |||
| 	return Z_OK; | ||||
| } | ||||
|  | ||||
| static int write_loose_object(const struct object_id *oid, char *hdr, | ||||
| static int write_loose_object(struct odb_source *source, | ||||
| 			      const struct object_id *oid, char *hdr, | ||||
| 			      int hdrlen, const void *buf, unsigned long len, | ||||
| 			      time_t mtime, unsigned flags) | ||||
| { | ||||
|  | @ -876,9 +854,9 @@ static int write_loose_object(const struct object_id *oid, char *hdr, | |||
| 	if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT)) | ||||
| 		prepare_loose_object_bulk_checkin(); | ||||
|  | ||||
| 	odb_loose_path(the_repository->objects->sources, &filename, oid); | ||||
| 	odb_loose_path(source, &filename, oid); | ||||
|  | ||||
| 	fd = start_loose_object_common(&tmp_file, filename.buf, flags, | ||||
| 	fd = start_loose_object_common(source, &tmp_file, filename.buf, flags, | ||||
| 				       &stream, compressed, sizeof(compressed), | ||||
| 				       &c, NULL, hdr, hdrlen); | ||||
| 	if (fd < 0) | ||||
|  | @ -890,14 +868,14 @@ static int write_loose_object(const struct object_id *oid, char *hdr, | |||
| 	do { | ||||
| 		unsigned char *in0 = stream.next_in; | ||||
|  | ||||
| 		ret = write_loose_object_common(&c, NULL, &stream, 1, in0, fd, | ||||
| 		ret = write_loose_object_common(source, &c, NULL, &stream, 1, in0, fd, | ||||
| 						compressed, sizeof(compressed)); | ||||
| 	} while (ret == Z_OK); | ||||
|  | ||||
| 	if (ret != Z_STREAM_END) | ||||
| 		die(_("unable to deflate new object %s (%d)"), oid_to_hex(oid), | ||||
| 		    ret); | ||||
| 	ret = end_loose_object_common(&c, NULL, &stream, ¶no_oid, NULL); | ||||
| 	ret = end_loose_object_common(source, &c, NULL, &stream, ¶no_oid, NULL); | ||||
| 	if (ret != Z_OK) | ||||
| 		die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid), | ||||
| 		    ret); | ||||
|  | @ -905,30 +883,36 @@ static int write_loose_object(const struct object_id *oid, char *hdr, | |||
| 		die(_("confused by unstable object source data for %s"), | ||||
| 		    oid_to_hex(oid)); | ||||
|  | ||||
| 	close_loose_object(fd, tmp_file.buf); | ||||
| 	close_loose_object(source, fd, tmp_file.buf); | ||||
|  | ||||
| 	if (mtime) { | ||||
| 		struct utimbuf utb; | ||||
| 		utb.actime = mtime; | ||||
| 		utb.modtime = mtime; | ||||
| 		if (utime(tmp_file.buf, &utb) < 0 && | ||||
| 		    !(flags & WRITE_OBJECT_FILE_SILENT)) | ||||
| 		    !(flags & WRITE_OBJECT_SILENT)) | ||||
| 			warning_errno(_("failed utime() on %s"), tmp_file.buf); | ||||
| 	} | ||||
|  | ||||
| 	return finalize_object_file_flags(tmp_file.buf, filename.buf, | ||||
| 	return finalize_object_file_flags(source->odb->repo, tmp_file.buf, filename.buf, | ||||
| 					  FOF_SKIP_COLLISION_CHECK); | ||||
| } | ||||
|  | ||||
| static int freshen_loose_object(const struct object_id *oid) | ||||
| static int freshen_loose_object(struct object_database *odb, | ||||
| 				const struct object_id *oid) | ||||
| { | ||||
| 	return check_and_freshen(oid, 1); | ||||
| 	odb_prepare_alternates(odb); | ||||
| 	for (struct odb_source *source = odb->sources; source; source = source->next) | ||||
| 		if (check_and_freshen_source(source, oid, 1)) | ||||
| 			return 1; | ||||
| 	return 0; | ||||
| } | ||||
|  | ||||
| static int freshen_packed_object(const struct object_id *oid) | ||||
| static int freshen_packed_object(struct object_database *odb, | ||||
| 				 const struct object_id *oid) | ||||
| { | ||||
| 	struct pack_entry e; | ||||
| 	if (!find_pack_entry(the_repository, oid, &e)) | ||||
| 	if (!find_pack_entry(odb->repo, oid, &e)) | ||||
| 		return 0; | ||||
| 	if (e.p->is_cruft) | ||||
| 		return 0; | ||||
|  | @ -940,10 +924,11 @@ static int freshen_packed_object(const struct object_id *oid) | |||
| 	return 1; | ||||
| } | ||||
|  | ||||
| int stream_loose_object(struct input_stream *in_stream, size_t len, | ||||
| int stream_loose_object(struct odb_source *source, | ||||
| 			struct input_stream *in_stream, size_t len, | ||||
| 			struct object_id *oid) | ||||
| { | ||||
| 	const struct git_hash_algo *compat = the_repository->compat_hash_algo; | ||||
| 	const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo; | ||||
| 	struct object_id compat_oid; | ||||
| 	int fd, ret, err = 0, flush = 0; | ||||
| 	unsigned char compressed[4096]; | ||||
|  | @ -959,7 +944,7 @@ int stream_loose_object(struct input_stream *in_stream, size_t len, | |||
| 		prepare_loose_object_bulk_checkin(); | ||||
|  | ||||
| 	/* Since oid is not determined, save tmp file to odb path. */ | ||||
| 	strbuf_addf(&filename, "%s/", repo_get_object_directory(the_repository)); | ||||
| 	strbuf_addf(&filename, "%s/", source->path); | ||||
| 	hdrlen = format_object_header(hdr, sizeof(hdr), OBJ_BLOB, len); | ||||
|  | ||||
| 	/* | ||||
|  | @ -970,7 +955,7 @@ int stream_loose_object(struct input_stream *in_stream, size_t len, | |||
| 	 *  - Setup zlib stream for compression. | ||||
| 	 *  - Start to feed header to zlib stream. | ||||
| 	 */ | ||||
| 	fd = start_loose_object_common(&tmp_file, filename.buf, 0, | ||||
| 	fd = start_loose_object_common(source, &tmp_file, filename.buf, 0, | ||||
| 				       &stream, compressed, sizeof(compressed), | ||||
| 				       &c, &compat_c, hdr, hdrlen); | ||||
| 	if (fd < 0) { | ||||
|  | @ -990,7 +975,7 @@ int stream_loose_object(struct input_stream *in_stream, size_t len, | |||
| 			if (in_stream->is_finished) | ||||
| 				flush = 1; | ||||
| 		} | ||||
| 		ret = write_loose_object_common(&c, &compat_c, &stream, flush, in0, fd, | ||||
| 		ret = write_loose_object_common(source, &c, &compat_c, &stream, flush, in0, fd, | ||||
| 						compressed, sizeof(compressed)); | ||||
| 		/* | ||||
| 		 * Unlike write_loose_object(), we do not have the entire | ||||
|  | @ -1013,17 +998,18 @@ int stream_loose_object(struct input_stream *in_stream, size_t len, | |||
| 	 */ | ||||
| 	if (ret != Z_STREAM_END) | ||||
| 		die(_("unable to stream deflate new object (%d)"), ret); | ||||
| 	ret = end_loose_object_common(&c, &compat_c, &stream, oid, &compat_oid); | ||||
| 	ret = end_loose_object_common(source, &c, &compat_c, &stream, oid, &compat_oid); | ||||
| 	if (ret != Z_OK) | ||||
| 		die(_("deflateEnd on stream object failed (%d)"), ret); | ||||
| 	close_loose_object(fd, tmp_file.buf); | ||||
| 	close_loose_object(source, fd, tmp_file.buf); | ||||
|  | ||||
| 	if (freshen_packed_object(oid) || freshen_loose_object(oid)) { | ||||
| 	if (freshen_packed_object(source->odb, oid) || | ||||
| 	    freshen_loose_object(source->odb, oid)) { | ||||
| 		unlink_or_warn(tmp_file.buf); | ||||
| 		goto cleanup; | ||||
| 	} | ||||
|  | ||||
| 	odb_loose_path(the_repository->objects->sources, &filename, oid); | ||||
| 	odb_loose_path(source, &filename, oid); | ||||
|  | ||||
| 	/* We finally know the object path, and create the missing dir. */ | ||||
| 	dirlen = directory_size(filename.buf); | ||||
|  | @ -1031,7 +1017,7 @@ int stream_loose_object(struct input_stream *in_stream, size_t len, | |||
| 		struct strbuf dir = STRBUF_INIT; | ||||
| 		strbuf_add(&dir, filename.buf, dirlen); | ||||
|  | ||||
| 		if (safe_create_dir_in_gitdir(the_repository, dir.buf) && | ||||
| 		if (safe_create_dir_in_gitdir(source->odb->repo, dir.buf) && | ||||
| 		    errno != EEXIST) { | ||||
| 			err = error_errno(_("unable to create directory %s"), dir.buf); | ||||
| 			strbuf_release(&dir); | ||||
|  | @ -1040,23 +1026,23 @@ int stream_loose_object(struct input_stream *in_stream, size_t len, | |||
| 		strbuf_release(&dir); | ||||
| 	} | ||||
|  | ||||
| 	err = finalize_object_file_flags(tmp_file.buf, filename.buf, | ||||
| 	err = finalize_object_file_flags(source->odb->repo, tmp_file.buf, filename.buf, | ||||
| 					 FOF_SKIP_COLLISION_CHECK); | ||||
| 	if (!err && compat) | ||||
| 		err = repo_add_loose_object_map(the_repository, oid, &compat_oid); | ||||
| 		err = repo_add_loose_object_map(source, oid, &compat_oid); | ||||
| cleanup: | ||||
| 	strbuf_release(&tmp_file); | ||||
| 	strbuf_release(&filename); | ||||
| 	return err; | ||||
| } | ||||
|  | ||||
| int write_object_file_flags(const void *buf, unsigned long len, | ||||
| int write_object_file(struct odb_source *source, | ||||
| 		      const void *buf, unsigned long len, | ||||
| 		      enum object_type type, struct object_id *oid, | ||||
| 		      struct object_id *compat_oid_in, unsigned flags) | ||||
| { | ||||
| 	struct repository *repo = the_repository; | ||||
| 	const struct git_hash_algo *algo = repo->hash_algo; | ||||
| 	const struct git_hash_algo *compat = repo->compat_hash_algo; | ||||
| 	const struct git_hash_algo *algo = source->odb->repo->hash_algo; | ||||
| 	const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo; | ||||
| 	struct object_id compat_oid; | ||||
| 	char hdr[MAX_HEADER_LEN]; | ||||
| 	int hdrlen = sizeof(hdr); | ||||
|  | @ -1069,7 +1055,7 @@ int write_object_file_flags(const void *buf, unsigned long len, | |||
| 			hash_object_file(compat, buf, len, type, &compat_oid); | ||||
| 		else { | ||||
| 			struct strbuf converted = STRBUF_INIT; | ||||
| 			convert_object_file(the_repository, &converted, algo, compat, | ||||
| 			convert_object_file(source->odb->repo, &converted, algo, compat, | ||||
| 					    buf, len, type, 0); | ||||
| 			hash_object_file(compat, converted.buf, converted.len, | ||||
| 					 type, &compat_oid); | ||||
|  | @ -1081,19 +1067,20 @@ int write_object_file_flags(const void *buf, unsigned long len, | |||
| 	 * it out into .git/objects/??/?{38} file. | ||||
| 	 */ | ||||
| 	write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen); | ||||
| 	if (freshen_packed_object(oid) || freshen_loose_object(oid)) | ||||
| 	if (freshen_packed_object(source->odb, oid) || | ||||
| 	    freshen_loose_object(source->odb, oid)) | ||||
| 		return 0; | ||||
| 	if (write_loose_object(oid, hdr, hdrlen, buf, len, 0, flags)) | ||||
| 	if (write_loose_object(source, oid, hdr, hdrlen, buf, len, 0, flags)) | ||||
| 		return -1; | ||||
| 	if (compat) | ||||
| 		return repo_add_loose_object_map(repo, oid, &compat_oid); | ||||
| 		return repo_add_loose_object_map(source, oid, &compat_oid); | ||||
| 	return 0; | ||||
| } | ||||
|  | ||||
| int force_object_loose(const struct object_id *oid, time_t mtime) | ||||
| int force_object_loose(struct odb_source *source, | ||||
| 		       const struct object_id *oid, time_t mtime) | ||||
| { | ||||
| 	struct repository *repo = the_repository; | ||||
| 	const struct git_hash_algo *compat = repo->compat_hash_algo; | ||||
| 	const struct git_hash_algo *compat = source->odb->repo->compat_hash_algo; | ||||
| 	void *buf; | ||||
| 	unsigned long len; | ||||
| 	struct object_info oi = OBJECT_INFO_INIT; | ||||
|  | @ -1103,22 +1090,24 @@ int force_object_loose(const struct object_id *oid, time_t mtime) | |||
| 	int hdrlen; | ||||
| 	int ret; | ||||
|  | ||||
| 	if (has_loose_object(oid)) | ||||
| 	for (struct odb_source *s = source->odb->sources; s; s = s->next) | ||||
| 		if (has_loose_object(s, oid)) | ||||
| 			return 0; | ||||
|  | ||||
| 	oi.typep = &type; | ||||
| 	oi.sizep = &len; | ||||
| 	oi.contentp = &buf; | ||||
| 	if (odb_read_object_info_extended(the_repository->objects, oid, &oi, 0)) | ||||
| 	if (odb_read_object_info_extended(source->odb, oid, &oi, 0)) | ||||
| 		return error(_("cannot read object for %s"), oid_to_hex(oid)); | ||||
| 	if (compat) { | ||||
| 		if (repo_oid_to_algop(repo, oid, compat, &compat_oid)) | ||||
| 		if (repo_oid_to_algop(source->odb->repo, oid, compat, &compat_oid)) | ||||
| 			return error(_("cannot map object %s to %s"), | ||||
| 				     oid_to_hex(oid), compat->name); | ||||
| 	} | ||||
| 	hdrlen = format_object_header(hdr, sizeof(hdr), type, len); | ||||
| 	ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime, 0); | ||||
| 	ret = write_loose_object(source, oid, hdr, hdrlen, buf, len, mtime, 0); | ||||
| 	if (!ret && compat) | ||||
| 		ret = repo_add_loose_object_map(the_repository, oid, &compat_oid); | ||||
| 		ret = repo_add_loose_object_map(source, oid, &compat_oid); | ||||
| 	free(buf); | ||||
|  | ||||
| 	return ret; | ||||
|  | @ -1168,15 +1157,15 @@ static int index_mem(struct index_state *istate, | |||
|  | ||||
| 		opts.strict = 1; | ||||
| 		opts.error_func = hash_format_check_report; | ||||
| 		if (fsck_buffer(null_oid(the_hash_algo), type, buf, size, &opts)) | ||||
| 		if (fsck_buffer(null_oid(istate->repo->hash_algo), type, buf, size, &opts)) | ||||
| 			die(_("refusing to create malformed object")); | ||||
| 		fsck_finish(&opts); | ||||
| 	} | ||||
|  | ||||
| 	if (write_object) | ||||
| 		ret = write_object_file(buf, size, type, oid); | ||||
| 		ret = odb_write_object(istate->repo->objects, buf, size, type, oid); | ||||
| 	else | ||||
| 		hash_object_file(the_hash_algo, buf, size, type, oid); | ||||
| 		hash_object_file(istate->repo->hash_algo, buf, size, type, oid); | ||||
|  | ||||
| 	strbuf_release(&nbuf); | ||||
| 	return ret; | ||||
|  | @ -1199,10 +1188,10 @@ static int index_stream_convert_blob(struct index_state *istate, | |||
| 				 get_conv_flags(flags)); | ||||
|  | ||||
| 	if (write_object) | ||||
| 		ret = write_object_file(sbuf.buf, sbuf.len, OBJ_BLOB, | ||||
| 		ret = odb_write_object(istate->repo->objects, sbuf.buf, sbuf.len, OBJ_BLOB, | ||||
| 				       oid); | ||||
| 	else | ||||
| 		hash_object_file(the_hash_algo, sbuf.buf, sbuf.len, OBJ_BLOB, | ||||
| 		hash_object_file(istate->repo->hash_algo, sbuf.buf, sbuf.len, OBJ_BLOB, | ||||
| 				 oid); | ||||
| 	strbuf_release(&sbuf); | ||||
| 	return ret; | ||||
|  | @ -1240,7 +1229,7 @@ static int index_core(struct index_state *istate, | |||
| 		if (read_result < 0) | ||||
| 			ret = error_errno(_("read error while indexing %s"), | ||||
| 					  path ? path : "<unknown>"); | ||||
| 		else if (read_result != size) | ||||
| 		else if ((size_t) read_result != size) | ||||
| 			ret = error(_("short read while indexing %s"), | ||||
| 				    path ? path : "<unknown>"); | ||||
| 		else | ||||
|  | @ -1268,7 +1257,7 @@ int index_fd(struct index_state *istate, struct object_id *oid, | |||
| 		ret = index_stream_convert_blob(istate, oid, fd, path, flags); | ||||
| 	else if (!S_ISREG(st->st_mode)) | ||||
| 		ret = index_pipe(istate, oid, fd, type, path, flags); | ||||
| 	else if (st->st_size <= repo_settings_get_big_file_threshold(the_repository) || | ||||
| 	else if ((st->st_size >= 0 && (size_t) st->st_size <= repo_settings_get_big_file_threshold(istate->repo)) || | ||||
| 		 type != OBJ_BLOB || | ||||
| 		 (path && would_convert_to_git(istate, path))) | ||||
| 		ret = index_core(istate, oid, fd, xsize_t(st->st_size), | ||||
|  | @ -1300,14 +1289,14 @@ int index_path(struct index_state *istate, struct object_id *oid, | |||
| 		if (strbuf_readlink(&sb, path, st->st_size)) | ||||
| 			return error_errno("readlink(\"%s\")", path); | ||||
| 		if (!(flags & INDEX_WRITE_OBJECT)) | ||||
| 			hash_object_file(the_hash_algo, sb.buf, sb.len, | ||||
| 			hash_object_file(istate->repo->hash_algo, sb.buf, sb.len, | ||||
| 					 OBJ_BLOB, oid); | ||||
| 		else if (write_object_file(sb.buf, sb.len, OBJ_BLOB, oid)) | ||||
| 		else if (odb_write_object(istate->repo->objects, sb.buf, sb.len, OBJ_BLOB, oid)) | ||||
| 			rc = error(_("%s: failed to insert into database"), path); | ||||
| 		strbuf_release(&sb); | ||||
| 		break; | ||||
| 	case S_IFDIR: | ||||
| 		return repo_resolve_gitlink_ref(the_repository, path, "HEAD", oid); | ||||
| 		return repo_resolve_gitlink_ref(istate->repo, path, "HEAD", oid); | ||||
| 	default: | ||||
| 		return error(_("%s: unsupported file type"), path); | ||||
| 	} | ||||
|  | @ -1329,8 +1318,9 @@ int read_pack_header(int fd, struct pack_header *header) | |||
| 	return 0; | ||||
| } | ||||
|  | ||||
| int for_each_file_in_obj_subdir(unsigned int subdir_nr, | ||||
| static int for_each_file_in_obj_subdir(unsigned int subdir_nr, | ||||
| 				       struct strbuf *path, | ||||
| 				       const struct git_hash_algo *algop, | ||||
| 				       each_loose_object_fn obj_cb, | ||||
| 				       each_loose_cruft_fn cruft_cb, | ||||
| 				       each_loose_subdir_fn subdir_cb, | ||||
|  | @ -1367,12 +1357,12 @@ int for_each_file_in_obj_subdir(unsigned int subdir_nr, | |||
| 		namelen = strlen(de->d_name); | ||||
| 		strbuf_setlen(path, baselen); | ||||
| 		strbuf_add(path, de->d_name, namelen); | ||||
| 		if (namelen == the_hash_algo->hexsz - 2 && | ||||
| 		if (namelen == algop->hexsz - 2 && | ||||
| 		    !hex_to_bytes(oid.hash + 1, de->d_name, | ||||
| 				  the_hash_algo->rawsz - 1)) { | ||||
| 			oid_set_algo(&oid, the_hash_algo); | ||||
| 			memset(oid.hash + the_hash_algo->rawsz, 0, | ||||
| 			       GIT_MAX_RAWSZ - the_hash_algo->rawsz); | ||||
| 				  algop->rawsz - 1)) { | ||||
| 			oid_set_algo(&oid, algop); | ||||
| 			memset(oid.hash + algop->rawsz, 0, | ||||
| 			       GIT_MAX_RAWSZ - algop->rawsz); | ||||
| 			if (obj_cb) { | ||||
| 				r = obj_cb(&oid, path->buf, data); | ||||
| 				if (r) | ||||
|  | @ -1398,26 +1388,7 @@ int for_each_file_in_obj_subdir(unsigned int subdir_nr, | |||
| 	return r; | ||||
| } | ||||
|  | ||||
| int for_each_loose_file_in_objdir_buf(struct strbuf *path, | ||||
| 			    each_loose_object_fn obj_cb, | ||||
| 			    each_loose_cruft_fn cruft_cb, | ||||
| 			    each_loose_subdir_fn subdir_cb, | ||||
| 			    void *data) | ||||
| { | ||||
| 	int r = 0; | ||||
| 	int i; | ||||
|  | ||||
| 	for (i = 0; i < 256; i++) { | ||||
| 		r = for_each_file_in_obj_subdir(i, path, obj_cb, cruft_cb, | ||||
| 						subdir_cb, data); | ||||
| 		if (r) | ||||
| 			break; | ||||
| 	} | ||||
|  | ||||
| 	return r; | ||||
| } | ||||
|  | ||||
| int for_each_loose_file_in_objdir(const char *path, | ||||
| int for_each_loose_file_in_source(struct odb_source *source, | ||||
| 				  each_loose_object_fn obj_cb, | ||||
| 				  each_loose_cruft_fn cruft_cb, | ||||
| 				  each_loose_subdir_fn subdir_cb, | ||||
|  | @ -1426,22 +1397,27 @@ int for_each_loose_file_in_objdir(const char *path, | |||
| 	struct strbuf buf = STRBUF_INIT; | ||||
| 	int r; | ||||
|  | ||||
| 	strbuf_addstr(&buf, path); | ||||
| 	r = for_each_loose_file_in_objdir_buf(&buf, obj_cb, cruft_cb, | ||||
| 					      subdir_cb, data); | ||||
| 	strbuf_release(&buf); | ||||
| 	strbuf_addstr(&buf, source->path); | ||||
| 	for (int i = 0; i < 256; i++) { | ||||
| 		r = for_each_file_in_obj_subdir(i, &buf, source->odb->repo->hash_algo, | ||||
| 						obj_cb, cruft_cb, subdir_cb, data); | ||||
| 		if (r) | ||||
| 			break; | ||||
| 	} | ||||
|  | ||||
| 	strbuf_release(&buf); | ||||
| 	return r; | ||||
| } | ||||
|  | ||||
| int for_each_loose_object(each_loose_object_fn cb, void *data, | ||||
| int for_each_loose_object(struct object_database *odb, | ||||
| 			  each_loose_object_fn cb, void *data, | ||||
| 			  enum for_each_object_flags flags) | ||||
| { | ||||
| 	struct odb_source *source; | ||||
|  | ||||
| 	odb_prepare_alternates(the_repository->objects); | ||||
| 	for (source = the_repository->objects->sources; source; source = source->next) { | ||||
| 		int r = for_each_loose_file_in_objdir(source->path, cb, NULL, | ||||
| 	odb_prepare_alternates(odb); | ||||
| 	for (source = odb->sources; source; source = source->next) { | ||||
| 		int r = for_each_loose_file_in_source(source, cb, NULL, | ||||
| 						      NULL, data); | ||||
| 		if (r) | ||||
| 			return r; | ||||
|  | @ -1472,7 +1448,7 @@ struct oidtree *odb_loose_cache(struct odb_source *source, | |||
| 	uint32_t *bitmap; | ||||
|  | ||||
| 	if (subdir_nr < 0 || | ||||
| 	    subdir_nr >= bitsizeof(source->loose_objects_subdir_seen)) | ||||
| 	    (size_t) subdir_nr >= bitsizeof(source->loose_objects_subdir_seen)) | ||||
| 		BUG("subdir_nr out of range"); | ||||
|  | ||||
| 	bitmap = &source->loose_objects_subdir_seen[word_index]; | ||||
|  | @ -1484,6 +1460,7 @@ struct oidtree *odb_loose_cache(struct odb_source *source, | |||
| 	} | ||||
| 	strbuf_addstr(&buf, source->path); | ||||
| 	for_each_file_in_obj_subdir(subdir_nr, &buf, | ||||
| 				    source->odb->repo->hash_algo, | ||||
| 				    append_loose_object, | ||||
| 				    NULL, NULL, | ||||
| 				    source->loose_objects_cache); | ||||
|  | @ -1504,7 +1481,8 @@ static int check_stream_oid(git_zstream *stream, | |||
| 			    const char *hdr, | ||||
| 			    unsigned long size, | ||||
| 			    const char *path, | ||||
| 			    const struct object_id *expected_oid) | ||||
| 			    const struct object_id *expected_oid, | ||||
| 			    const struct git_hash_algo *algop) | ||||
| { | ||||
| 	struct git_hash_ctx c; | ||||
| 	struct object_id real_oid; | ||||
|  | @ -1512,7 +1490,7 @@ static int check_stream_oid(git_zstream *stream, | |||
| 	unsigned long total_read; | ||||
| 	int status = Z_OK; | ||||
|  | ||||
| 	the_hash_algo->init_fn(&c); | ||||
| 	algop->init_fn(&c); | ||||
| 	git_hash_update(&c, hdr, stream->total_out); | ||||
|  | ||||
| 	/* | ||||
|  | @ -1557,7 +1535,8 @@ static int check_stream_oid(git_zstream *stream, | |||
| 	return 0; | ||||
| } | ||||
|  | ||||
| int read_loose_object(const char *path, | ||||
| int read_loose_object(struct repository *repo, | ||||
| 		      const char *path, | ||||
| 		      const struct object_id *expected_oid, | ||||
| 		      struct object_id *real_oid, | ||||
| 		      void **contents, | ||||
|  | @ -1596,8 +1575,9 @@ int read_loose_object(const char *path, | |||
| 	} | ||||
|  | ||||
| 	if (*oi->typep == OBJ_BLOB && | ||||
| 	    *size > repo_settings_get_big_file_threshold(the_repository)) { | ||||
| 		if (check_stream_oid(&stream, hdr, *size, path, expected_oid) < 0) | ||||
| 	    *size > repo_settings_get_big_file_threshold(repo)) { | ||||
| 		if (check_stream_oid(&stream, hdr, *size, path, expected_oid, | ||||
| 				     repo->hash_algo) < 0) | ||||
| 			goto out_inflate; | ||||
| 	} else { | ||||
| 		*contents = unpack_loose_rest(&stream, hdr, *size, expected_oid); | ||||
|  | @ -1605,7 +1585,7 @@ int read_loose_object(const char *path, | |||
| 			error(_("unable to unpack contents of %s"), path); | ||||
| 			goto out_inflate; | ||||
| 		} | ||||
| 		hash_object_file(the_repository->hash_algo, | ||||
| 		hash_object_file(repo->hash_algo, | ||||
| 				 *contents, *size, | ||||
| 				 *oi->typep, real_oid); | ||||
| 		if (!oideq(expected_oid, real_oid)) | ||||
|  |  | |||
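The loose-object iterators above now receive the object database and its sources explicitly instead of falling back to `the_repository`. As a rough illustration of the resulting calling convention, here is a minimal sketch that is not part of the commit: it counts local loose objects with the new `for_each_loose_object()` signature. The helper names are hypothetical and the include names are assumed from the headers touched by this series.

/*
 * Illustration only: a minimal caller of the refactored iterator.
 * Helper names are hypothetical; includes are assumed.
 */
#include "git-compat-util.h"
#include "odb.h"          /* struct object_database, FOR_EACH_OBJECT_LOCAL_ONLY */
#include "object-file.h"  /* assumed home of for_each_loose_object() */
#include "repository.h"

static int count_loose_cb(const struct object_id *oid, const char *path,
			  void *data)
{
	size_t *count = data;

	(void)oid;
	(void)path;
	(*count)++;
	return 0; /* a non-zero return would stop the iteration early */
}

static size_t count_local_loose_objects(struct repository *repo)
{
	size_t count = 0;

	/* The object database is now an explicit parameter. */
	for_each_loose_object(repo->objects, count_loose_cb, &count,
			      FOR_EACH_OBJECT_LOCAL_ONLY);
	return count;
}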
|  | @ -45,13 +45,12 @@ const char *odb_loose_path(struct odb_source *source, | |||
| 			   const struct object_id *oid); | ||||
|  | ||||
| /* | ||||
|  * Return true iff an alternate object database has a loose object | ||||
|  * Return true iff an object database source has a loose object | ||||
|  * with the specified name.  This function does not respect replace | ||||
|  * references. | ||||
|  */ | ||||
| int has_loose_object_nonlocal(const struct object_id *); | ||||
|  | ||||
| int has_loose_object(const struct object_id *); | ||||
| int has_loose_object(struct odb_source *source, | ||||
| 		     const struct object_id *oid); | ||||
|  | ||||
| void *map_loose_object(struct repository *r, const struct object_id *oid, | ||||
| 		       unsigned long *size); | ||||
|  | @ -87,18 +86,7 @@ typedef int each_loose_cruft_fn(const char *basename, | |||
| typedef int each_loose_subdir_fn(unsigned int nr, | ||||
| 				 const char *path, | ||||
| 				 void *data); | ||||
| int for_each_file_in_obj_subdir(unsigned int subdir_nr, | ||||
| 				struct strbuf *path, | ||||
| 				each_loose_object_fn obj_cb, | ||||
| 				each_loose_cruft_fn cruft_cb, | ||||
| 				each_loose_subdir_fn subdir_cb, | ||||
| 				void *data); | ||||
| int for_each_loose_file_in_objdir(const char *path, | ||||
| 				  each_loose_object_fn obj_cb, | ||||
| 				  each_loose_cruft_fn cruft_cb, | ||||
| 				  each_loose_subdir_fn subdir_cb, | ||||
| 				  void *data); | ||||
| int for_each_loose_file_in_objdir_buf(struct strbuf *path, | ||||
| int for_each_loose_file_in_source(struct odb_source *source, | ||||
| 				  each_loose_object_fn obj_cb, | ||||
| 				  each_loose_cruft_fn cruft_cb, | ||||
| 				  each_loose_subdir_fn subdir_cb, | ||||
|  | @ -111,7 +99,8 @@ int for_each_loose_file_in_objdir_buf(struct strbuf *path, | |||
|  * | ||||
|  * Any flags specific to packs are ignored. | ||||
|  */ | ||||
| int for_each_loose_object(each_loose_object_fn, void *, | ||||
| int for_each_loose_object(struct object_database *odb, | ||||
| 			  each_loose_object_fn, void *, | ||||
| 			  enum for_each_object_flags flags); | ||||
|  | ||||
|  | ||||
|  | @ -157,29 +146,10 @@ enum unpack_loose_header_result unpack_loose_header(git_zstream *stream, | |||
| struct object_info; | ||||
| int parse_loose_header(const char *hdr, struct object_info *oi); | ||||
|  | ||||
| enum { | ||||
| 	/* | ||||
| 	 * By default, `write_object_file()` does not actually write | ||||
| 	 * anything into the object store, but only computes the object ID. | ||||
| 	 * This flag changes that so that the object will be written as a loose | ||||
| 	 * object and persisted. | ||||
| 	 */ | ||||
| 	WRITE_OBJECT_FILE_PERSIST = (1 << 0), | ||||
|  | ||||
| 	/* | ||||
| 	 * Do not print an error in case something gose wrong. | ||||
| 	 */ | ||||
| 	WRITE_OBJECT_FILE_SILENT = (1 << 1), | ||||
| }; | ||||
|  | ||||
| int write_object_file_flags(const void *buf, unsigned long len, | ||||
| int write_object_file(struct odb_source *source, | ||||
| 		      const void *buf, unsigned long len, | ||||
| 		      enum object_type type, struct object_id *oid, | ||||
| 		      struct object_id *compat_oid_in, unsigned flags); | ||||
| static inline int write_object_file(const void *buf, unsigned long len, | ||||
| 				    enum object_type type, struct object_id *oid) | ||||
| { | ||||
| 	return write_object_file_flags(buf, len, type, oid, NULL, 0); | ||||
| } | ||||
|  | ||||
| struct input_stream { | ||||
| 	const void *(*read)(struct input_stream *, unsigned long *len); | ||||
|  | @ -187,10 +157,12 @@ struct input_stream { | |||
| 	int is_finished; | ||||
| }; | ||||
|  | ||||
| int stream_loose_object(struct input_stream *in_stream, size_t len, | ||||
| int stream_loose_object(struct odb_source *source, | ||||
| 			struct input_stream *in_stream, size_t len, | ||||
| 			struct object_id *oid); | ||||
|  | ||||
| int force_object_loose(const struct object_id *oid, time_t mtime); | ||||
| int force_object_loose(struct odb_source *source, | ||||
| 		       const struct object_id *oid, time_t mtime); | ||||
|  | ||||
| /** | ||||
|  * With in-core object data in "buf", rehash it to make sure the | ||||
|  | @ -218,8 +190,10 @@ enum finalize_object_file_flags { | |||
| 	FOF_SKIP_COLLISION_CHECK = 1, | ||||
| }; | ||||
|  | ||||
| int finalize_object_file(const char *tmpfile, const char *filename); | ||||
| int finalize_object_file_flags(const char *tmpfile, const char *filename, | ||||
| int finalize_object_file(struct repository *repo, | ||||
| 			 const char *tmpfile, const char *filename); | ||||
| int finalize_object_file_flags(struct repository *repo, | ||||
| 			       const char *tmpfile, const char *filename, | ||||
| 			       enum finalize_object_file_flags flags); | ||||
|  | ||||
| void hash_object_file(const struct git_hash_algo *algo, const void *buf, | ||||
|  | @ -237,7 +211,8 @@ int check_and_freshen_file(const char *fn, int freshen); | |||
|  * | ||||
|  * Returns 0 on success, negative on error (details may be written to stderr). | ||||
|  */ | ||||
| int read_loose_object(const char *path, | ||||
| int read_loose_object(struct repository *repo, | ||||
| 		      const char *path, | ||||
| 		      const struct object_id *expected_oid, | ||||
| 		      struct object_id *real_oid, | ||||
| 		      void **contents, | ||||
|  |  | |||
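With `has_loose_object()` now taking an `odb_source`, a caller that wants the old repository-wide answer has to walk the source list itself. Below is a minimal sketch, not part of the commit, assuming the `odb_prepare_alternates()` call and `sources` walk shown in the iterator hunk above; the helper name is hypothetical.

/*
 * Illustration only: check every configured object source for a loose copy
 * of an object, now that has_loose_object() is per-source.
 */
#include "git-compat-util.h"
#include "odb.h"
#include "object-file.h"  /* assumed home of has_loose_object() */

static int any_source_has_loose_object(struct object_database *odb,
				       const struct object_id *oid)
{
	struct odb_source *source;

	odb_prepare_alternates(odb); /* load alternates, as the iterator does */
	for (source = odb->sources; source; source = source->next)
		if (has_loose_object(source, oid))
			return 1;
	return 0;
}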
							
								
								
									
odb.c (10 changed lines)
							|  | @ -980,6 +980,16 @@ void odb_assert_oid_type(struct object_database *odb, | |||
| 		    type_name(expect)); | ||||
| } | ||||
|  | ||||
| int odb_write_object_ext(struct object_database *odb, | ||||
| 			 const void *buf, unsigned long len, | ||||
| 			 enum object_type type, | ||||
| 			 struct object_id *oid, | ||||
| 			 struct object_id *compat_oid, | ||||
| 			 unsigned flags) | ||||
| { | ||||
| 	return write_object_file(odb->sources, buf, len, type, oid, compat_oid, flags); | ||||
| } | ||||
|  | ||||
| struct object_database *odb_new(struct repository *repo) | ||||
| { | ||||
| 	struct object_database *o = xmalloc(sizeof(*o)); | ||||
|  |  | |||
							
								
								
									
odb.h (38 changed lines)
							|  | @ -437,6 +437,44 @@ enum for_each_object_flags { | |||
| 	FOR_EACH_OBJECT_SKIP_ON_DISK_KEPT_PACKS = (1<<4), | ||||
| }; | ||||
|  | ||||
| enum { | ||||
| 	/* | ||||
| 	 * By default, `odb_write_object()` does not actually write anything | ||||
| 	 * into the object store, but only computes the object ID. This flag | ||||
| 	 * changes that so that the object will be written as a loose object | ||||
| 	 * and persisted. | ||||
| 	 */ | ||||
| 	WRITE_OBJECT_PERSIST = (1 << 0), | ||||
|  | ||||
| 	/* | ||||
| 	 * Do not print an error in case something goes wrong. | ||||
| 	 */ | ||||
| 	WRITE_OBJECT_SILENT = (1 << 1), | ||||
| }; | ||||
|  | ||||
| /* | ||||
|  * Write an object into the object database. The object is being written into | ||||
|  * the local alternate of the repository. If provided, the converted object ID | ||||
|  * as well as the compatibility object ID are written to the respective | ||||
|  * pointers. | ||||
|  * | ||||
|  * Returns 0 on success, a negative error code otherwise. | ||||
|  */ | ||||
| int odb_write_object_ext(struct object_database *odb, | ||||
| 			 const void *buf, unsigned long len, | ||||
| 			 enum object_type type, | ||||
| 			 struct object_id *oid, | ||||
| 			 struct object_id *compat_oid, | ||||
| 			 unsigned flags); | ||||
|  | ||||
| static inline int odb_write_object(struct object_database *odb, | ||||
| 				   const void *buf, unsigned long len, | ||||
| 				   enum object_type type, | ||||
| 				   struct object_id *oid) | ||||
| { | ||||
| 	return odb_write_object_ext(odb, buf, len, type, oid, NULL, 0); | ||||
| } | ||||
|  | ||||
| /* Compatibility wrappers, to be removed once Git 2.51 has been released. */ | ||||
| #include "repository.h" | ||||
|  | ||||
|  |  | |||
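Call sites convert mechanically: pass `repo->objects` (or whichever `struct object_database` is at hand) as the first argument. The following sketch, not part of the commit, mirrors the converted callers in apply.c and read-cache.c and also shows the extended variant with an explicit flag; helper names are hypothetical, includes are assumed, and the flag semantics are as documented in the enum above.

/*
 * Illustration only: write a blob through the object-database API, in the
 * same shape as the converted call sites elsewhere in this diff.
 */
#include "git-compat-util.h"
#include "odb.h"         /* odb_write_object(), WRITE_OBJECT_PERSIST */
#include "object.h"      /* OBJ_BLOB */
#include "repository.h"

static int store_blob(struct repository *repo, const void *buf,
		      unsigned long len, struct object_id *oid)
{
	return odb_write_object(repo->objects, buf, len, OBJ_BLOB, oid);
}

/*
 * The extended variant exposes the compatibility object ID and the flags;
 * per the enum documentation above, WRITE_OBJECT_PERSIST asks for the object
 * to be written out as a loose object rather than only hashed.
 */
static int store_blob_persistent(struct repository *repo, const void *buf,
				 unsigned long len, struct object_id *oid)
{
	return odb_write_object_ext(repo->objects, buf, len, OBJ_BLOB, oid,
				    NULL, WRITE_OBJECT_PERSIST);
}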
							
								
								
									
pack-write.c (16 changed lines)
							|  | @ -538,22 +538,24 @@ struct hashfile *create_tmp_packfile(struct repository *repo, | |||
| 	return hashfd(repo->hash_algo, fd, *pack_tmp_name); | ||||
| } | ||||
|  | ||||
| static void rename_tmp_packfile(struct strbuf *name_prefix, const char *source, | ||||
| static void rename_tmp_packfile(struct repository *repo, | ||||
| 				struct strbuf *name_prefix, const char *source, | ||||
| 				const char *ext) | ||||
| { | ||||
| 	size_t name_prefix_len = name_prefix->len; | ||||
|  | ||||
| 	strbuf_addstr(name_prefix, ext); | ||||
| 	if (finalize_object_file(source, name_prefix->buf)) | ||||
| 	if (finalize_object_file(repo, source, name_prefix->buf)) | ||||
| 		die("unable to rename temporary file to '%s'", | ||||
| 		    name_prefix->buf); | ||||
| 	strbuf_setlen(name_prefix, name_prefix_len); | ||||
| } | ||||
|  | ||||
| void rename_tmp_packfile_idx(struct strbuf *name_buffer, | ||||
| void rename_tmp_packfile_idx(struct repository *repo, | ||||
| 			     struct strbuf *name_buffer, | ||||
| 			     char **idx_tmp_name) | ||||
| { | ||||
| 	rename_tmp_packfile(name_buffer, *idx_tmp_name, "idx"); | ||||
| 	rename_tmp_packfile(repo, name_buffer, *idx_tmp_name, "idx"); | ||||
| } | ||||
|  | ||||
| void stage_tmp_packfiles(struct repository *repo, | ||||
|  | @ -586,11 +588,11 @@ void stage_tmp_packfiles(struct repository *repo, | |||
| 						    hash); | ||||
| 	} | ||||
|  | ||||
| 	rename_tmp_packfile(name_buffer, pack_tmp_name, "pack"); | ||||
| 	rename_tmp_packfile(repo, name_buffer, pack_tmp_name, "pack"); | ||||
| 	if (rev_tmp_name) | ||||
| 		rename_tmp_packfile(name_buffer, rev_tmp_name, "rev"); | ||||
| 		rename_tmp_packfile(repo, name_buffer, rev_tmp_name, "rev"); | ||||
| 	if (mtimes_tmp_name) | ||||
| 		rename_tmp_packfile(name_buffer, mtimes_tmp_name, "mtimes"); | ||||
| 		rename_tmp_packfile(repo, name_buffer, mtimes_tmp_name, "mtimes"); | ||||
|  | ||||
| 	free(rev_tmp_name); | ||||
| 	free(mtimes_tmp_name); | ||||
|  |  | |||
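Because `finalize_object_file()` and `finalize_object_file_flags()` now take a repository, helpers that install temporary files have to thread it through, as `rename_tmp_packfile()` does above and the tmp-objdir migration does below. A condensed sketch of that pattern follows, with a hypothetical helper name and signatures taken from the header hunk earlier.

/*
 * Illustration only: install a temporary file into its final location,
 * optionally skipping the collision check, using the repository-aware API.
 */
#include "git-compat-util.h"
#include "object-file.h"  /* assumed home of finalize_object_file_flags() */

static int install_tmp_file(struct repository *repo, const char *tmp_path,
			    const char *final_path, int skip_collision_check)
{
	enum finalize_object_file_flags flags =
		skip_collision_check ? FOF_SKIP_COLLISION_CHECK : 0;

	return finalize_object_file_flags(repo, tmp_path, final_path, flags);
}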
							
								
								
									
pack.h (3 changed lines)
							|  | @ -145,7 +145,8 @@ void stage_tmp_packfiles(struct repository *repo, | |||
| 			 struct pack_idx_option *pack_idx_opts, | ||||
| 			 unsigned char hash[], | ||||
| 			 char **idx_tmp_name); | ||||
| void rename_tmp_packfile_idx(struct strbuf *basename, | ||||
| void rename_tmp_packfile_idx(struct repository *repo, | ||||
| 			     struct strbuf *basename, | ||||
| 			     char **idx_tmp_name); | ||||
|  | ||||
| #endif | ||||
|  |  | |||
|  | @ -40,7 +40,7 @@ void prune_packed_objects(int opts) | |||
| 		progress = start_delayed_progress(the_repository, | ||||
| 						  _("Removing duplicate objects"), 256); | ||||
|  | ||||
| 	for_each_loose_file_in_objdir(repo_get_object_directory(the_repository), | ||||
| 	for_each_loose_file_in_source(the_repository->objects->sources, | ||||
| 				      prune_object, NULL, prune_subdir, &opts); | ||||
|  | ||||
| 	/* Ensure we show 100% before finishing progress */ | ||||
|  |  | |||
|  | @ -319,7 +319,7 @@ int add_unseen_recent_objects_to_traversal(struct rev_info *revs, | |||
| 	oidset_init(&data.extra_recent_oids, 0); | ||||
| 	data.extra_recent_oids_loaded = 0; | ||||
|  | ||||
| 	r = for_each_loose_object(add_recent_loose, &data, | ||||
| 	r = for_each_loose_object(the_repository->objects, add_recent_loose, &data, | ||||
| 				  FOR_EACH_OBJECT_LOCAL_ONLY); | ||||
| 	if (r) | ||||
| 		goto done; | ||||
|  |  | |||
|  | @ -690,7 +690,7 @@ static struct cache_entry *create_alias_ce(struct index_state *istate, | |||
| void set_object_name_for_intent_to_add_entry(struct cache_entry *ce) | ||||
| { | ||||
| 	struct object_id oid; | ||||
| 	if (write_object_file("", 0, OBJ_BLOB, &oid)) | ||||
| 	if (odb_write_object(the_repository->objects, "", 0, OBJ_BLOB, &oid)) | ||||
| 		die(_("cannot create an empty blob in the object database")); | ||||
| 	oidcpy(&ce->oid, &oid); | ||||
| } | ||||
|  |  | |||
|  | @ -227,7 +227,7 @@ static int migrate_one(struct tmp_objdir *t, | |||
| 			return -1; | ||||
| 		return migrate_paths(t, src, dst, flags); | ||||
| 	} | ||||
| 	return finalize_object_file_flags(src->buf, dst->buf, flags); | ||||
| 	return finalize_object_file_flags(t->repo, src->buf, dst->buf, flags); | ||||
| } | ||||
|  | ||||
| static int is_loose_object_shard(const char *name) | ||||
|  |  | |||
Junio C Hamano