
upload-pack: delegate rev walking in shallow fetch to pack-objects

upload-pack has special revision walking code for shallow
recipients. It works almost like the similar code in pack-objects,
except that:

1. in upload-pack, graft points could be added for deepening;

2. also, when the repository is deepened, the shallow point will be
   moved further away from the tip, but the old shallow point will be
   marked as an edge to produce more efficient packs. See 6523078 (make
   shallow repository deepening more network efficient - 2009-09-03).
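
For illustration only (this is not git's own parser): the shallow file
behind these graft points is simply a list of commit ids, one per line,
and each listed commit is treated as if it had no parents, which is what
cuts history off at the shallow boundary. A minimal standalone reader
might look like this:

#include <stdio.h>
#include <string.h>

/* Toy reader for the shallow file format: one 40-hex commit id per
 * line; every listed commit is a graft point with zero parents. */
static int read_shallow_points(const char *path)
{
	char line[256];
	FILE *fp = fopen(path, "r");
	int nr = 0;

	if (!fp)
		return -1;
	while (fgets(line, sizeof(line), fp)) {
		size_t len = strcspn(line, "\r\n");
		line[len] = '\0';
		if (len != 40)	/* not a SHA-1 hex id, skip */
			continue;
		printf("graft (no parents): %s\n", line);
		nr++;
	}
	fclose(fp);
	return nr;
}

int main(int argc, char **argv)
{
	return (argc > 1 && read_shallow_points(argv[1]) >= 0) ? 0 : 1;
}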

Pass the shallow information to pack-objects in a temporary shallow
file, via --shallow-file. This will override $GIT_DIR/shallow and give
pack-objects the exact repository shape that upload-pack has.
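
As a rough sketch (not the actual upload-pack code, which also passes
progress and delta-reuse options and feeds the revs on stdin), the
invocation now puts --shallow-file before the subcommand, since that
option is handled by the git wrapper rather than by pack-objects itself:

#include <stdio.h>
#include <unistd.h>

/* Sketch of the command line upload-pack hands to the run-command
 * machinery for a shallow client; shallow_file is the temporary file
 * written by setup_temporary_shallow(). */
static void spawn_pack_objects(const char *shallow_file)
{
	const char *argv[] = {
		"git",
		"--shallow-file", shallow_file,	/* overrides $GIT_DIR/shallow */
		"pack-objects", "--revs", "--thin", "--stdout",
		NULL
	};
	execvp(argv[0], (char *const *)argv);
	perror("execvp");	/* only reached if exec failed */
}

int main(int argc, char **argv)
{
	if (argc > 1)
		spawn_pack_objects(argv[1]);
	return 1;
}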

Mark edge commits by revision command arguments. Even if old shallow
points are passed as "--not" revisions, as in this patch, they will not
be picked up by mark_edges_uninteresting(), because that function looks
at parents for edges, while in this case the edges are the children,
i.e. the relationship runs in the opposite direction. This will be
fixed in a later patch, when all given uninteresting commits are marked
as edges.
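
A toy model of the problem (simplified, not git's actual data
structures): an edge finder that only looks at parents never visits an
uninteresting commit that sits on the child side, which is exactly
where the old shallow point ends up after deepening:

#include <stdio.h>

#define UNINTERESTING 1

struct toy_commit {
	const char *name;
	unsigned flags;
	struct toy_commit *parents[2];	/* NULL-terminated */
};

/* Parent-directed edge scan, in the spirit of
 * mark_edges_uninteresting(): report any UNINTERESTING parent of a
 * commit that will be sent. */
static void mark_edges(struct toy_commit **to_send, int nr)
{
	int i, j;

	for (i = 0; i < nr; i++)
		for (j = 0; to_send[i]->parents[j]; j++)
			if (to_send[i]->parents[j]->flags & UNINTERESTING)
				printf("edge: %s\n", to_send[i]->parents[j]->name);
}

int main(void)
{
	/* History: new_shallow <- middle <- old_shallow (child of middle).
	 * After deepening, middle and new_shallow are sent; old_shallow is
	 * what the client already has, so it is marked UNINTERESTING. */
	struct toy_commit new_shallow = { "new-shallow", 0, { NULL, NULL } };
	struct toy_commit middle = { "middle", 0, { &new_shallow, NULL } };
	struct toy_commit old_shallow = { "old-shallow", UNINTERESTING, { &middle, NULL } };
	struct toy_commit *to_send[] = { &middle, &new_shallow };

	(void)old_shallow;
	mark_edges(to_send, 2);	/* prints nothing: old-shallow is never visited */
	return 0;
}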

Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Author:    Nguyễn Thái Ngọc Duy (11 years ago)
Committer: Junio C Hamano
Commit:    cdab485853

 t/t5530-upload-pack-error.sh |   3
 upload-pack.c                | 128

t/t5530-upload-pack-error.sh

@@ -54,9 +54,6 @@ test_expect_success 'upload-pack fails due to error in rev-list' '
printf "0032want %s\n0034shallow %s00000009done\n0000" \
$(git rev-parse HEAD) $(git rev-parse HEAD^) >input &&
test_must_fail git upload-pack . <input >/dev/null 2>output.err &&
# pack-objects survived
grep "Total.*, reused" output.err &&
# but there was an error, which must have been in rev-list
grep "bad tree object" output.err
'


upload-pack.c

@@ -68,87 +68,28 @@ static ssize_t send_client_data(int fd, const char *data, ssize_t sz)
 	return sz;
 }

-static FILE *pack_pipe = NULL;
-static void show_commit(struct commit *commit, void *data)
-{
-	if (commit->object.flags & BOUNDARY)
-		fputc('-', pack_pipe);
-	if (fputs(sha1_to_hex(commit->object.sha1), pack_pipe) < 0)
-		die("broken output pipe");
-	fputc('\n', pack_pipe);
-	fflush(pack_pipe);
-	free(commit->buffer);
-	commit->buffer = NULL;
-}
-
-static void show_object(struct object *obj,
-			const struct name_path *path, const char *component,
-			void *cb_data)
-{
-	show_object_with_name(pack_pipe, obj, path, component);
-}
-
-static void show_edge(struct commit *commit)
-{
-	fprintf(pack_pipe, "-%s\n", sha1_to_hex(commit->object.sha1));
-}
-
-static int do_rev_list(int in, int out, void *user_data)
-{
-	int i;
-	struct rev_info revs;
-
-	pack_pipe = xfdopen(out, "w");
-	init_revisions(&revs, NULL);
-	revs.tag_objects = 1;
-	revs.tree_objects = 1;
-	revs.blob_objects = 1;
-	if (use_thin_pack)
-		revs.edge_hint = 1;
-
-	for (i = 0; i < want_obj.nr; i++) {
-		struct object *o = want_obj.objects[i].item;
-		/* why??? */
-		o->flags &= ~UNINTERESTING;
-		add_pending_object(&revs, o, NULL);
-	}
-	for (i = 0; i < have_obj.nr; i++) {
-		struct object *o = have_obj.objects[i].item;
-		o->flags |= UNINTERESTING;
-		add_pending_object(&revs, o, NULL);
-	}
-	setup_revisions(0, NULL, &revs, NULL);
-	if (prepare_revision_walk(&revs))
-		die("revision walk setup failed");
-	mark_edges_uninteresting(revs.commits, &revs, show_edge);
-	if (use_thin_pack)
-		for (i = 0; i < extra_edge_obj.nr; i++)
-			fprintf(pack_pipe, "-%s\n", sha1_to_hex(
-					extra_edge_obj.objects[i].item->sha1));
-	traverse_commit_list(&revs, show_commit, show_object, NULL);
-	fflush(pack_pipe);
-	fclose(pack_pipe);
-	return 0;
-}
-
 static void create_pack_file(void)
 {
-	struct async rev_list;
 	struct child_process pack_objects;
 	char data[8193], progress[128];
 	char abort_msg[] = "aborting due to possible repository "
 			"corruption on the remote side.";
 	int buffered = -1;
 	ssize_t sz;
-	const char *argv[10];
-	int arg = 0;
+	const char *argv[12];
+	int i, arg = 0;
+	FILE *pipe_fd;
+	char *shallow_file = NULL;

-	argv[arg++] = "pack-objects";
-	if (!shallow_nr) {
-		argv[arg++] = "--revs";
-		if (use_thin_pack)
-			argv[arg++] = "--thin";
+	if (shallow_nr) {
+		shallow_file = setup_temporary_shallow();
+		argv[arg++] = "--shallow-file";
+		argv[arg++] = shallow_file;
 	}
+	argv[arg++] = "pack-objects";
+	argv[arg++] = "--revs";
+	if (use_thin_pack)
+		argv[arg++] = "--thin";

 	argv[arg++] = "--stdout";
 	if (!no_progress)
@@ -169,29 +110,21 @@ static void create_pack_file(void)
 	if (start_command(&pack_objects))
 		die("git upload-pack: unable to fork git-pack-objects");

-	if (shallow_nr) {
-		memset(&rev_list, 0, sizeof(rev_list));
-		rev_list.proc = do_rev_list;
-		rev_list.out = pack_objects.in;
-		if (start_async(&rev_list))
-			die("git upload-pack: unable to fork git-rev-list");
-	}
-	else {
-		FILE *pipe_fd = xfdopen(pack_objects.in, "w");
-		int i;
-
-		for (i = 0; i < want_obj.nr; i++)
-			fprintf(pipe_fd, "%s\n",
-				sha1_to_hex(want_obj.objects[i].item->sha1));
-		fprintf(pipe_fd, "--not\n");
-		for (i = 0; i < have_obj.nr; i++)
-			fprintf(pipe_fd, "%s\n",
-				sha1_to_hex(have_obj.objects[i].item->sha1));
-		fprintf(pipe_fd, "\n");
-		fflush(pipe_fd);
-		fclose(pipe_fd);
-	}
+	pipe_fd = xfdopen(pack_objects.in, "w");
+
+	for (i = 0; i < want_obj.nr; i++)
+		fprintf(pipe_fd, "%s\n",
+			sha1_to_hex(want_obj.objects[i].item->sha1));
+	fprintf(pipe_fd, "--not\n");
+	for (i = 0; i < have_obj.nr; i++)
+		fprintf(pipe_fd, "%s\n",
+			sha1_to_hex(have_obj.objects[i].item->sha1));
+	for (i = 0; i < extra_edge_obj.nr; i++)
+		fprintf(pipe_fd, "%s\n",
+			sha1_to_hex(extra_edge_obj.objects[i].item->sha1));
+	fprintf(pipe_fd, "\n");
+	fflush(pipe_fd);
+	fclose(pipe_fd);

 	/* We read from pack_objects.err to capture stderr output for
 	 * progress bar, and pack_objects.out to capture the pack data.
@@ -290,8 +223,11 @@ static void create_pack_file(void)
error("git upload-pack: git-pack-objects died with error.");
goto fail;
}
if (shallow_nr && finish_async(&rev_list))
goto fail; /* error was already reported */
if (shallow_file) {
if (*shallow_file)
unlink(shallow_file);
free(shallow_file);
}

/* flush the data */
if (0 <= buffered) {
