|
|
|
#include "cache.h"
|
|
|
|
#include "fetch.h"
|
|
|
|
#include "commit.h"
|
|
|
|
#include "tree.h"
|
|
|
|
#include "tree-walk.h"
|
|
|
|
#include "tag.h"
|
|
|
|
#include "blob.h"
|
|
|
|
#include "refs.h"
|
|
|
|
#include "strbuf.h"
|
|
|
|
|
|
|
|
/*
 * Fetch behavior flags; set by the transport front-ends before
 * calling pull().
 */
int get_tree = 0;		/* also process the tree of each walked commit */
int get_history = 0;		/* follow parent commits recursively */
int get_all = 0;		/* keep fetching trees for all history, not just the tip */
int get_verbosely = 0;		/* trace each walked object on stderr (see pull_say()) */
int get_recover = 0;		/* do not assume existing refs are complete */

/* Commit currently being walked; used for error reporting in report_missing(). */
static unsigned char current_commit_sha1[20];
|
|
|
|
|
|
|
|
void pull_say(const char *fmt, const char *hex)
|
|
|
|
{
|
|
|
|
if (get_verbosely)
|
|
|
|
fprintf(stderr, fmt, hex);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void report_missing(const struct object *obj)
|
|
|
|
{
|
|
|
|
char missing_hex[41];
|
|
|
|
strcpy(missing_hex, sha1_to_hex(obj->sha1));;
|
|
|
|
fprintf(stderr, "Cannot obtain needed %s %s\n",
|
|
|
|
obj->type ? typename(obj->type): "object", missing_hex);
|
|
|
|
if (!is_null_sha1(current_commit_sha1))
|
|
|
|
fprintf(stderr, "while processing commit %s.\n",
|
|
|
|
sha1_to_hex(current_commit_sha1));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Forward declaration: process() is mutually recursive with the
 * process_tree()/process_commit()/process_tag() helpers below. */
static int process(struct object *obj);
|
|
|
|
|
|
|
|
static int process_tree(struct tree *tree)
|
|
|
|
{
|
|
|
|
struct tree_desc desc;
|
tree_entry(): new tree-walking helper function
This adds a "tree_entry()" function that combines the common operation of
doing a "tree_entry_extract()" + "update_tree_entry()".
It also has a simplified calling convention, designed for simple loops
that traverse over a whole tree: the arguments are pointers to the tree
descriptor and a name_entry structure to fill in, and it returns a boolean
"true" if there was an entry left to be gotten in the tree.
This allows tree traversal with
struct tree_desc desc;
struct name_entry entry;
desc.buf = tree->buffer;
desc.size = tree->size;
while (tree_entry(&desc, &entry) {
... use "entry.{path, sha1, mode, pathlen}" ...
}
which is not only shorter than writing it out in full, it's hopefully less
error prone too.
[ It's actually a tad faster too - we don't need to recalculate the entry
pathlength in both extract and update, but need to do it only once.
Also, some callers can avoid doing a "strlen()" on the result, since
it's returned as part of the name_entry structure.
However, by now we're talking just 1% speedup on "git-rev-list --objects
--all", and we're definitely at the point where tree walking is no
longer the issue any more. ]
NOTE! Not everybody wants to use this new helper function, since some of
the tree walkers very much on purpose do the descriptor update separately
from the entry extraction. So the "extract + update" sequence still
remains as the core sequence, this is just a simplified interface.
We should probably add a silly two-line inline helper function for
initializing the descriptor from the "struct tree" too, just to cut down
on the noise from that common "desc" initializer.
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
19 years ago
|
|
|
struct name_entry entry;
|
|
|
|
|
|
|
|
if (parse_tree(tree))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
desc.buf = tree->buffer;
|
|
|
|
desc.size = tree->size;
|
tree_entry(): new tree-walking helper function
This adds a "tree_entry()" function that combines the common operation of
doing a "tree_entry_extract()" + "update_tree_entry()".
It also has a simplified calling convention, designed for simple loops
that traverse over a whole tree: the arguments are pointers to the tree
descriptor and a name_entry structure to fill in, and it returns a boolean
"true" if there was an entry left to be gotten in the tree.
This allows tree traversal with
struct tree_desc desc;
struct name_entry entry;
desc.buf = tree->buffer;
desc.size = tree->size;
while (tree_entry(&desc, &entry) {
... use "entry.{path, sha1, mode, pathlen}" ...
}
which is not only shorter than writing it out in full, it's hopefully less
error prone too.
[ It's actually a tad faster too - we don't need to recalculate the entry
pathlength in both extract and update, but need to do it only once.
Also, some callers can avoid doing a "strlen()" on the result, since
it's returned as part of the name_entry structure.
However, by now we're talking just 1% speedup on "git-rev-list --objects
--all", and we're definitely at the point where tree walking is no
longer the issue any more. ]
NOTE! Not everybody wants to use this new helper function, since some of
the tree walkers very much on purpose do the descriptor update separately
from the entry extraction. So the "extract + update" sequence still
remains as the core sequence, this is just a simplified interface.
We should probably add a silly two-line inline helper function for
initializing the descriptor from the "struct tree" too, just to cut down
on the noise from that common "desc" initializer.
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
19 years ago
|
|
|
while (tree_entry(&desc, &entry)) {
|
|
|
|
struct object *obj = NULL;
|
|
|
|
|
tree_entry(): new tree-walking helper function
This adds a "tree_entry()" function that combines the common operation of
doing a "tree_entry_extract()" + "update_tree_entry()".
It also has a simplified calling convention, designed for simple loops
that traverse over a whole tree: the arguments are pointers to the tree
descriptor and a name_entry structure to fill in, and it returns a boolean
"true" if there was an entry left to be gotten in the tree.
This allows tree traversal with
struct tree_desc desc;
struct name_entry entry;
desc.buf = tree->buffer;
desc.size = tree->size;
while (tree_entry(&desc, &entry) {
... use "entry.{path, sha1, mode, pathlen}" ...
}
which is not only shorter than writing it out in full, it's hopefully less
error prone too.
[ It's actually a tad faster too - we don't need to recalculate the entry
pathlength in both extract and update, but need to do it only once.
Also, some callers can avoid doing a "strlen()" on the result, since
it's returned as part of the name_entry structure.
However, by now we're talking just 1% speedup on "git-rev-list --objects
--all", and we're definitely at the point where tree walking is no
longer the issue any more. ]
NOTE! Not everybody wants to use this new helper function, since some of
the tree walkers very much on purpose do the descriptor update separately
from the entry extraction. So the "extract + update" sequence still
remains as the core sequence, this is just a simplified interface.
We should probably add a silly two-line inline helper function for
initializing the descriptor from the "struct tree" too, just to cut down
on the noise from that common "desc" initializer.
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
19 years ago
|
|
|
if (S_ISDIR(entry.mode)) {
|
|
|
|
struct tree *tree = lookup_tree(entry.sha1);
|
|
|
|
if (tree)
|
|
|
|
obj = &tree->object;
|
|
|
|
}
|
|
|
|
else {
|
tree_entry(): new tree-walking helper function
This adds a "tree_entry()" function that combines the common operation of
doing a "tree_entry_extract()" + "update_tree_entry()".
It also has a simplified calling convention, designed for simple loops
that traverse over a whole tree: the arguments are pointers to the tree
descriptor and a name_entry structure to fill in, and it returns a boolean
"true" if there was an entry left to be gotten in the tree.
This allows tree traversal with
struct tree_desc desc;
struct name_entry entry;
desc.buf = tree->buffer;
desc.size = tree->size;
while (tree_entry(&desc, &entry) {
... use "entry.{path, sha1, mode, pathlen}" ...
}
which is not only shorter than writing it out in full, it's hopefully less
error prone too.
[ It's actually a tad faster too - we don't need to recalculate the entry
pathlength in both extract and update, but need to do it only once.
Also, some callers can avoid doing a "strlen()" on the result, since
it's returned as part of the name_entry structure.
However, by now we're talking just 1% speedup on "git-rev-list --objects
--all", and we're definitely at the point where tree walking is no
longer the issue any more. ]
NOTE! Not everybody wants to use this new helper function, since some of
the tree walkers very much on purpose do the descriptor update separately
from the entry extraction. So the "extract + update" sequence still
remains as the core sequence, this is just a simplified interface.
We should probably add a silly two-line inline helper function for
initializing the descriptor from the "struct tree" too, just to cut down
on the noise from that common "desc" initializer.
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
19 years ago
|
|
|
struct blob *blob = lookup_blob(entry.sha1);
|
|
|
|
if (blob)
|
|
|
|
obj = &blob->object;
|
|
|
|
}
|
|
|
|
if (!obj || process(obj))
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
free(tree->buffer);
|
|
|
|
tree->buffer = NULL;
|
|
|
|
tree->size = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Object flags used during the walk. */
#define COMPLETE	(1U << 0)	/* reachable from a local ref; need not be fetched */
#define SEEN		(1U << 1)	/* already queued by process() */
#define TO_SCAN		(1U << 2)	/* present locally; only needs content scanning */

/* Commits known to be complete, kept sorted by date (newest first);
 * populated by mark_complete() and consumed in process_commit(). */
static struct commit_list *complete = NULL;
|
|
|
|
|
|
|
|
static int process_commit(struct commit *commit)
|
|
|
|
{
|
|
|
|
if (parse_commit(commit))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
while (complete && complete->item->date >= commit->date) {
|
|
|
|
pop_most_recent_commit(&complete, COMPLETE);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (commit->object.flags & COMPLETE)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
hashcpy(current_commit_sha1, commit->object.sha1);
|
|
|
|
|
|
|
|
pull_say("walk %s\n", sha1_to_hex(commit->object.sha1));
|
|
|
|
|
|
|
|
if (get_tree) {
|
|
|
|
if (process(&commit->tree->object))
|
|
|
|
return -1;
|
|
|
|
if (!get_all)
|
|
|
|
get_tree = 0;
|
|
|
|
}
|
|
|
|
if (get_history) {
|
|
|
|
struct commit_list *parents = commit->parents;
|
|
|
|
for (; parents; parents = parents->next) {
|
|
|
|
if (process(&parents->item->object))
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int process_tag(struct tag *tag)
|
|
|
|
{
|
|
|
|
if (parse_tag(tag))
|
|
|
|
return -1;
|
|
|
|
return process(tag->tagged);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* FIFO of objects waiting to be fetched and/or scanned.  The tail
 * pointer gives O(1) append in process(); loop() drains the list. */
static struct object_list *process_queue = NULL;
static struct object_list **process_queue_end = &process_queue;
|
|
|
|
|
|
|
|
static int process_object(struct object *obj)
|
|
|
|
{
|
|
|
|
if (obj->type == OBJ_COMMIT) {
|
|
|
|
if (process_commit((struct commit *)obj))
|
|
|
|
return -1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (obj->type == OBJ_TREE) {
|
|
|
|
if (process_tree((struct tree *)obj))
|
|
|
|
return -1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (obj->type == OBJ_BLOB) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (obj->type == OBJ_TAG) {
|
|
|
|
if (process_tag((struct tag *)obj))
|
|
|
|
return -1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return error("Unable to determine requirements "
|
|
|
|
"of type %s for %s",
|
Shrink "struct object" a bit
This shrinks "struct object" by a small amount, by getting rid of the
"struct type *" pointer and replacing it with a 3-bit bitfield instead.
In addition, we merge the bitfields and the "flags" field, which
incidentally should also remove a useless 4-byte padding from the object
when in 64-bit mode.
Now, our "struct object" is still too damn large, but it's now less
obviously bloated, and of the remaining fields, only the "util" (which is
not used by most things) is clearly something that should be eventually
discarded.
This shrinks the "git-rev-list --all" memory use by about 2.5% on the
kernel archive (and, perhaps more importantly, on the larger mozilla
archive). That may not sound like much, but I suspect it's more on a
64-bit platform.
There are other remaining inefficiencies (the parent lists, for example,
probably have horrible malloc overhead), but this was pretty obvious.
Most of the patch is just changing the comparison of the "type" pointer
from one of the constant string pointers to the appropriate new TYPE_xxx
small integer constant.
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
19 years ago
|
|
|
typename(obj->type), sha1_to_hex(obj->sha1));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int process(struct object *obj)
|
|
|
|
{
|
|
|
|
if (obj->flags & SEEN)
|
|
|
|
return 0;
|
|
|
|
obj->flags |= SEEN;
|
|
|
|
|
|
|
|
if (has_sha1_file(obj->sha1)) {
|
|
|
|
/* We already have it, so we should scan it now. */
|
|
|
|
obj->flags |= TO_SCAN;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
if (obj->flags & COMPLETE)
|
|
|
|
return 0;
|
|
|
|
prefetch(obj->sha1);
|
|
|
|
}
|
|
|
|
|
|
|
|
object_list_insert(obj, process_queue_end);
|
|
|
|
process_queue_end = &(*process_queue_end)->next;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int loop(void)
|
|
|
|
{
|
|
|
|
struct object_list *elem;
|
|
|
|
|
|
|
|
while (process_queue) {
|
|
|
|
struct object *obj = process_queue->item;
|
|
|
|
elem = process_queue;
|
|
|
|
process_queue = elem->next;
|
|
|
|
free(elem);
|
|
|
|
if (!process_queue)
|
|
|
|
process_queue_end = &process_queue;
|
|
|
|
|
|
|
|
/* If we are not scanning this object, we placed it in
|
|
|
|
* the queue because we needed to fetch it first.
|
|
|
|
*/
|
|
|
|
if (! (obj->flags & TO_SCAN)) {
|
|
|
|
if (fetch(obj->sha1)) {
|
|
|
|
report_missing(obj);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!obj->type)
|
|
|
|
parse_object(obj->sha1);
|
|
|
|
if (process_object(obj))
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Resolve a user-supplied target into an object name.  A raw 40-hex
 * string is taken as-is; otherwise it is tried as a well-formed ref
 * name to look up on the remote.  Returns 0 with sha1 filled in, or
 * -1 if the target cannot be interpreted.
 */
static int interpret_target(char *target, unsigned char *sha1)
{
	if (!get_sha1_hex(target, sha1))
		return 0;
	if (!check_ref_format(target) && !fetch_ref(target, sha1))
		return 0;
	return -1;
}
|
|
|
|
|
|
|
|
static int mark_complete(const char *path, const unsigned char *sha1, int flag, void *cb_data)
|
|
|
|
{
|
|
|
|
struct commit *commit = lookup_commit_reference_gently(sha1, 1);
|
|
|
|
if (commit) {
|
|
|
|
commit->object.flags |= COMPLETE;
|
|
|
|
insert_by_date(commit, &complete);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int pull_targets_stdin(char ***target, const char ***write_ref)
|
|
|
|
{
|
|
|
|
int targets = 0, targets_alloc = 0;
|
|
|
|
struct strbuf buf;
|
|
|
|
*target = NULL; *write_ref = NULL;
|
|
|
|
strbuf_init(&buf);
|
|
|
|
while (1) {
|
|
|
|
char *rf_one = NULL;
|
|
|
|
char *tg_one;
|
|
|
|
|
|
|
|
read_line(&buf, stdin, '\n');
|
|
|
|
if (buf.eof)
|
|
|
|
break;
|
|
|
|
tg_one = buf.buf;
|
|
|
|
rf_one = strchr(tg_one, '\t');
|
|
|
|
if (rf_one)
|
|
|
|
*rf_one++ = 0;
|
|
|
|
|
|
|
|
if (targets >= targets_alloc) {
|
|
|
|
targets_alloc = targets_alloc ? targets_alloc * 2 : 64;
|
|
|
|
*target = xrealloc(*target, targets_alloc * sizeof(**target));
|
|
|
|
*write_ref = xrealloc(*write_ref, targets_alloc * sizeof(**write_ref));
|
|
|
|
}
|
|
|
|
(*target)[targets] = xstrdup(tg_one);
|
|
|
|
(*write_ref)[targets] = rf_one ? xstrdup(rf_one) : NULL;
|
|
|
|
targets++;
|
|
|
|
}
|
|
|
|
return targets;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Release the strings allocated by pull_targets_stdin().  The caller
 * still owns (and must free) the "target" and "write_ref" arrays
 * themselves.  "write_ref" may be NULL.
 */
void pull_targets_free(int targets, char **target, const char **write_ref)
{
	while (targets--) {
		free(target[targets]);
		/* free(NULL) is a no-op, so no per-entry NULL check is needed */
		if (write_ref)
			free((char *) write_ref[targets]);
	}
}
|
|
|
|
|
|
|
|
int pull(int targets, char **target, const char **write_ref,
|
|
|
|
const char *write_ref_log_details)
|
|
|
|
{
|
|
|
|
struct ref_lock **lock = xcalloc(targets, sizeof(struct ref_lock *));
|
|
|
|
unsigned char *sha1 = xmalloc(targets * 20);
|
|
|
|
char *msg;
|
|
|
|
int ret;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
save_commit_buffer = 0;
|
|
|
|
track_object_refs = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < targets; i++) {
|
|
|
|
if (!write_ref || !write_ref[i])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
lock[i] = lock_ref_sha1(write_ref[i], NULL);
|
|
|
|
if (!lock[i]) {
|
|
|
|
error("Can't lock ref %s", write_ref[i]);
|
|
|
|
goto unlock_and_fail;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!get_recover)
|
|
|
|
for_each_ref(mark_complete, NULL);
|
|
|
|
|
|
|
|
for (i = 0; i < targets; i++) {
|
|
|
|
if (interpret_target(target[i], &sha1[20 * i])) {
|
|
|
|
error("Could not interpret %s as something to pull", target[i]);
|
|
|
|
goto unlock_and_fail;
|
|
|
|
}
|
|
|
|
if (process(lookup_unknown_object(&sha1[20 * i])))
|
|
|
|
goto unlock_and_fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (loop())
|
|
|
|
goto unlock_and_fail;
|
|
|
|
|
|
|
|
if (write_ref_log_details) {
|
|
|
|
msg = xmalloc(strlen(write_ref_log_details) + 12);
|
|
|
|
sprintf(msg, "fetch from %s", write_ref_log_details);
|
|
|
|
} else {
|
|
|
|
msg = NULL;
|
|
|
|
}
|
|
|
|
for (i = 0; i < targets; i++) {
|
|
|
|
if (!write_ref || !write_ref[i])
|
|
|
|
continue;
|
|
|
|
ret = write_ref_sha1(lock[i], &sha1[20 * i], msg ? msg : "fetch (unknown)");
|
|
|
|
lock[i] = NULL;
|
|
|
|
if (ret)
|
|
|
|
goto unlock_and_fail;
|
|
|
|
}
|
|
|
|
free(msg);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
|
|
unlock_and_fail:
|
|
|
|
for (i = 0; i < targets; i++)
|
|
|
|
if (lock[i])
|
|
|
|
unlock_ref(lock[i]);
|
|
|
|
return -1;
|
|
|
|
}
|