#include "builtin.h"
#include "cache.h"
#include "progress.h"

static const char prune_packed_usage[] =
"git-prune-packed [-n] [-q]";

#define DRY_RUN 01
#define VERBOSE 02

static struct progress *progress;
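/*
 * Walk one fan-out directory of loose objects and remove every object
 * that is also available in a pack file (or just report it with -n).
 */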
static void prune_dir(int i, DIR *dir, char *pathname, int len, int opts)
{
	struct dirent *de;
	char hex[40];

	sprintf(hex, "%02x", i);
	while ((de = readdir(dir)) != NULL) {
		unsigned char sha1[20];
		if (strlen(de->d_name) != 38)
			continue;
		memcpy(hex+2, de->d_name, 38);
		if (get_sha1_hex(hex, sha1))
			continue;
		if (!has_sha1_pack(sha1, NULL))
			continue;
		memcpy(pathname + len, de->d_name, 38);
		if (opts & DRY_RUN)
			printf("rm -f %s\n", pathname);
		else if (unlink(pathname) < 0)
			error("unable to unlink %s", pathname);
		display_progress(progress, i + 1);
	}
	pathname[len] = 0;
	/* remove the fan-out directory if it has become empty; ignore failure */
	rmdir(pathname);
}
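/*
 * Scan all 256 fan-out directories under the object directory, pruning
 * loose objects that already exist in packs, then try to remove each
 * emptied directory.
 */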
void prune_packed_objects(int opts)
{
	int i;
	static char pathname[PATH_MAX];
	const char *dir = get_object_directory();
	int len = strlen(dir);

	if (opts == VERBOSE)
		progress = start_progress_delay("Removing duplicate objects",
						256, 95, 2);

	if (len > PATH_MAX - 42)
		die("impossible object directory");
	memcpy(pathname, dir, len);
	if (len && pathname[len-1] != '/')
		pathname[len++] = '/';
	/* loose objects live in 256 fan-out directories named 00/ .. ff/ */
	for (i = 0; i < 256; i++) {
		DIR *d;

		sprintf(pathname + len, "%02x/", i);
		d = opendir(pathname);
		if (!d)
			continue;
		prune_dir(i, d, pathname, len + 3, opts);
		closedir(d);
	}
	stop_progress(&progress);
}
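/*
 * "git prune-packed [-n] [-q]": -n only lists what would be removed,
 * -q suppresses the progress display.
 */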
int cmd_prune_packed(int argc, const char **argv, const char *prefix)
{
	int i;
	int opts = VERBOSE;

	for (i = 1; i < argc; i++) {
		const char *arg = argv[i];

		if (*arg == '-') {
			if (!strcmp(arg, "-n"))
				opts |= DRY_RUN;
			else if (!strcmp(arg, "-q"))
				opts &= ~VERBOSE;
			else
				usage(prune_packed_usage);
			continue;
		}
		/* Handle arguments here .. */
		usage(prune_packed_usage);
	}
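	/*
	 * Objects are about to be deleted on the assumption that packed
	 * copies exist; sync() first so a crash cannot leave us with the
	 * loose objects gone and the packs not yet on disk.
	 */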
	sync();
	prune_packed_objects(opts);
	return 0;
}