Merge branch 'rs/packfile-bad-object-list-in-oidset'
Replace a handcrafted data structure used to keep track of bad objects in the packfile API with an oidset.

* rs/packfile-bad-object-list-in-oidset:
  packfile: use oidset for bad objects
  packfile: convert has_packed_and_bad() to object_id
  packfile: convert mark_bad_packed_object() to object_id
  midx: inline nth_midxed_pack_entry()
  oidset: make oidset_size() an inline function

maint
commit
28caad63d0
37
midx.c
37
midx.c
|
@@ -283,14 +283,18 @@ uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
|
|||
(off_t)pos * MIDX_CHUNK_OFFSET_WIDTH);
|
||||
}
|
||||
|
||||
static int nth_midxed_pack_entry(struct repository *r,
|
||||
struct multi_pack_index *m,
|
||||
struct pack_entry *e,
|
||||
uint32_t pos)
|
||||
int fill_midx_entry(struct repository * r,
|
||||
const struct object_id *oid,
|
||||
struct pack_entry *e,
|
||||
struct multi_pack_index *m)
|
||||
{
|
||||
uint32_t pos;
|
||||
uint32_t pack_int_id;
|
||||
struct packed_git *p;
|
||||
|
||||
if (!bsearch_midx(oid, m, &pos))
|
||||
return 0;
|
||||
|
||||
if (pos >= m->num_objects)
|
||||
return 0;
|
||||
|
||||
|
@@ -310,15 +314,9 @@ static int nth_midxed_pack_entry(struct repository *r,
|
|||
if (!is_pack_valid(p))
|
||||
return 0;
|
||||
|
||||
if (p->num_bad_objects) {
|
||||
uint32_t i;
|
||||
struct object_id oid;
|
||||
nth_midxed_object_oid(&oid, m, pos);
|
||||
for (i = 0; i < p->num_bad_objects; i++)
|
||||
if (hasheq(oid.hash,
|
||||
p->bad_object_sha1 + the_hash_algo->rawsz * i))
|
||||
return 0;
|
||||
}
|
||||
if (oidset_size(&p->bad_objects) &&
|
||||
oidset_contains(&p->bad_objects, oid))
|
||||
return 0;
|
||||
|
||||
e->offset = nth_midxed_offset(m, pos);
|
||||
e->p = p;
|
||||
|
@@ -326,19 +324,6 @@ static int nth_midxed_pack_entry(struct repository *r,
|
|||
return 1;
|
||||
}
|
||||
|
||||
int fill_midx_entry(struct repository * r,
|
||||
const struct object_id *oid,
|
||||
struct pack_entry *e,
|
||||
struct multi_pack_index *m)
|
||||
{
|
||||
uint32_t pos;
|
||||
|
||||
if (!bsearch_midx(oid, m, &pos))
|
||||
return 0;
|
||||
|
||||
return nth_midxed_pack_entry(r, m, e, pos);
|
||||
}
|
||||
|
||||
/* Match "foo.idx" against either "foo.pack" _or_ "foo.idx". */
|
||||
static int cmp_idx_or_pack_name(const char *idx_or_pack_name,
|
||||
const char *idx_name)
|
||||
|
|
|
@@ -1642,7 +1642,7 @@ static int do_oid_object_info_extended(struct repository *r,
|
|||
return 0;
|
||||
rtype = packed_object_info(r, e.p, e.offset, oi);
|
||||
if (rtype < 0) {
|
||||
mark_bad_packed_object(e.p, real->hash);
|
||||
mark_bad_packed_object(e.p, real);
|
||||
return do_oid_object_info_extended(r, real, oi, 0);
|
||||
} else if (oi->whence == OI_PACKED) {
|
||||
oi->u.packed.offset = e.offset;
|
||||
|
@@ -1751,7 +1751,7 @@ void *read_object_file_extended(struct repository *r,
|
|||
die(_("loose object %s (stored in %s) is corrupt"),
|
||||
oid_to_hex(repl), path);
|
||||
|
||||
if ((p = has_packed_and_bad(r, repl->hash)) != NULL)
|
||||
if ((p = has_packed_and_bad(r, repl)) != NULL)
|
||||
die(_("packed object %s (stored in %s) is corrupt"),
|
||||
oid_to_hex(repl), p->pack_name);
|
||||
obj_read_unlock();
|
||||
|
|
|
@@ -10,6 +10,7 @@
|
|||
#include "khash.h"
|
||||
#include "dir.h"
|
||||
#include "oidtree.h"
|
||||
#include "oidset.h"
|
||||
|
||||
struct object_directory {
|
||||
struct object_directory *next;
|
||||
|
@@ -76,9 +77,8 @@ struct packed_git {
|
|||
const void *index_data;
|
||||
size_t index_size;
|
||||
uint32_t num_objects;
|
||||
uint32_t num_bad_objects;
|
||||
uint32_t crc_offset;
|
||||
unsigned char *bad_object_sha1;
|
||||
struct oidset bad_objects;
|
||||
int index_version;
|
||||
time_t mtime;
|
||||
int pack_fd;
|
||||
|
|
5
oidset.c
5
oidset.c
|
@@ -36,11 +36,6 @@ void oidset_clear(struct oidset *set)
|
|||
oidset_init(set, 0);
|
||||
}
|
||||
|
||||
int oidset_size(struct oidset *set)
|
||||
{
|
||||
return kh_size(&set->set);
|
||||
}
|
||||
|
||||
void oidset_parse_file(struct oidset *set, const char *path)
|
||||
{
|
||||
oidset_parse_file_carefully(set, path, NULL, NULL);
|
||||
|
|
5
oidset.h
5
oidset.h
|
@@ -57,7 +57,10 @@ int oidset_remove(struct oidset *set, const struct object_id *oid);
|
|||
/**
|
||||
* Returns the number of oids in the set.
|
||||
*/
|
||||
int oidset_size(struct oidset *set);
|
||||
static inline int oidset_size(const struct oidset *set)
|
||||
{
|
||||
return kh_size(&set->set);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove all entries from the oidset, freeing any resources associated with
|
||||
|
|
38
packfile.c
38
packfile.c
|
@@ -1161,31 +1161,19 @@ int unpack_object_header(struct packed_git *p,
|
|||
return type;
|
||||
}
|
||||
|
||||
void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1)
|
||||
void mark_bad_packed_object(struct packed_git *p, const struct object_id *oid)
|
||||
{
|
||||
unsigned i;
|
||||
const unsigned hashsz = the_hash_algo->rawsz;
|
||||
for (i = 0; i < p->num_bad_objects; i++)
|
||||
if (hasheq(sha1, p->bad_object_sha1 + hashsz * i))
|
||||
return;
|
||||
p->bad_object_sha1 = xrealloc(p->bad_object_sha1,
|
||||
st_mult(GIT_MAX_RAWSZ,
|
||||
st_add(p->num_bad_objects, 1)));
|
||||
hashcpy(p->bad_object_sha1 + hashsz * p->num_bad_objects, sha1);
|
||||
p->num_bad_objects++;
|
||||
oidset_insert(&p->bad_objects, oid);
|
||||
}
|
||||
|
||||
const struct packed_git *has_packed_and_bad(struct repository *r,
|
||||
const unsigned char *sha1)
|
||||
const struct object_id *oid)
|
||||
{
|
||||
struct packed_git *p;
|
||||
unsigned i;
|
||||
|
||||
for (p = r->objects->packed_git; p; p = p->next)
|
||||
for (i = 0; i < p->num_bad_objects; i++)
|
||||
if (hasheq(sha1,
|
||||
p->bad_object_sha1 + the_hash_algo->rawsz * i))
|
||||
return p;
|
||||
if (oidset_contains(&p->bad_objects, oid))
|
||||
return p;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@@ -1272,7 +1260,7 @@ static int retry_bad_packed_offset(struct repository *r,
|
|||
if (offset_to_pack_pos(p, obj_offset, &pos) < 0)
|
||||
return OBJ_BAD;
|
||||
nth_packed_object_id(&oid, p, pack_pos_to_index(p, pos));
|
||||
mark_bad_packed_object(p, oid.hash);
|
||||
mark_bad_packed_object(p, &oid);
|
||||
type = oid_object_info(r, &oid, NULL);
|
||||
if (type <= OBJ_NONE)
|
||||
return OBJ_BAD;
|
||||
|
@@ -1722,7 +1710,7 @@ void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
|
|||
nth_packed_object_id(&oid, p, index_pos);
|
||||
error("bad packed object CRC for %s",
|
||||
oid_to_hex(&oid));
|
||||
mark_bad_packed_object(p, oid.hash);
|
||||
mark_bad_packed_object(p, &oid);
|
||||
data = NULL;
|
||||
goto out;
|
||||
}
|
||||
|
@@ -1811,7 +1799,7 @@ void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
|
|||
" at offset %"PRIuMAX" from %s",
|
||||
oid_to_hex(&base_oid), (uintmax_t)obj_offset,
|
||||
p->pack_name);
|
||||
mark_bad_packed_object(p, base_oid.hash);
|
||||
mark_bad_packed_object(p, &base_oid);
|
||||
base = read_object(r, &base_oid, &type, &base_size);
|
||||
external_base = base;
|
||||
}
|
||||
|
@@ -2016,13 +2004,9 @@ static int fill_pack_entry(const struct object_id *oid,
|
|||
{
|
||||
off_t offset;
|
||||
|
||||
if (p->num_bad_objects) {
|
||||
unsigned i;
|
||||
for (i = 0; i < p->num_bad_objects; i++)
|
||||
if (hasheq(oid->hash,
|
||||
p->bad_object_sha1 + the_hash_algo->rawsz * i))
|
||||
return 0;
|
||||
}
|
||||
if (oidset_size(&p->bad_objects) &&
|
||||
oidset_contains(&p->bad_objects, oid))
|
||||
return 0;
|
||||
|
||||
offset = find_pack_entry_one(oid->hash, p);
|
||||
if (!offset)
|
||||
|
|
|
@@ -159,8 +159,8 @@ int packed_object_info(struct repository *r,
|
|||
struct packed_git *pack,
|
||||
off_t offset, struct object_info *);
|
||||
|
||||
void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1);
|
||||
const struct packed_git *has_packed_and_bad(struct repository *r, const unsigned char *sha1);
|
||||
void mark_bad_packed_object(struct packed_git *, const struct object_id *);
|
||||
const struct packed_git *has_packed_and_bad(struct repository *, const struct object_id *);
|
||||
|
||||
#define ON_DISK_KEEP_PACKS 1
|
||||
#define IN_CORE_KEEP_PACKS 2
|
||||
|
|
Loading…
Reference in New Issue