2019-01-24 09:29:12 +01:00
|
|
|
#define USE_THE_INDEX_COMPATIBILITY_MACROS
|
2007-07-15 01:14:45 +02:00
|
|
|
#include "builtin.h"
|
2005-04-30 18:59:31 +02:00
|
|
|
#include "cache.h"
|
2018-03-23 18:20:57 +01:00
|
|
|
#include "repository.h"
|
2017-06-14 20:07:36 +02:00
|
|
|
#include "config.h"
|
2005-04-18 20:39:48 +02:00
|
|
|
#include "commit.h"
|
|
|
|
#include "tree.h"
|
|
|
|
#include "blob.h"
|
2005-04-28 16:46:33 +02:00
|
|
|
#include "tag.h"
|
2005-07-03 19:01:38 +02:00
|
|
|
#include "refs.h"
|
2005-06-29 11:51:27 +02:00
|
|
|
#include "pack.h"
|
2006-04-26 01:37:08 +02:00
|
|
|
#include "cache-tree.h"
|
2006-05-29 21:19:02 +02:00
|
|
|
#include "tree-walk.h"
|
2008-02-25 22:46:05 +01:00
|
|
|
#include "fsck.h"
|
2007-10-15 22:34:05 +02:00
|
|
|
#include "parse-options.h"
|
2009-01-10 13:07:50 +01:00
|
|
|
#include "dir.h"
|
2011-11-07 03:59:26 +01:00
|
|
|
#include "progress.h"
|
2012-03-07 11:54:20 +01:00
|
|
|
#include "streaming.h"
|
2016-07-17 13:00:02 +02:00
|
|
|
#include "decorate.h"
|
2017-08-19 00:20:19 +02:00
|
|
|
#include "packfile.h"
|
2018-03-23 18:20:56 +01:00
|
|
|
#include "object-store.h"
|
2018-06-27 15:24:43 +02:00
|
|
|
#include "run-command.h"
|
2018-10-21 10:08:58 +02:00
|
|
|
#include "worktree.h"
|
2005-04-18 20:39:48 +02:00
|
|
|
|
|
|
|
#define REACHABLE 0x0001
|
2006-05-29 21:18:33 +02:00
|
|
|
#define SEEN 0x0002
|
2013-06-06 00:37:39 +02:00
|
|
|
#define HAS_OBJ 0x0004
|
2017-07-20 02:21:44 +02:00
|
|
|
/* This flag is set if something points to this object. */
|
|
|
|
#define USED 0x0008
|
2005-04-13 18:57:30 +02:00
|
|
|
|
2006-08-15 19:23:48 +02:00
|
|
|
static int show_root;
|
|
|
|
static int show_tags;
|
|
|
|
static int show_unreachable;
|
2007-04-04 16:46:14 +02:00
|
|
|
static int include_reflogs = 1;
|
2009-10-20 20:46:55 +02:00
|
|
|
static int check_full = 1;
|
2015-06-22 17:27:12 +02:00
|
|
|
static int connectivity_only;
|
2006-08-15 19:23:48 +02:00
|
|
|
static int check_strict;
|
|
|
|
static int keep_cache_objects;
|
2015-06-22 17:25:00 +02:00
|
|
|
static struct fsck_options fsck_walk_options = FSCK_OPTIONS_DEFAULT;
|
|
|
|
static struct fsck_options fsck_obj_options = FSCK_OPTIONS_DEFAULT;
|
2007-03-05 09:22:06 +01:00
|
|
|
static int errors_found;
|
2007-07-03 02:33:54 +02:00
|
|
|
static int write_lost_and_found;
|
2007-06-05 04:44:00 +02:00
|
|
|
static int verbose;
|
2011-11-07 03:59:26 +01:00
|
|
|
static int show_progress = -1;
|
2012-02-28 23:55:39 +01:00
|
|
|
static int show_dangling = 1;
|
2016-07-17 13:00:02 +02:00
|
|
|
static int name_objects;
|
2007-03-05 09:22:06 +01:00
|
|
|
#define ERROR_OBJECT 01
|
|
|
|
#define ERROR_REACHABLE 02
|
2011-11-07 03:59:23 +01:00
|
|
|
#define ERROR_PACK 04
|
2015-09-23 22:46:39 +02:00
|
|
|
#define ERROR_REFS 010
|
2018-06-27 15:24:43 +02:00
|
|
|
#define ERROR_COMMIT_GRAPH 020
|
2005-04-13 18:57:30 +02:00
|
|
|
|
2016-07-17 12:59:44 +02:00
|
|
|
static const char *describe_object(struct object *obj)
|
|
|
|
{
|
2018-11-10 06:16:14 +01:00
|
|
|
static struct strbuf bufs[] = {
|
|
|
|
STRBUF_INIT, STRBUF_INIT, STRBUF_INIT, STRBUF_INIT
|
|
|
|
};
|
|
|
|
static int b = 0;
|
|
|
|
struct strbuf *buf;
|
|
|
|
char *name = NULL;
|
2016-07-17 13:00:02 +02:00
|
|
|
|
2018-11-10 06:16:14 +01:00
|
|
|
if (name_objects)
|
|
|
|
name = lookup_decoration(fsck_walk_options.object_names, obj);
|
|
|
|
|
|
|
|
buf = bufs + b;
|
|
|
|
b = (b + 1) % ARRAY_SIZE(bufs);
|
|
|
|
strbuf_reset(buf);
|
|
|
|
strbuf_addstr(buf, oid_to_hex(&obj->oid));
|
2016-07-17 13:00:02 +02:00
|
|
|
if (name)
|
2018-11-10 06:16:14 +01:00
|
|
|
strbuf_addf(buf, " (%s)", name);
|
2016-07-17 13:00:02 +02:00
|
|
|
|
2018-11-10 06:16:14 +01:00
|
|
|
return buf->buf;
|
2016-07-17 12:59:44 +02:00
|
|
|
}
|
|
|
|
|
2017-01-26 05:11:00 +01:00
|
|
|
static const char *printable_type(struct object *obj)
|
|
|
|
{
|
|
|
|
const char *ret;
|
|
|
|
|
fsck: lazily load types under --connectivity-only
The recent fixes to "fsck --connectivity-only" load all of
the objects with their correct types. This keeps the
connectivity-only code path close to the regular one, but it
also introduces some unnecessary inefficiency. While getting
the type of an object is cheap compared to actually opening
and parsing the object (as the non-connectivity-only case
would do), it's still not free.
For reachable non-blob objects, we end up having to parse
them later anyway (to see what they point to), making our
type lookup here redundant.
For unreachable objects, we might never hit them at all in
the reachability traversal, making the lookup completely
wasted. And in some cases, we might have quite a few
unreachable objects (e.g., when alternates are used for
shared object storage between repositories, it's normal for
there to be objects reachable from other repositories but
not the one running fsck).
The comment in mark_object_for_connectivity() claims two
benefits to getting the type up front:
1. We need to know the types during fsck_walk(). (And not
explicitly mentioned, but we also need them when
printing the types of broken or dangling commits).
We can address this by lazy-loading the types as
necessary. Most objects never need this lazy-load at
all, because they fall into one of these categories:
a. Reachable from our tips, and are coerced into the
correct type as we traverse (e.g., a parent link
will call lookup_commit(), which converts OBJ_NONE
to OBJ_COMMIT).
b. Unreachable, but not at the tip of a chunk of
unreachable history. We only mention the tips as
"dangling", so an unreachable commit which links
to hundreds of other objects needs only report the
type of the tip commit.
2. It serves as a cross-check that the coercion in (1a) is
correct (i.e., we'll complain about a parent link that
points to a blob). But we get most of this for free
already, because right after coercing, we'll parse any
non-blob objects. So we'd notice then if we expected a
commit and got a blob.
The one exception is when we expect a blob, in which
case we never actually read the object contents.
So this is a slight weakening, but given that the whole
point of --connectivity-only is to sacrifice some data
integrity checks for speed, this seems like an
acceptable tradeoff.
Here are before and after timings for an extreme case with
~5M reachable objects and another ~12M unreachable (it's the
torvalds/linux repository on GitHub, connected to shared
storage for all of the other kernel forks):
[before]
$ time git fsck --no-dangling --connectivity-only
real 3m4.323s
user 1m25.121s
sys 1m38.710s
[after]
$ time git fsck --no-dangling --connectivity-only
real 0m51.497s
user 0m49.575s
sys 0m1.776s
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-26 05:12:07 +01:00
|
|
|
if (obj->type == OBJ_NONE) {
|
2018-04-25 20:20:59 +02:00
|
|
|
enum object_type type = oid_object_info(the_repository,
|
|
|
|
&obj->oid, NULL);
|
fsck: lazily load types under --connectivity-only
The recent fixes to "fsck --connectivity-only" load all of
the objects with their correct types. This keeps the
connectivity-only code path close to the regular one, but it
also introduces some unnecessary inefficiency. While getting
the type of an object is cheap compared to actually opening
and parsing the object (as the non-connectivity-only case
would do), it's still not free.
For reachable non-blob objects, we end up having to parse
them later anyway (to see what they point to), making our
type lookup here redundant.
For unreachable objects, we might never hit them at all in
the reachability traversal, making the lookup completely
wasted. And in some cases, we might have quite a few
unreachable objects (e.g., when alternates are used for
shared object storage between repositories, it's normal for
there to be objects reachable from other repositories but
not the one running fsck).
The comment in mark_object_for_connectivity() claims two
benefits to getting the type up front:
1. We need to know the types during fsck_walk(). (And not
explicitly mentioned, but we also need them when
printing the types of broken or dangling commits).
We can address this by lazy-loading the types as
necessary. Most objects never need this lazy-load at
all, because they fall into one of these categories:
a. Reachable from our tips, and are coerced into the
correct type as we traverse (e.g., a parent link
will call lookup_commit(), which converts OBJ_NONE
to OBJ_COMMIT).
b. Unreachable, but not at the tip of a chunk of
unreachable history. We only mention the tips as
"dangling", so an unreachable commit which links
to hundreds of other objects needs only report the
type of the tip commit.
2. It serves as a cross-check that the coercion in (1a) is
correct (i.e., we'll complain about a parent link that
points to a blob). But we get most of this for free
already, because right after coercing, we'll parse any
non-blob objects. So we'd notice then if we expected a
commit and got a blob.
The one exception is when we expect a blob, in which
case we never actually read the object contents.
So this is a slight weakening, but given that the whole
point of --connectivity-only is to sacrifice some data
integrity checks for speed, this seems like an
acceptable tradeoff.
Here are before and after timings for an extreme case with
~5M reachable objects and another ~12M unreachable (it's the
torvalds/linux repository on GitHub, connected to shared
storage for all of the other kernel forks):
[before]
$ time git fsck --no-dangling --connectivity-only
real 3m4.323s
user 1m25.121s
sys 1m38.710s
[after]
$ time git fsck --no-dangling --connectivity-only
real 0m51.497s
user 0m49.575s
sys 0m1.776s
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-26 05:12:07 +01:00
|
|
|
if (type > 0)
|
2018-06-29 03:21:54 +02:00
|
|
|
object_as_type(the_repository, obj, type, 0);
|
fsck: lazily load types under --connectivity-only
The recent fixes to "fsck --connectivity-only" load all of
the objects with their correct types. This keeps the
connectivity-only code path close to the regular one, but it
also introduces some unnecessary inefficiency. While getting
the type of an object is cheap compared to actually opening
and parsing the object (as the non-connectivity-only case
would do), it's still not free.
For reachable non-blob objects, we end up having to parse
them later anyway (to see what they point to), making our
type lookup here redundant.
For unreachable objects, we might never hit them at all in
the reachability traversal, making the lookup completely
wasted. And in some cases, we might have quite a few
unreachable objects (e.g., when alternates are used for
shared object storage between repositories, it's normal for
there to be objects reachable from other repositories but
not the one running fsck).
The comment in mark_object_for_connectivity() claims two
benefits to getting the type up front:
1. We need to know the types during fsck_walk(). (And not
explicitly mentioned, but we also need them when
printing the types of broken or dangling commits).
We can address this by lazy-loading the types as
necessary. Most objects never need this lazy-load at
all, because they fall into one of these categories:
a. Reachable from our tips, and are coerced into the
correct type as we traverse (e.g., a parent link
will call lookup_commit(), which converts OBJ_NONE
to OBJ_COMMIT).
b. Unreachable, but not at the tip of a chunk of
unreachable history. We only mention the tips as
"dangling", so an unreachable commit which links
to hundreds of other objects needs only report the
type of the tip commit.
2. It serves as a cross-check that the coercion in (1a) is
correct (i.e., we'll complain about a parent link that
points to a blob). But we get most of this for free
already, because right after coercing, we'll parse any
non-blob objects. So we'd notice then if we expected a
commit and got a blob.
The one exception is when we expect a blob, in which
case we never actually read the object contents.
So this is a slight weakening, but given that the whole
point of --connectivity-only is to sacrifice some data
integrity checks for speed, this seems like an
acceptable tradeoff.
Here are before and after timings for an extreme case with
~5M reachable objects and another ~12M unreachable (it's the
torvalds/linux repository on GitHub, connected to shared
storage for all of the other kernel forks):
[before]
$ time git fsck --no-dangling --connectivity-only
real 3m4.323s
user 1m25.121s
sys 1m38.710s
[after]
$ time git fsck --no-dangling --connectivity-only
real 0m51.497s
user 0m49.575s
sys 0m1.776s
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-26 05:12:07 +01:00
|
|
|
}
|
|
|
|
|
2018-02-14 19:59:24 +01:00
|
|
|
ret = type_name(obj->type);
|
2017-01-26 05:11:00 +01:00
|
|
|
if (!ret)
|
2018-11-10 06:16:15 +01:00
|
|
|
ret = _("unknown");
|
2017-01-26 05:11:00 +01:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2015-06-22 17:27:06 +02:00
|
|
|
static int fsck_config(const char *var, const char *value, void *cb)
|
2005-09-20 20:56:05 +02:00
|
|
|
{
|
2015-06-22 17:27:23 +02:00
|
|
|
if (strcmp(var, "fsck.skiplist") == 0) {
|
|
|
|
const char *path;
|
|
|
|
struct strbuf sb = STRBUF_INIT;
|
|
|
|
|
|
|
|
if (git_config_pathname(&path, var, value))
|
|
|
|
return 1;
|
|
|
|
strbuf_addf(&sb, "skiplist=%s", path);
|
|
|
|
free((char *)path);
|
|
|
|
fsck_set_msg_types(&fsck_obj_options, sb.buf);
|
|
|
|
strbuf_release(&sb);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-06-22 17:27:06 +02:00
|
|
|
if (skip_prefix(var, "fsck.", &var)) {
|
|
|
|
fsck_set_msg_type(&fsck_obj_options, var, value);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return git_default_config(var, value, cb);
|
|
|
|
}
|
|
|
|
|
2015-06-22 17:25:09 +02:00
|
|
|
static int objerror(struct object *obj, const char *err)
|
2005-09-20 20:56:05 +02:00
|
|
|
{
|
2007-03-05 09:22:06 +01:00
|
|
|
errors_found |= ERROR_OBJECT;
|
2018-11-10 06:16:15 +01:00
|
|
|
/* TRANSLATORS: e.g. error in tree 01bfda: <more explanation> */
|
|
|
|
fprintf_ln(stderr, _("error in %s %s: %s"),
|
2018-11-10 06:16:14 +01:00
|
|
|
printable_type(obj), describe_object(obj), err);
|
2005-09-20 20:56:05 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2016-07-17 12:59:57 +02:00
|
|
|
static int fsck_error_func(struct fsck_options *o,
|
|
|
|
struct object *obj, int type, const char *message)
|
2005-09-20 20:56:05 +02:00
|
|
|
{
|
2018-11-10 06:16:14 +01:00
|
|
|
switch (type) {
|
|
|
|
case FSCK_WARN:
|
2018-11-10 06:16:15 +01:00
|
|
|
/* TRANSLATORS: e.g. warning in tree 01bfda: <more explanation> */
|
|
|
|
fprintf_ln(stderr, _("warning in %s %s: %s"),
|
2018-11-10 06:16:14 +01:00
|
|
|
printable_type(obj), describe_object(obj), message);
|
|
|
|
return 0;
|
|
|
|
case FSCK_ERROR:
|
2018-11-10 06:16:15 +01:00
|
|
|
/* TRANSLATORS: e.g. error in tree 01bfda: <more explanation> */
|
|
|
|
fprintf_ln(stderr, _("error in %s %s: %s"),
|
2018-11-10 06:16:14 +01:00
|
|
|
printable_type(obj), describe_object(obj), message);
|
|
|
|
return 1;
|
|
|
|
default:
|
|
|
|
BUG("%d (FSCK_IGNORE?) should never trigger this callback", type);
|
|
|
|
}
|
2005-09-20 20:56:05 +02:00
|
|
|
}
|
|
|
|
|
2008-12-11 04:44:37 +01:00
|
|
|
static struct object_array pending;
|
|
|
|
|
2015-06-22 17:25:00 +02:00
|
|
|
static int mark_object(struct object *obj, int type, void *data, struct fsck_options *options)
|
2008-02-25 22:46:05 +01:00
|
|
|
{
|
|
|
|
struct object *parent = data;
|
|
|
|
|
2011-01-26 21:46:55 +01:00
|
|
|
/*
|
|
|
|
* The only case data is NULL or type is OBJ_ANY is when
|
|
|
|
* mark_object_reachable() calls us. All the callers of
|
|
|
|
* that function has non-NULL obj hence ...
|
|
|
|
*/
|
2008-02-25 22:46:05 +01:00
|
|
|
if (!obj) {
|
2011-01-26 21:46:55 +01:00
|
|
|
/* ... these references to parent->fld are safe here */
|
2018-11-10 06:16:15 +01:00
|
|
|
printf_ln(_("broken link from %7s %s"),
|
|
|
|
printable_type(parent), describe_object(parent));
|
|
|
|
printf_ln(_("broken link from %7s %s"),
|
|
|
|
(type == OBJ_ANY ? _("unknown") : type_name(type)),
|
|
|
|
_("unknown"));
|
2008-02-25 22:46:05 +01:00
|
|
|
errors_found |= ERROR_REACHABLE;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type != OBJ_ANY && obj->type != type)
|
2011-01-26 21:46:55 +01:00
|
|
|
/* ... and the reference to parent is safe here */
|
2018-11-10 06:16:15 +01:00
|
|
|
objerror(parent, _("wrong object type in link"));
|
2008-02-25 22:46:05 +01:00
|
|
|
|
|
|
|
if (obj->flags & REACHABLE)
|
|
|
|
return 0;
|
|
|
|
obj->flags |= REACHABLE;
|
2017-12-05 17:58:46 +01:00
|
|
|
|
|
|
|
if (is_promisor_object(&obj->oid))
|
|
|
|
/*
|
|
|
|
* Further recursion does not need to be performed on this
|
|
|
|
* object since it is a promisor object (so it does not need to
|
|
|
|
* be added to "pending").
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
|
2013-06-06 00:37:39 +02:00
|
|
|
if (!(obj->flags & HAS_OBJ)) {
|
2015-11-10 03:22:28 +01:00
|
|
|
if (parent && !has_object_file(&obj->oid)) {
|
2018-11-10 06:16:15 +01:00
|
|
|
printf_ln(_("broken link from %7s %s\n"
|
|
|
|
" to %7s %s"),
|
2018-11-10 06:16:14 +01:00
|
|
|
printable_type(parent),
|
|
|
|
describe_object(parent),
|
|
|
|
printable_type(obj),
|
|
|
|
describe_object(obj));
|
2008-02-25 22:46:05 +01:00
|
|
|
errors_found |= ERROR_REACHABLE;
|
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2013-05-25 11:08:11 +02:00
|
|
|
add_object_array(obj, NULL, &pending);
|
2008-12-11 04:44:37 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mark_object_reachable(struct object *obj)
|
|
|
|
{
|
2015-06-22 17:25:00 +02:00
|
|
|
mark_object(obj, OBJ_ANY, NULL, NULL);
|
2008-12-11 04:44:37 +01:00
|
|
|
}
|
|
|
|
|
2011-01-26 21:46:55 +01:00
|
|
|
static int traverse_one_object(struct object *obj)
|
2008-12-11 04:44:37 +01:00
|
|
|
{
|
2018-01-20 08:43:51 +01:00
|
|
|
int result = fsck_walk(obj, obj, &fsck_walk_options);
|
|
|
|
|
|
|
|
if (obj->type == OBJ_TREE) {
|
|
|
|
struct tree *tree = (struct tree *)obj;
|
|
|
|
free_tree_buffer(tree);
|
|
|
|
}
|
|
|
|
return result;
|
2008-02-25 22:46:05 +01:00
|
|
|
}
|
|
|
|
|
2008-12-11 04:44:37 +01:00
|
|
|
static int traverse_reachable(void)
|
2008-02-25 22:46:05 +01:00
|
|
|
{
|
2011-11-07 03:59:26 +01:00
|
|
|
struct progress *progress = NULL;
|
|
|
|
unsigned int nr = 0;
|
2008-12-11 04:44:37 +01:00
|
|
|
int result = 0;
|
2011-11-07 03:59:26 +01:00
|
|
|
if (show_progress)
|
progress: simplify "delayed" progress API
We used to expose the full power of the delayed progress API to the
callers, so that they can specify, not just the message to show and
expected total amount of work that is used to compute the percentage
of work performed so far, the percent-threshold parameter P and the
delay-seconds parameter N. The progress meter starts to show at N
seconds into the operation only if we have not yet completed P per-cent
of the total work.
Most callers used either (0%, 2s) or (50%, 1s) as (P, N), but there
are oddballs that chose more random-looking values like 95%.
For a smoother workload, (50%, 1s) would allow us to start showing
the progress meter earlier than (0%, 2s), while keeping the chance
of not showing progress meter for long running operation the same as
the latter. For a task that would take 2s or more to complete, it
is likely that less than half of it would complete within the first
second, if the workload is smooth. But for a spiky workload whose
earlier part is easier, such a setting is likely to fail to show the
progress meter entirely and (0%, 2s) is more appropriate.
But that is merely a theory. Realistically, it is of dubious value
to ask each codepath to carefully consider smoothness of their
workload and specify their own setting by passing two extra
parameters. Let's simplify the API by dropping both parameters and
have everybody use (0%, 2s).
Oh, by the way, the percent-threshold parameter and the structure
member were consistently misspelled, which also is now fixed ;-)
Helped-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-08-19 19:39:41 +02:00
|
|
|
progress = start_delayed_progress(_("Checking connectivity"), 0);
|
2008-12-11 04:44:37 +01:00
|
|
|
while (pending.nr) {
|
object_array: add and use `object_array_pop()`
In a couple of places, we pop objects off an object array `foo` by
decreasing `foo.nr`. We access `foo.nr` in many places, but most if not
all other times we do so read-only, e.g., as we iterate over the array.
But when we change `foo.nr` behind the array's back, it feels a bit
nasty and looks like it might leak memory.
Leaks happen if the popped element has an allocated `name` or `path`.
At the moment, that is not the case. Still, 1) the object array might
gain more fields that want to be freed, 2) a code path where we pop
might start using names or paths, 3) one of these code paths might be
copied to somewhere where we do, and 4) using a dedicated function for
popping is conceptually cleaner.
Introduce and use `object_array_pop()` instead. Release memory in the
new function. Document that popping an object leaves the associated
elements in limbo.
The converted places were identified by grepping for "\.nr\>" and
looking for "--".
Make the new function return NULL on an empty array. This is consistent
with `pop_commit()` and allows the following:
while ((o = object_array_pop(&foo)) != NULL) {
// do something
}
But as noted above, we don't need to go out of our way to avoid reading
`foo.nr`. This is probably more readable:
while (foo.nr) {
... o = object_array_pop(&foo);
// do something
}
The name of `object_array_pop()` does not quite align with
`add_object_array()`. That is unfortunate. On the other hand, it matches
`object_array_clear()`. Arguably it's `add_...` that is the odd one out,
since it reads like it's used to "add" an "object array". For that
reason, side with `object_array_clear()`.
Signed-off-by: Martin Ågren <martin.agren@gmail.com>
Reviewed-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-09-23 01:34:53 +02:00
|
|
|
result |= traverse_one_object(object_array_pop(&pending));
|
2011-11-07 03:59:26 +01:00
|
|
|
display_progress(progress, ++nr);
|
2008-12-11 04:44:37 +01:00
|
|
|
}
|
2011-11-07 03:59:26 +01:00
|
|
|
stop_progress(&progress);
|
2008-12-11 04:44:37 +01:00
|
|
|
return !!result;
|
2008-02-25 22:46:05 +01:00
|
|
|
}
|
|
|
|
|
2015-06-22 17:25:00 +02:00
|
|
|
static int mark_used(struct object *obj, int type, void *data, struct fsck_options *options)
|
2008-02-25 22:46:05 +01:00
|
|
|
{
|
|
|
|
if (!obj)
|
|
|
|
return 1;
|
2017-07-20 02:21:44 +02:00
|
|
|
obj->flags |= USED;
|
2008-02-25 22:46:05 +01:00
|
|
|
return 0;
|
2005-09-20 20:56:05 +02:00
|
|
|
}
|
|
|
|
|
fsck: always compute USED flags for unreachable objects
The --connectivity-only option avoids opening every object, and instead
just marks reachable objects with a flag and compares this to the set
of all objects. This strategy is discussed in more detail in 3e3f8bd608
(fsck: prepare dummy objects for --connectivity-check, 2017-01-17).
This means that we report _every_ unreachable object as dangling.
Whereas in a full fsck, we'd have actually opened and parsed each of
those unreachable objects, marking their child objects with the USED
flag, to mean "this was mentioned by another object". And thus we can
report only the tip of an unreachable segment of the object graph as
dangling.
You can see this difference with a trivial example:
tree=$(git hash-object -t tree -w /dev/null)
one=$(echo one | git commit-tree $tree)
two=$(echo two | git commit-tree -p $one $tree)
Running `git fsck` will report only $two as dangling, but with
--connectivity-only, both commits (and the tree) are reported. Likewise,
using --lost-found would write all three objects.
We can make --connectivity-only work like the normal case by taking a
separate pass over the unreachable objects, parsing them and marking
objects they refer to as USED. That still avoids parsing any blobs,
though we do pay the cost to access any unreachable commits and trees
(which may or may not be noticeable, depending on how many you have).
If neither --dangling nor --lost-found is in effect, then we can skip
this step entirely, just like we do now. That makes "--connectivity-only
--no-dangling" just as fast as the current "--connectivity-only". I.e.,
we do the correct thing always, but you can still tweak the options to
make it faster if you don't care about dangling objects.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-03-05 05:47:39 +01:00
|
|
|
static void mark_unreachable_referents(const struct object_id *oid)
|
|
|
|
{
|
|
|
|
struct fsck_options options = FSCK_OPTIONS_DEFAULT;
|
2019-06-20 09:41:14 +02:00
|
|
|
struct object *obj = lookup_object(the_repository, oid);
|
fsck: always compute USED flags for unreachable objects
The --connectivity-only option avoids opening every object, and instead
just marks reachable objects with a flag and compares this to the set
of all objects. This strategy is discussed in more detail in 3e3f8bd608
(fsck: prepare dummy objects for --connectivity-check, 2017-01-17).
This means that we report _every_ unreachable object as dangling.
Whereas in a full fsck, we'd have actually opened and parsed each of
those unreachable objects, marking their child objects with the USED
flag, to mean "this was mentioned by another object". And thus we can
report only the tip of an unreachable segment of the object graph as
dangling.
You can see this difference with a trivial example:
tree=$(git hash-object -t tree -w /dev/null)
one=$(echo one | git commit-tree $tree)
two=$(echo two | git commit-tree -p $one $tree)
Running `git fsck` will report only $two as dangling, but with
--connectivity-only, both commits (and the tree) are reported. Likewise,
using --lost-found would write all three objects.
We can make --connectivity-only work like the normal case by taking a
separate pass over the unreachable objects, parsing them and marking
objects they refer to as USED. That still avoids parsing any blobs,
though we do pay the cost to access any unreachable commits and trees
(which may or may not be noticeable, depending on how many you have).
If neither --dangling nor --lost-found is in effect, then we can skip
this step entirely, just like we do now. That makes "--connectivity-only
--no-dangling" just as fast as the current "--connectivity-only". I.e.,
we do the correct thing always, but you can still tweak the options to
make it faster if you don't care about dangling objects.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-03-05 05:47:39 +01:00
|
|
|
|
|
|
|
if (!obj || !(obj->flags & HAS_OBJ))
|
|
|
|
return; /* not part of our original set */
|
|
|
|
if (obj->flags & REACHABLE)
|
|
|
|
return; /* reachable objects already traversed */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Avoid passing OBJ_NONE to fsck_walk, which will parse the object
|
|
|
|
* (and we want to avoid parsing blobs).
|
|
|
|
*/
|
|
|
|
if (obj->type == OBJ_NONE) {
|
|
|
|
enum object_type type = oid_object_info(the_repository,
|
|
|
|
&obj->oid, NULL);
|
|
|
|
if (type > 0)
|
|
|
|
object_as_type(the_repository, obj, type, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
options.walk = mark_used;
|
|
|
|
fsck_walk(obj, NULL, &options);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* for_each_loose_object() adapter around mark_unreachable_referents(). */
static int mark_loose_unreachable_referents(const struct object_id *oid,
					    const char *path,
					    void *data)
{
	mark_unreachable_referents(oid);
	return 0;
}
|
|
|
|
|
|
|
|
static int mark_packed_unreachable_referents(const struct object_id *oid,
|
|
|
|
struct packed_git *pack,
|
|
|
|
uint32_t pos,
|
|
|
|
void *data)
|
|
|
|
{
|
|
|
|
mark_unreachable_referents(oid);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-01-22 07:26:41 +01:00
|
|
|
/*
|
|
|
|
* Check a single reachable object
|
|
|
|
*/
|
|
|
|
static void check_reachable_object(struct object *obj)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We obviously want the object to be parsed,
|
|
|
|
* except if it was in a pack-file and we didn't
|
|
|
|
* do a full fsck
|
|
|
|
*/
|
2013-06-06 00:37:39 +02:00
|
|
|
if (!(obj->flags & HAS_OBJ)) {
|
2017-12-05 17:58:46 +01:00
|
|
|
if (is_promisor_object(&obj->oid))
|
|
|
|
return;
|
2018-05-02 02:25:33 +02:00
|
|
|
if (has_object_pack(&obj->oid))
|
2007-01-22 07:26:41 +01:00
|
|
|
return; /* it is in pack - forget about it */
|
2018-11-10 06:16:15 +01:00
|
|
|
printf_ln(_("missing %s %s"), printable_type(obj),
|
|
|
|
describe_object(obj));
|
2007-03-05 09:22:06 +01:00
|
|
|
errors_found |= ERROR_REACHABLE;
|
2007-01-22 07:26:41 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check a single unreachable object
|
|
|
|
*/
|
|
|
|
static void check_unreachable_object(struct object *obj)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Missing unreachable object? Ignore it. It's not like
|
|
|
|
* we miss it (since it can't be reached), nor do we want
|
|
|
|
* to complain about it being unreachable (since it does
|
|
|
|
* not exist).
|
|
|
|
*/
|
2017-01-16 22:25:35 +01:00
|
|
|
if (!(obj->flags & HAS_OBJ))
|
2007-01-22 07:26:41 +01:00
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unreachable object that exists? Show it if asked to,
|
|
|
|
* since this is something that is prunable.
|
|
|
|
*/
|
|
|
|
if (show_unreachable) {
|
2018-11-10 06:16:15 +01:00
|
|
|
printf_ln(_("unreachable %s %s"), printable_type(obj),
|
|
|
|
describe_object(obj));
|
2007-01-22 07:26:41 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2017-07-20 02:21:44 +02:00
|
|
|
* "!USED" means that nothing at all points to it, including
|
2007-02-04 05:49:16 +01:00
|
|
|
* other unreachable objects. In other words, it's the "tip"
|
2007-01-22 07:26:41 +01:00
|
|
|
* of some set of unreachable objects, usually a commit that
|
|
|
|
* got dropped.
|
|
|
|
*
|
|
|
|
* Such starting points are more interesting than some random
|
|
|
|
* set of unreachable objects, so we show them even if the user
|
|
|
|
* hasn't asked for _all_ unreachable objects. If you have
|
|
|
|
* deleted a branch by mistake, this is a prime candidate to
|
|
|
|
* start looking at, for example.
|
|
|
|
*/
|
2017-07-20 02:21:44 +02:00
|
|
|
if (!(obj->flags & USED)) {
|
2012-02-28 23:55:39 +01:00
|
|
|
if (show_dangling)
|
2018-11-10 06:16:15 +01:00
|
|
|
printf_ln(_("dangling %s %s"), printable_type(obj),
|
|
|
|
describe_object(obj));
|
2007-07-03 02:33:54 +02:00
|
|
|
if (write_lost_and_found) {
|
2015-08-10 11:35:31 +02:00
|
|
|
char *filename = git_pathdup("lost-found/%s/%s",
|
2007-07-03 02:33:54 +02:00
|
|
|
obj->type == OBJ_COMMIT ? "commit" : "other",
|
2016-07-17 12:59:44 +02:00
|
|
|
describe_object(obj));
|
2007-07-03 02:33:54 +02:00
|
|
|
FILE *f;
|
|
|
|
|
2014-11-30 09:24:27 +01:00
|
|
|
if (safe_create_leading_directories_const(filename)) {
|
2018-11-10 06:16:15 +01:00
|
|
|
error(_("could not create lost-found"));
|
2015-08-10 11:35:31 +02:00
|
|
|
free(filename);
|
2007-07-03 02:33:54 +02:00
|
|
|
return;
|
|
|
|
}
|
2017-05-03 12:16:46 +02:00
|
|
|
f = xfopen(filename, "w");
|
2007-07-22 22:20:26 +02:00
|
|
|
if (obj->type == OBJ_BLOB) {
|
2016-09-05 22:07:59 +02:00
|
|
|
if (stream_blob_to_fd(fileno(f), &obj->oid, NULL, 1))
|
2018-11-10 06:16:15 +01:00
|
|
|
die_errno(_("could not write '%s'"), filename);
|
2007-07-22 22:20:26 +02:00
|
|
|
} else
|
2016-07-17 12:59:44 +02:00
|
|
|
fprintf(f, "%s\n", describe_object(obj));
|
2008-12-05 01:35:48 +01:00
|
|
|
if (fclose(f))
|
2018-11-10 06:16:15 +01:00
|
|
|
die_errno(_("could not finish '%s'"),
|
2009-06-27 17:58:46 +02:00
|
|
|
filename);
|
2015-08-10 11:35:31 +02:00
|
|
|
free(filename);
|
2007-07-03 02:33:54 +02:00
|
|
|
}
|
2007-01-22 07:26:41 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Otherwise? It's there, it's unreachable, and some other unreachable
|
|
|
|
* object points to it. Ignore it - it's not interesting, and we showed
|
|
|
|
* all the interesting cases above.
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
|
|
|
|
static void check_object(struct object *obj)
|
|
|
|
{
|
2007-06-05 04:44:00 +02:00
|
|
|
if (verbose)
|
2018-11-10 06:16:15 +01:00
|
|
|
fprintf_ln(stderr, _("Checking %s"), describe_object(obj));
|
2007-06-05 04:44:00 +02:00
|
|
|
|
2007-01-22 07:26:41 +01:00
|
|
|
if (obj->flags & REACHABLE)
|
|
|
|
check_reachable_object(obj);
|
|
|
|
else
|
|
|
|
check_unreachable_object(obj);
|
|
|
|
}
|
2005-09-20 20:56:05 +02:00
|
|
|
|
2005-04-11 08:13:09 +02:00
|
|
|
static void check_connectivity(void)
|
|
|
|
{
|
2006-06-30 06:38:55 +02:00
|
|
|
int i, max;
|
2005-04-11 08:13:09 +02:00
|
|
|
|
2008-12-11 04:44:37 +01:00
|
|
|
/* Traverse the pending reachable objects */
|
|
|
|
traverse_reachable();
|
|
|
|
|
fsck: always compute USED flags for unreachable objects
The --connectivity-only option avoids opening every object, and instead
just marks reachable objects with a flag and compares this to the set
of all objects. This strategy is discussed in more detail in 3e3f8bd608
(fsck: prepare dummy objects for --connectivity-check, 2017-01-17).
This means that we report _every_ unreachable object as dangling.
Whereas in a full fsck, we'd have actually opened and parsed each of
those unreachable objects, marking their child objects with the USED
flag, to mean "this was mentioned by another object". And thus we can
report only the tip of an unreachable segment of the object graph as
dangling.
You can see this difference with a trivial example:
tree=$(git hash-object -t tree -w /dev/null)
one=$(echo one | git commit-tree $tree)
two=$(echo two | git commit-tree -p $one $tree)
Running `git fsck` will report only $two as dangling, but with
--connectivity-only, both commits (and the tree) are reported. Likewise,
using --lost-found would write all three objects.
We can make --connectivity-only work like the normal case by taking a
separate pass over the unreachable objects, parsing them and marking
objects they refer to as USED. That still avoids parsing any blobs,
though we do pay the cost to access any unreachable commits and trees
(which may or may not be noticeable, depending on how many you have).
If neither --dangling nor --lost-found is in effect, then we can skip
this step entirely, just like we do now. That makes "--connectivity-only
--no-dangling" just as fast as the current "--connectivity-only". I.e.,
we do the correct thing always, but you can still tweak the options to
make it faster if you don't care about dangling objects.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-03-05 05:47:39 +01:00
|
|
|
/*
|
|
|
|
* With --connectivity-only, we won't have actually opened and marked
|
|
|
|
* unreachable objects with USED. Do that now to make --dangling, etc
|
|
|
|
* accurate.
|
|
|
|
*/
|
|
|
|
if (connectivity_only && (show_dangling || write_lost_and_found)) {
|
|
|
|
/*
|
|
|
|
* Even though we already have a "struct object" for each of
|
|
|
|
* these in memory, we must not iterate over the internal
|
|
|
|
* object hash as we do below. Our loop would potentially
|
|
|
|
* resize the hash, making our iteration invalid.
|
|
|
|
*
|
|
|
|
* Instead, we'll just go back to the source list of objects,
|
|
|
|
* and ignore any that weren't present in our earlier
|
|
|
|
* traversal.
|
|
|
|
*/
|
|
|
|
for_each_loose_object(mark_loose_unreachable_referents, NULL, 0);
|
|
|
|
for_each_packed_object(mark_packed_unreachable_referents, NULL, 0);
|
|
|
|
}
|
|
|
|
|
2005-04-11 08:13:09 +02:00
|
|
|
/* Look up all the requirements, warn about missing objects.. */
|
2006-06-30 06:38:55 +02:00
|
|
|
max = get_max_object_index();
|
2007-06-05 04:44:00 +02:00
|
|
|
if (verbose)
|
2018-11-10 06:16:15 +01:00
|
|
|
fprintf_ln(stderr, _("Checking connectivity (%d objects)"), max);
|
2007-06-05 04:44:00 +02:00
|
|
|
|
2006-06-30 06:38:55 +02:00
|
|
|
for (i = 0; i < max; i++) {
|
|
|
|
struct object *obj = get_indexed_object(i);
|
2005-04-11 08:13:09 +02:00
|
|
|
|
2007-01-22 07:26:41 +01:00
|
|
|
if (obj)
|
|
|
|
check_object(obj);
|
2005-04-11 08:13:09 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
fsck: actually fsck blob data
Because fscking a blob has always been a noop, we didn't
bother passing around the blob data. In preparation for
content-level checks, let's fix up a few things:
1. The fsck_object() function just returns success for any
blob. Let's a noop fsck_blob(), which we can fill in
with actual logic later.
2. The fsck_loose() function in builtin/fsck.c
just threw away blob content after loading it. Let's
hold onto it until after we've called fsck_object().
The easiest way to do this is to just drop the
parse_loose_object() helper entirely. Incidentally,
this also fixes a memory leak: if we successfully
loaded the object data but did not parse it, we would
have left the function without freeing it.
3. When fsck_loose() loads the object data, it
does so with a custom read_loose_object() helper. This
function streams any blobs, regardless of size, under
the assumption that we're only checking the sha1.
Instead, let's actually load blobs smaller than
big_file_threshold, as the normal object-reading
code-paths would do. This lets us fsck small files, and
a NULL return is an indication that the blob was so big
that it needed to be streamed, and we can pass that
information along to fsck_blob().
Signed-off-by: Jeff King <peff@peff.net>
2018-05-02 21:44:51 +02:00
|
|
|
static int fsck_obj(struct object *obj, void *buffer, unsigned long size)
|
2005-05-03 01:13:18 +02:00
|
|
|
{
|
2017-08-10 11:42:10 +02:00
|
|
|
int err;
|
|
|
|
|
2008-02-25 22:46:08 +01:00
|
|
|
if (obj->flags & SEEN)
|
2005-05-03 01:13:18 +02:00
|
|
|
return 0;
|
2008-02-25 22:46:08 +01:00
|
|
|
obj->flags |= SEEN;
|
2005-05-03 01:13:18 +02:00
|
|
|
|
2007-06-05 04:44:00 +02:00
|
|
|
if (verbose)
|
2018-11-10 06:16:15 +01:00
|
|
|
fprintf_ln(stderr, _("Checking %s %s"),
|
|
|
|
printable_type(obj), describe_object(obj));
|
2005-05-06 01:18:48 +02:00
|
|
|
|
2015-06-22 17:25:00 +02:00
|
|
|
if (fsck_walk(obj, NULL, &fsck_obj_options))
|
2018-11-10 06:16:15 +01:00
|
|
|
objerror(obj, _("broken links"));
|
fsck: actually fsck blob data
Because fscking a blob has always been a noop, we didn't
bother passing around the blob data. In preparation for
content-level checks, let's fix up a few things:
1. The fsck_object() function just returns success for any
blob. Let's a noop fsck_blob(), which we can fill in
with actual logic later.
2. The fsck_loose() function in builtin/fsck.c
just threw away blob content after loading it. Let's
hold onto it until after we've called fsck_object().
The easiest way to do this is to just drop the
parse_loose_object() helper entirely. Incidentally,
this also fixes a memory leak: if we successfully
loaded the object data but did not parse it, we would
have left the function without freeing it.
3. When fsck_loose() loads the object data, it
does so with a custom read_loose_object() helper. This
function streams any blobs, regardless of size, under
the assumption that we're only checking the sha1.
Instead, let's actually load blobs smaller than
big_file_threshold, as the normal object-reading
code-paths would do. This lets us fsck small files, and
a NULL return is an indication that the blob was so big
that it needed to be streamed, and we can pass that
information along to fsck_blob().
Signed-off-by: Jeff King <peff@peff.net>
2018-05-02 21:44:51 +02:00
|
|
|
err = fsck_object(obj, buffer, size, &fsck_obj_options);
|
2017-08-10 11:42:10 +02:00
|
|
|
if (err)
|
|
|
|
goto out;
|
2005-04-09 02:11:14 +02:00
|
|
|
|
2008-02-25 22:46:08 +01:00
|
|
|
if (obj->type == OBJ_COMMIT) {
|
|
|
|
struct commit *commit = (struct commit *) obj;
|
2005-07-28 00:16:03 +02:00
|
|
|
|
2008-02-25 22:46:08 +01:00
|
|
|
if (!commit->parents && show_root)
|
2018-11-10 06:16:15 +01:00
|
|
|
printf_ln(_("root %s"),
|
|
|
|
describe_object(&commit->object));
|
2008-02-25 22:46:08 +01:00
|
|
|
}
|
2005-05-03 16:57:56 +02:00
|
|
|
|
2008-02-25 22:46:08 +01:00
|
|
|
if (obj->type == OBJ_TAG) {
|
|
|
|
struct tag *tag = (struct tag *) obj;
|
2007-06-05 04:44:00 +02:00
|
|
|
|
2008-02-25 22:46:08 +01:00
|
|
|
if (show_tags && tag->tagged) {
|
2018-11-10 06:16:15 +01:00
|
|
|
printf_ln(_("tagged %s %s (%s) in %s"),
|
2018-11-10 06:16:14 +01:00
|
|
|
printable_type(tag->tagged),
|
|
|
|
describe_object(tag->tagged),
|
|
|
|
tag->tag,
|
|
|
|
describe_object(&tag->object));
|
2008-02-25 22:46:08 +01:00
|
|
|
}
|
2005-05-03 16:57:56 +02:00
|
|
|
}
|
2005-04-26 01:31:13 +02:00
|
|
|
|
2017-08-10 11:42:10 +02:00
|
|
|
out:
|
|
|
|
if (obj->type == OBJ_TREE)
|
|
|
|
free_tree_buffer((struct tree *)obj);
|
|
|
|
if (obj->type == OBJ_COMMIT)
|
2018-12-15 01:09:40 +01:00
|
|
|
free_commit_buffer(the_repository->parsed_objects,
|
|
|
|
(struct commit *)obj);
|
2017-08-10 11:42:10 +02:00
|
|
|
return err;
|
2005-04-09 00:02:42 +02:00
|
|
|
}
|
|
|
|
|
2017-05-07 00:10:20 +02:00
|
|
|
static int fsck_obj_buffer(const struct object_id *oid, enum object_type type,
|
2011-11-07 03:59:25 +01:00
|
|
|
unsigned long size, void *buffer, int *eaten)
|
|
|
|
{
|
fsck: use streaming interface for large blobs in pack
For blobs, we want to make sure the on-disk data is not corrupted
(i.e. can be inflated and produce the expected SHA-1). Blob content is
opaque, there's nothing else inside to check for.
For really large blobs, we may want to avoid unpacking the entire blob
in memory, just to check whether it produces the same SHA-1. On 32-bit
systems, we may not have enough virtual address space for such memory
allocation. And even on 64-bit where it's not a problem, allocating a
lot more memory could result in kicking other parts of systems to swap
file, generating lots of I/O and slowing everything down.
For this particular operation, not unpacking the blob and letting
check_sha1_signature, which supports streaming interface, do the job
is sufficient. check_sha1_signature() is not shown in the diff,
unfortunately. But if will be called when "data_valid && !data" is
false.
We will call the callback function "fn" with NULL as "data". The only
callback of this function is fsck_obj_buffer(), which does not touch
"data" at all if it's a blob.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-07-13 17:44:04 +02:00
|
|
|
/*
|
|
|
|
* Note, buffer may be NULL if type is OBJ_BLOB. See
|
|
|
|
* verify_packfile(), data_valid variable for details.
|
|
|
|
*/
|
2011-11-07 03:59:25 +01:00
|
|
|
struct object *obj;
|
2018-06-29 03:21:53 +02:00
|
|
|
obj = parse_object_buffer(the_repository, oid, type, size, buffer,
|
|
|
|
eaten);
|
2011-11-07 03:59:25 +01:00
|
|
|
if (!obj) {
|
|
|
|
errors_found |= ERROR_OBJECT;
|
2018-11-10 06:16:15 +01:00
|
|
|
return error(_("%s: object corrupt or missing"),
|
|
|
|
oid_to_hex(oid));
|
2011-11-07 03:59:25 +01:00
|
|
|
}
|
2017-07-20 02:21:44 +02:00
|
|
|
obj->flags &= ~(REACHABLE | SEEN);
|
|
|
|
obj->flags |= HAS_OBJ;
|
fsck: actually fsck blob data
Because fscking a blob has always been a noop, we didn't
bother passing around the blob data. In preparation for
content-level checks, let's fix up a few things:
1. The fsck_object() function just returns success for any
blob. Let's a noop fsck_blob(), which we can fill in
with actual logic later.
2. The fsck_loose() function in builtin/fsck.c
just threw away blob content after loading it. Let's
hold onto it until after we've called fsck_object().
The easiest way to do this is to just drop the
parse_loose_object() helper entirely. Incidentally,
this also fixes a memory leak: if we successfully
loaded the object data but did not parse it, we would
have left the function without freeing it.
3. When fsck_loose() loads the object data, it
does so with a custom read_loose_object() helper. This
function streams any blobs, regardless of size, under
the assumption that we're only checking the sha1.
Instead, let's actually load blobs smaller than
big_file_threshold, as the normal object-reading
code-paths would do. This lets us fsck small files, and
a NULL return is an indication that the blob was so big
that it needed to be streamed, and we can pass that
information along to fsck_blob().
Signed-off-by: Jeff King <peff@peff.net>
2018-05-02 21:44:51 +02:00
|
|
|
return fsck_obj(obj, buffer, size);
|
2011-11-07 03:59:25 +01:00
|
|
|
}
|
|
|
|
|
2006-08-15 19:23:48 +02:00
|
|
|
/* Count of refs seen while collecting the default heads; if it stays 0, get_default_heads() disables --unreachable. */
static int default_refs;
|
2005-07-03 19:01:38 +02:00
|
|
|
|
2017-02-22 00:47:32 +01:00
|
|
|
static void fsck_handle_reflog_oid(const char *refname, struct object_id *oid,
|
2017-04-26 21:29:31 +02:00
|
|
|
timestamp_t timestamp)
|
2006-12-18 10:36:16 +01:00
|
|
|
{
|
|
|
|
struct object *obj;
|
|
|
|
|
2017-02-22 00:47:32 +01:00
|
|
|
if (!is_null_oid(oid)) {
|
2019-06-20 09:41:14 +02:00
|
|
|
obj = lookup_object(the_repository, oid);
|
2017-01-16 22:34:57 +01:00
|
|
|
if (obj && (obj->flags & HAS_OBJ)) {
|
2016-07-17 13:00:02 +02:00
|
|
|
if (timestamp && name_objects)
|
|
|
|
add_decoration(fsck_walk_options.object_names,
|
|
|
|
obj,
|
2017-04-21 12:45:48 +02:00
|
|
|
xstrfmt("%s@{%"PRItime"}", refname, timestamp));
|
2017-07-20 02:21:44 +02:00
|
|
|
obj->flags |= USED;
|
2008-02-25 22:46:05 +01:00
|
|
|
mark_object_reachable(obj);
|
2017-12-05 17:58:44 +01:00
|
|
|
} else if (!is_promisor_object(oid)) {
|
2018-11-10 06:16:15 +01:00
|
|
|
error(_("%s: invalid reflog entry %s"),
|
|
|
|
refname, oid_to_hex(oid));
|
2015-06-08 15:40:05 +02:00
|
|
|
errors_found |= ERROR_REACHABLE;
|
2006-12-18 10:36:16 +01:00
|
|
|
}
|
|
|
|
}
|
2015-06-08 15:40:04 +02:00
|
|
|
}
|
|
|
|
|
2017-02-22 00:47:32 +01:00
|
|
|
/*
 * for_each_reflog_ent() callback: check both sides of a single reflog
 * entry.  cb_data is the refname the log belongs to.  Only the "new"
 * side carries the timestamp used for --name-objects labels.
 */
static int fsck_handle_reflog_ent(struct object_id *ooid, struct object_id *noid,
		const char *email, timestamp_t timestamp, int tz,
		const char *message, void *cb_data)
{
	const char *refname = cb_data;

	if (verbose)
		fprintf_ln(stderr, _("Checking reflog %s->%s"),
			   oid_to_hex(ooid), oid_to_hex(noid));

	fsck_handle_reflog_oid(refname, ooid, 0);
	fsck_handle_reflog_oid(refname, noid, timestamp);

	return 0;
}
|
|
|
|
|
2015-05-25 20:38:50 +02:00
|
|
|
static int fsck_handle_reflog(const char *logname, const struct object_id *oid,
|
|
|
|
int flag, void *cb_data)
|
2007-02-03 19:25:43 +01:00
|
|
|
{
|
2018-10-21 10:08:58 +02:00
|
|
|
struct strbuf refname = STRBUF_INIT;
|
|
|
|
|
|
|
|
strbuf_worktree_ref(cb_data, &refname, logname);
|
|
|
|
for_each_reflog_ent(refname.buf, fsck_handle_reflog_ent, refname.buf);
|
|
|
|
strbuf_release(&refname);
|
2007-02-03 19:25:43 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-05-25 20:38:50 +02:00
|
|
|
static int fsck_handle_ref(const char *refname, const struct object_id *oid,
|
|
|
|
int flag, void *cb_data)
|
2005-05-18 19:16:14 +02:00
|
|
|
{
|
|
|
|
struct object *obj;
|
|
|
|
|
2018-06-29 03:21:51 +02:00
|
|
|
obj = parse_object(the_repository, oid);
|
2005-06-28 23:58:33 +02:00
|
|
|
if (!obj) {
|
2017-12-05 17:58:45 +01:00
|
|
|
if (is_promisor_object(oid)) {
|
|
|
|
/*
|
|
|
|
* Increment default_refs anyway, because this is a
|
|
|
|
* valid ref.
|
|
|
|
*/
|
|
|
|
default_refs++;
|
|
|
|
return 0;
|
|
|
|
}
|
2018-11-10 06:16:15 +01:00
|
|
|
error(_("%s: invalid sha1 pointer %s"),
|
|
|
|
refname, oid_to_hex(oid));
|
2014-09-12 05:38:30 +02:00
|
|
|
errors_found |= ERROR_REACHABLE;
|
2005-07-03 19:01:38 +02:00
|
|
|
/* We'll continue with the rest despite the error.. */
|
|
|
|
return 0;
|
2005-06-28 23:58:33 +02:00
|
|
|
}
|
2015-09-23 22:46:39 +02:00
|
|
|
if (obj->type != OBJ_COMMIT && is_branch(refname)) {
|
2018-11-10 06:16:15 +01:00
|
|
|
error(_("%s: not a commit"), refname);
|
2015-09-23 22:46:39 +02:00
|
|
|
errors_found |= ERROR_REFS;
|
|
|
|
}
|
2005-07-03 19:01:38 +02:00
|
|
|
default_refs++;
|
2017-07-20 02:21:44 +02:00
|
|
|
obj->flags |= USED;
|
2016-07-17 13:00:02 +02:00
|
|
|
if (name_objects)
|
|
|
|
add_decoration(fsck_walk_options.object_names,
|
|
|
|
obj, xstrdup(refname));
|
2008-02-25 22:46:05 +01:00
|
|
|
mark_object_reachable(obj);
|
2006-12-18 10:36:16 +01:00
|
|
|
|
2005-05-20 16:49:17 +02:00
|
|
|
return 0;
|
2005-05-18 19:16:14 +02:00
|
|
|
}
|
|
|
|
|
2018-10-21 10:08:58 +02:00
|
|
|
/*
 * Forward declaration; the implementation appears later in this file.
 * Used by get_default_heads() to inspect each worktree's HEAD.
 */
static int fsck_head_link(const char *head_ref_name,
			  const char **head_points_at,
			  struct object_id *head_oid);
|
|
|
|
|
2005-05-18 19:16:14 +02:00
|
|
|
static void get_default_heads(void)
|
|
|
|
{
|
2018-10-21 10:08:58 +02:00
|
|
|
struct worktree **worktrees, **p;
|
2018-10-21 10:08:57 +02:00
|
|
|
const char *head_points_at;
|
|
|
|
struct object_id head_oid;
|
|
|
|
|
2015-05-25 20:38:50 +02:00
|
|
|
for_each_rawref(fsck_handle_ref, NULL);
|
2018-10-21 10:08:58 +02:00
|
|
|
|
|
|
|
worktrees = get_worktrees(0);
|
|
|
|
for (p = worktrees; *p; p++) {
|
|
|
|
struct worktree *wt = *p;
|
|
|
|
struct strbuf ref = STRBUF_INIT;
|
|
|
|
|
|
|
|
strbuf_worktree_ref(wt, &ref, "HEAD");
|
|
|
|
fsck_head_link(ref.buf, &head_points_at, &head_oid);
|
|
|
|
if (head_points_at && !is_null_oid(&head_oid))
|
|
|
|
fsck_handle_ref(ref.buf, &head_oid, 0, NULL);
|
|
|
|
strbuf_release(&ref);
|
|
|
|
|
|
|
|
if (include_reflogs)
|
|
|
|
refs_for_each_reflog(get_worktree_ref_store(wt),
|
|
|
|
fsck_handle_reflog, wt);
|
|
|
|
}
|
|
|
|
free_worktrees(worktrees);
|
2006-08-29 20:47:30 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Not having any default heads isn't really fatal, but
|
|
|
|
* it does mean that "--unreachable" no longer makes any
|
|
|
|
* sense (since in this case everything will obviously
|
|
|
|
* be unreachable by definition.
|
|
|
|
*
|
|
|
|
* Showing dangling objects is valid, though (as those
|
|
|
|
* dangling objects are likely lost heads).
|
|
|
|
*
|
|
|
|
* So we just print a warning about it, and clear the
|
|
|
|
* "show_unreachable" flag.
|
|
|
|
*/
|
|
|
|
if (!default_refs) {
|
2018-11-10 06:16:15 +01:00
|
|
|
fprintf_ln(stderr, _("notice: No default references"));
|
2006-08-29 20:47:30 +02:00
|
|
|
show_unreachable = 0;
|
|
|
|
}
|
2005-05-18 19:16:14 +02:00
|
|
|
}
|
|
|
|
|
fsck: actually fsck blob data
Because fscking a blob has always been a noop, we didn't
bother passing around the blob data. In preparation for
content-level checks, let's fix up a few things:
1. The fsck_object() function just returns success for any
blob. Let's a noop fsck_blob(), which we can fill in
with actual logic later.
2. The fsck_loose() function in builtin/fsck.c
just threw away blob content after loading it. Let's
hold onto it until after we've called fsck_object().
The easiest way to do this is to just drop the
parse_loose_object() helper entirely. Incidentally,
this also fixes a memory leak: if we successfully
loaded the object data but did not parse it, we would
have left the function without freeing it.
3. When fsck_loose() loads the object data, it
does so with a custom read_loose_object() helper. This
function streams any blobs, regardless of size, under
the assumption that we're only checking the sha1.
Instead, let's actually load blobs smaller than
big_file_threshold, as the normal object-reading
code-paths would do. This lets us fsck small files, and
a NULL return is an indication that the blob was so big
that it needed to be streamed, and we can pass that
information along to fsck_blob().
Signed-off-by: Jeff King <peff@peff.net>
2018-05-02 21:44:51 +02:00
|
|
|
static int fsck_loose(const struct object_id *oid, const char *path, void *data)
|
fsck: parse loose object paths directly
When we iterate over the list of loose objects to check, we
get the actual path of each object. But we then throw it
away and pass just the sha1 to fsck_sha1(), which will do a
fresh lookup. Usually it would find the same object, but it
may not if an object exists both as a loose and a packed
object. We may end up checking the packed object twice, and
never look at the loose one.
In practice this isn't too terrible, because if fsck doesn't
complain, it means you have at least one good copy. But
since the point of fsck is to look for corruption, we should
be thorough.
The new read_loose_object() interface can help us get the
data from disk, and then we replace parse_object() with
parse_object_buffer(). As a bonus, our error messages now
mention the path to a corrupted object, which should make it
easier to track down errors when they do happen.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-13 18:59:44 +01:00
|
|
|
{
|
|
|
|
struct object *obj;
|
|
|
|
enum object_type type;
|
|
|
|
unsigned long size;
|
fsck: actually fsck blob data
Because fscking a blob has always been a noop, we didn't
bother passing around the blob data. In preparation for
content-level checks, let's fix up a few things:
1. The fsck_object() function just returns success for any
blob. Let's a noop fsck_blob(), which we can fill in
with actual logic later.
2. The fsck_loose() function in builtin/fsck.c
just threw away blob content after loading it. Let's
hold onto it until after we've called fsck_object().
The easiest way to do this is to just drop the
parse_loose_object() helper entirely. Incidentally,
this also fixes a memory leak: if we successfully
loaded the object data but did not parse it, we would
have left the function without freeing it.
3. When fsck_loose() loads the object data, it
does so with a custom read_loose_object() helper. This
function streams any blobs, regardless of size, under
the assumption that we're only checking the sha1.
Instead, let's actually load blobs smaller than
big_file_threshold, as the normal object-reading
code-paths would do. This lets us fsck small files, and
a NULL return is an indication that the blob was so big
that it needed to be streamed, and we can pass that
information along to fsck_blob().
Signed-off-by: Jeff King <peff@peff.net>
2018-05-02 21:44:51 +02:00
|
|
|
void *contents;
|
fsck: parse loose object paths directly
When we iterate over the list of loose objects to check, we
get the actual path of each object. But we then throw it
away and pass just the sha1 to fsck_sha1(), which will do a
fresh lookup. Usually it would find the same object, but it
may not if an object exists both as a loose and a packed
object. We may end up checking the packed object twice, and
never look at the loose one.
In practice this isn't too terrible, because if fsck doesn't
complain, it means you have at least one good copy. But
since the point of fsck is to look for corruption, we should
be thorough.
The new read_loose_object() interface can help us get the
data from disk, and then we replace parse_object() with
parse_object_buffer(). As a bonus, our error messages now
mention the path to a corrupted object, which should make it
easier to track down errors when they do happen.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-13 18:59:44 +01:00
|
|
|
int eaten;
|
|
|
|
|
2018-05-29 10:09:58 +02:00
|
|
|
if (read_loose_object(path, oid, &type, &size, &contents) < 0) {
|
fsck: actually fsck blob data
Because fscking a blob has always been a noop, we didn't
bother passing around the blob data. In preparation for
content-level checks, let's fix up a few things:
1. The fsck_object() function just returns success for any
blob. Let's a noop fsck_blob(), which we can fill in
with actual logic later.
2. The fsck_loose() function in builtin/fsck.c
just threw away blob content after loading it. Let's
hold onto it until after we've called fsck_object().
The easiest way to do this is to just drop the
parse_loose_object() helper entirely. Incidentally,
this also fixes a memory leak: if we successfully
loaded the object data but did not parse it, we would
have left the function without freeing it.
3. When fsck_loose() loads the object data, it
does so with a custom read_loose_object() helper. This
function streams any blobs, regardless of size, under
the assumption that we're only checking the sha1.
Instead, let's actually load blobs smaller than
big_file_threshold, as the normal object-reading
code-paths would do. This lets us fsck small files, and
a NULL return is an indication that the blob was so big
that it needed to be streamed, and we can pass that
information along to fsck_blob().
Signed-off-by: Jeff King <peff@peff.net>
2018-05-02 21:44:51 +02:00
|
|
|
errors_found |= ERROR_OBJECT;
|
2018-11-10 06:16:15 +01:00
|
|
|
error(_("%s: object corrupt or missing: %s"),
|
fsck: actually fsck blob data
Because fscking a blob has always been a noop, we didn't
bother passing around the blob data. In preparation for
content-level checks, let's fix up a few things:
1. The fsck_object() function just returns success for any
blob. Let's a noop fsck_blob(), which we can fill in
with actual logic later.
2. The fsck_loose() function in builtin/fsck.c
just threw away blob content after loading it. Let's
hold onto it until after we've called fsck_object().
The easiest way to do this is to just drop the
parse_loose_object() helper entirely. Incidentally,
this also fixes a memory leak: if we successfully
loaded the object data but did not parse it, we would
have left the function without freeing it.
3. When fsck_loose() loads the object data, it
does so with a custom read_loose_object() helper. This
function streams any blobs, regardless of size, under
the assumption that we're only checking the sha1.
Instead, let's actually load blobs smaller than
big_file_threshold, as the normal object-reading
code-paths would do. This lets us fsck small files, and
a NULL return is an indication that the blob was so big
that it needed to be streamed, and we can pass that
information along to fsck_blob().
Signed-off-by: Jeff King <peff@peff.net>
2018-05-02 21:44:51 +02:00
|
|
|
oid_to_hex(oid), path);
|
|
|
|
return 0; /* keep checking other objects */
|
|
|
|
}
|
fsck: parse loose object paths directly
When we iterate over the list of loose objects to check, we
get the actual path of each object. But we then throw it
away and pass just the sha1 to fsck_sha1(), which will do a
fresh lookup. Usually it would find the same object, but it
may not if an object exists both as a loose and a packed
object. We may end up checking the packed object twice, and
never look at the loose one.
In practice this isn't too terrible, because if fsck doesn't
complain, it means you have at least one good copy. But
since the point of fsck is to look for corruption, we should
be thorough.
The new read_loose_object() interface can help us get the
data from disk, and then we replace parse_object() with
parse_object_buffer(). As a bonus, our error messages now
mention the path to a corrupted object, which should make it
easier to track down errors when they do happen.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-13 18:59:44 +01:00
|
|
|
|
|
|
|
if (!contents && type != OBJ_BLOB)
|
fsck: actually fsck blob data
Because fscking a blob has always been a noop, we didn't
bother passing around the blob data. In preparation for
content-level checks, let's fix up a few things:
1. The fsck_object() function just returns success for any
blob. Let's a noop fsck_blob(), which we can fill in
with actual logic later.
2. The fsck_loose() function in builtin/fsck.c
just threw away blob content after loading it. Let's
hold onto it until after we've called fsck_object().
The easiest way to do this is to just drop the
parse_loose_object() helper entirely. Incidentally,
this also fixes a memory leak: if we successfully
loaded the object data but did not parse it, we would
have left the function without freeing it.
3. When fsck_loose() loads the object data, it
does so with a custom read_loose_object() helper. This
function streams any blobs, regardless of size, under
the assumption that we're only checking the sha1.
Instead, let's actually load blobs smaller than
big_file_threshold, as the normal object-reading
code-paths would do. This lets us fsck small files, and
a NULL return is an indication that the blob was so big
that it needed to be streamed, and we can pass that
information along to fsck_blob().
Signed-off-by: Jeff King <peff@peff.net>
2018-05-02 21:44:51 +02:00
|
|
|
BUG("read_loose_object streamed a non-blob");
|
fsck: parse loose object paths directly
When we iterate over the list of loose objects to check, we
get the actual path of each object. But we then throw it
away and pass just the sha1 to fsck_sha1(), which will do a
fresh lookup. Usually it would find the same object, but it
may not if an object exists both as a loose and a packed
object. We may end up checking the packed object twice, and
never look at the loose one.
In practice this isn't too terrible, because if fsck doesn't
complain, it means you have at least one good copy. But
since the point of fsck is to look for corruption, we should
be thorough.
The new read_loose_object() interface can help us get the
data from disk, and then we replace parse_object() with
parse_object_buffer(). As a bonus, our error messages now
mention the path to a corrupted object, which should make it
easier to track down errors when they do happen.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-13 18:59:44 +01:00
|
|
|
|
2018-06-29 03:21:53 +02:00
|
|
|
obj = parse_object_buffer(the_repository, oid, type, size,
|
|
|
|
contents, &eaten);
|
|
|
|
|
fsck: parse loose object paths directly
When we iterate over the list of loose objects to check, we
get the actual path of each object. But we then throw it
away and pass just the sha1 to fsck_sha1(), which will do a
fresh lookup. Usually it would find the same object, but it
may not if an object exists both as a loose and a packed
object. We may end up checking the packed object twice, and
never look at the loose one.
In practice this isn't too terrible, because if fsck doesn't
complain, it means you have at least one good copy. But
since the point of fsck is to look for corruption, we should
be thorough.
The new read_loose_object() interface can help us get the
data from disk, and then we replace parse_object() with
parse_object_buffer(). As a bonus, our error messages now
mention the path to a corrupted object, which should make it
easier to track down errors when they do happen.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-13 18:59:44 +01:00
|
|
|
if (!obj) {
|
|
|
|
errors_found |= ERROR_OBJECT;
|
2018-11-10 06:16:15 +01:00
|
|
|
error(_("%s: object could not be parsed: %s"),
|
2017-02-22 00:47:35 +01:00
|
|
|
oid_to_hex(oid), path);
|
fsck: actually fsck blob data
Because fscking a blob has always been a noop, we didn't
bother passing around the blob data. In preparation for
content-level checks, let's fix up a few things:
1. The fsck_object() function just returns success for any
blob. Let's a noop fsck_blob(), which we can fill in
with actual logic later.
2. The fsck_loose() function in builtin/fsck.c
just threw away blob content after loading it. Let's
hold onto it until after we've called fsck_object().
The easiest way to do this is to just drop the
parse_loose_object() helper entirely. Incidentally,
this also fixes a memory leak: if we successfully
loaded the object data but did not parse it, we would
have left the function without freeing it.
3. When fsck_loose() loads the object data, it
does so with a custom read_loose_object() helper. This
function streams any blobs, regardless of size, under
the assumption that we're only checking the sha1.
Instead, let's actually load blobs smaller than
big_file_threshold, as the normal object-reading
code-paths would do. This lets us fsck small files, and
a NULL return is an indication that the blob was so big
that it needed to be streamed, and we can pass that
information along to fsck_blob().
Signed-off-by: Jeff King <peff@peff.net>
2018-05-02 21:44:51 +02:00
|
|
|
if (!eaten)
|
|
|
|
free(contents);
|
fsck: parse loose object paths directly
When we iterate over the list of loose objects to check, we
get the actual path of each object. But we then throw it
away and pass just the sha1 to fsck_sha1(), which will do a
fresh lookup. Usually it would find the same object, but it
may not if an object exists both as a loose and a packed
object. We may end up checking the packed object twice, and
never look at the loose one.
In practice this isn't too terrible, because if fsck doesn't
complain, it means you have at least one good copy. But
since the point of fsck is to look for corruption, we should
be thorough.
The new read_loose_object() interface can help us get the
data from disk, and then we replace parse_object() with
parse_object_buffer(). As a bonus, our error messages now
mention the path to a corrupted object, which should make it
easier to track down errors when they do happen.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-13 18:59:44 +01:00
|
|
|
return 0; /* keep checking other objects */
|
|
|
|
}
|
|
|
|
|
2017-07-20 02:21:44 +02:00
|
|
|
obj->flags &= ~(REACHABLE | SEEN);
|
|
|
|
obj->flags |= HAS_OBJ;
|
fsck: actually fsck blob data
Because fscking a blob has always been a noop, we didn't
bother passing around the blob data. In preparation for
content-level checks, let's fix up a few things:
1. The fsck_object() function just returns success for any
blob. Let's add a noop fsck_blob(), which we can fill in
with actual logic later.
2. The fsck_loose() function in builtin/fsck.c
just threw away blob content after loading it. Let's
hold onto it until after we've called fsck_object().
The easiest way to do this is to just drop the
parse_loose_object() helper entirely. Incidentally,
this also fixes a memory leak: if we successfully
loaded the object data but did not parse it, we would
have left the function without freeing it.
3. When fsck_loose() loads the object data, it
does so with a custom read_loose_object() helper. This
function streams any blobs, regardless of size, under
the assumption that we're only checking the sha1.
Instead, let's actually load blobs smaller than
big_file_threshold, as the normal object-reading
code-paths would do. This lets us fsck small files, and
a NULL return is an indication that the blob was so big
that it needed to be streamed, and we can pass that
information along to fsck_blob().
Signed-off-by: Jeff King <peff@peff.net>
2018-05-02 21:44:51 +02:00
|
|
|
if (fsck_obj(obj, contents, size))
|
2015-09-24 23:08:33 +02:00
|
|
|
errors_found |= ERROR_OBJECT;
|
fsck: actually fsck blob data
Because fscking a blob has always been a noop, we didn't
bother passing around the blob data. In preparation for
content-level checks, let's fix up a few things:
1. The fsck_object() function just returns success for any
blob. Let's add a noop fsck_blob(), which we can fill in
with actual logic later.
2. The fsck_loose() function in builtin/fsck.c
just threw away blob content after loading it. Let's
hold onto it until after we've called fsck_object().
The easiest way to do this is to just drop the
parse_loose_object() helper entirely. Incidentally,
this also fixes a memory leak: if we successfully
loaded the object data but did not parse it, we would
have left the function without freeing it.
3. When fsck_loose() loads the object data, it
does so with a custom read_loose_object() helper. This
function streams any blobs, regardless of size, under
the assumption that we're only checking the sha1.
Instead, let's actually load blobs smaller than
big_file_threshold, as the normal object-reading
code-paths would do. This lets us fsck small files, and
a NULL return is an indication that the blob was so big
that it needed to be streamed, and we can pass that
information along to fsck_blob().
Signed-off-by: Jeff King <peff@peff.net>
2018-05-02 21:44:51 +02:00
|
|
|
|
|
|
|
if (!eaten)
|
|
|
|
free(contents);
|
|
|
|
return 0; /* keep checking other objects, even if we saw an error */
|
2015-09-24 23:08:33 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Callback for stray files found in an object directory.  Files with
 * the "tmp_obj_" prefix are expected leftovers from interrupted object
 * writes and are silently tolerated; anything else is reported as a
 * bad sha1 file.  Always returns 0 so directory iteration continues.
 */
static int fsck_cruft(const char *basename, const char *path, void *data)
{
	int is_tmp_object = starts_with(basename, "tmp_obj_");

	if (!is_tmp_object)
		fprintf_ln(stderr, _("bad sha1 file: %s"), path);
	return 0;
}
|
|
|
|
|
2017-06-24 16:09:39 +02:00
|
|
|
/*
 * Callback invoked after each object fan-out subdirectory (00..ff) has
 * been scanned.  Bumps the progress meter to "nr + 1" completed
 * directories and returns 0 to keep iterating.
 */
static int fsck_subdir(unsigned int nr, const char *path, void *progress)
{
	struct progress *meter = progress;

	display_progress(meter, nr + 1);
	return 0;
}
|
|
|
|
|
2005-06-28 23:58:33 +02:00
|
|
|
static void fsck_object_dir(const char *path)
|
|
|
|
{
|
2011-11-07 03:59:26 +01:00
|
|
|
struct progress *progress = NULL;
|
2007-06-05 04:44:00 +02:00
|
|
|
|
|
|
|
if (verbose)
|
2018-11-10 06:16:15 +01:00
|
|
|
fprintf_ln(stderr, _("Checking object directory"));
|
2007-06-05 04:44:00 +02:00
|
|
|
|
2011-11-07 03:59:26 +01:00
|
|
|
if (show_progress)
|
2014-02-21 13:50:18 +01:00
|
|
|
progress = start_progress(_("Checking object directories"), 256);
|
2015-09-24 23:08:33 +02:00
|
|
|
|
|
|
|
for_each_loose_file_in_objdir(path, fsck_loose, fsck_cruft, fsck_subdir,
|
|
|
|
progress);
|
|
|
|
display_progress(progress, 256);
|
2011-11-07 03:59:26 +01:00
|
|
|
stop_progress(&progress);
|
2005-06-28 23:58:33 +02:00
|
|
|
}
|
|
|
|
|
2018-10-21 10:08:58 +02:00
|
|
|
static int fsck_head_link(const char *head_ref_name,
|
|
|
|
const char **head_points_at,
|
2018-10-21 10:08:57 +02:00
|
|
|
struct object_id *head_oid)
|
2005-07-03 19:40:38 +02:00
|
|
|
{
|
2007-04-11 10:28:43 +02:00
|
|
|
int null_is_error = 0;
|
|
|
|
|
2007-06-05 04:44:00 +02:00
|
|
|
if (verbose)
|
2019-01-04 22:33:31 +01:00
|
|
|
fprintf_ln(stderr, _("Checking %s link"), head_ref_name);
|
2007-06-05 04:44:00 +02:00
|
|
|
|
2018-10-21 10:08:58 +02:00
|
|
|
*head_points_at = resolve_ref_unsafe(head_ref_name, 0, head_oid, NULL);
|
2018-10-21 10:08:57 +02:00
|
|
|
if (!*head_points_at) {
|
2015-09-23 22:46:39 +02:00
|
|
|
errors_found |= ERROR_REFS;
|
2019-01-04 22:33:31 +01:00
|
|
|
return error(_("invalid %s"), head_ref_name);
|
2015-09-23 22:46:39 +02:00
|
|
|
}
|
2018-10-21 10:08:58 +02:00
|
|
|
if (!strcmp(*head_points_at, head_ref_name))
|
2007-04-11 10:28:43 +02:00
|
|
|
/* detached HEAD */
|
|
|
|
null_is_error = 1;
|
2018-10-21 10:08:57 +02:00
|
|
|
else if (!starts_with(*head_points_at, "refs/heads/")) {
|
2015-09-23 22:46:39 +02:00
|
|
|
errors_found |= ERROR_REFS;
|
2019-01-04 22:33:31 +01:00
|
|
|
return error(_("%s points to something strange (%s)"),
|
2018-10-21 10:08:58 +02:00
|
|
|
head_ref_name, *head_points_at);
|
2015-09-23 22:46:39 +02:00
|
|
|
}
|
2018-10-21 10:08:57 +02:00
|
|
|
if (is_null_oid(head_oid)) {
|
2015-09-23 22:46:39 +02:00
|
|
|
if (null_is_error) {
|
|
|
|
errors_found |= ERROR_REFS;
|
2019-01-04 22:33:31 +01:00
|
|
|
return error(_("%s: detached HEAD points at nothing"),
|
2018-10-21 10:08:58 +02:00
|
|
|
head_ref_name);
|
2015-09-23 22:46:39 +02:00
|
|
|
}
|
2019-01-04 22:33:31 +01:00
|
|
|
fprintf_ln(stderr,
|
|
|
|
_("notice: %s points to an unborn branch (%s)"),
|
|
|
|
head_ref_name, *head_points_at + 11);
|
2007-04-11 10:28:43 +02:00
|
|
|
}
|
2005-07-03 19:40:38 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-04-26 01:37:08 +02:00
|
|
|
static int fsck_cache_tree(struct cache_tree *it)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int err = 0;
|
|
|
|
|
2007-06-05 04:44:00 +02:00
|
|
|
if (verbose)
|
2018-11-10 06:16:15 +01:00
|
|
|
fprintf_ln(stderr, _("Checking cache tree"));
|
2007-06-05 04:44:00 +02:00
|
|
|
|
2006-04-26 01:37:08 +02:00
|
|
|
if (0 <= it->entry_count) {
|
2018-06-29 03:21:51 +02:00
|
|
|
struct object *obj = parse_object(the_repository, &it->oid);
|
2006-05-04 06:17:45 +02:00
|
|
|
if (!obj) {
|
2018-11-10 06:16:15 +01:00
|
|
|
error(_("%s: invalid sha1 pointer in cache-tree"),
|
2017-05-01 04:28:56 +02:00
|
|
|
oid_to_hex(&it->oid));
|
2015-09-23 22:46:39 +02:00
|
|
|
errors_found |= ERROR_REFS;
|
2006-05-04 06:17:45 +02:00
|
|
|
return 1;
|
|
|
|
}
|
2017-07-20 02:21:44 +02:00
|
|
|
obj->flags |= USED;
|
2016-07-17 13:00:02 +02:00
|
|
|
if (name_objects)
|
|
|
|
add_decoration(fsck_walk_options.object_names,
|
|
|
|
obj, xstrdup(":"));
|
2011-01-26 21:46:55 +01:00
|
|
|
mark_object_reachable(obj);
|
2006-07-12 05:45:31 +02:00
|
|
|
if (obj->type != OBJ_TREE)
|
2018-11-10 06:16:15 +01:00
|
|
|
err |= objerror(obj, _("non-tree in cache-tree"));
|
2006-04-26 01:37:08 +02:00
|
|
|
}
|
|
|
|
for (i = 0; i < it->subtree_nr; i++)
|
|
|
|
err |= fsck_cache_tree(it->down[i]->cache_tree);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-02-22 00:47:35 +01:00
|
|
|
/*
 * For --connectivity-only: register "oid" as present in the object
 * database by creating (or finding) its in-core object and setting
 * HAS_OBJ, without reading or verifying the object data.  This lets
 * the rest of fsck treat the connectivity-only pass like a normal
 * one.
 */
static void mark_object_for_connectivity(const struct object_id *oid)
{
	struct object *obj = lookup_unknown_object(oid);
	obj->flags |= HAS_OBJ;
}
|
|
|
|
|
2017-02-22 00:47:35 +01:00
|
|
|
/*
 * for_each_loose_object() callback: mark the loose object as present
 * (see mark_object_for_connectivity()).  Returns 0 to keep iterating.
 */
static int mark_loose_for_connectivity(const struct object_id *oid,
				       const char *path,
				       void *data)
{
	mark_object_for_connectivity(oid);
	return 0;
}
|
|
|
|
|
2017-02-22 00:47:35 +01:00
|
|
|
/*
 * for_each_packed_object() callback: mark the packed object at "pos"
 * in "pack" as present without verifying its data.  Returns 0 to
 * keep iterating.
 */
static int mark_packed_for_connectivity(const struct object_id *oid,
					struct packed_git *pack,
					uint32_t pos,
					void *data)
{
	mark_object_for_connectivity(oid);
	return 0;
}
|
|
|
|
|
2007-10-15 22:34:05 +02:00
|
|
|
/* Usage string shown by "git fsck -h" and on option-parsing errors. */
static char const * const fsck_usage[] = {
	N_("git fsck [<options>] [<object>...]"),
	NULL
};
|
|
|
|
|
|
|
|
/*
 * Command-line options for "git fsck"; the parsed values land in the
 * file-scope flags declared at the top of this file.
 */
static struct option fsck_opts[] = {
	OPT__VERBOSE(&verbose, N_("be verbose")),
	OPT_BOOL(0, "unreachable", &show_unreachable, N_("show unreachable objects")),
	OPT_BOOL(0, "dangling", &show_dangling, N_("show dangling objects")),
	OPT_BOOL(0, "tags", &show_tags, N_("report tags")),
	OPT_BOOL(0, "root", &show_root, N_("report root nodes")),
	OPT_BOOL(0, "cache", &keep_cache_objects, N_("make index objects head nodes")),
	OPT_BOOL(0, "reflogs", &include_reflogs, N_("make reflogs head nodes (default)")),
	OPT_BOOL(0, "full", &check_full, N_("also consider packs and alternate objects")),
	OPT_BOOL(0, "connectivity-only", &connectivity_only, N_("check only connectivity")),
	OPT_BOOL(0, "strict", &check_strict, N_("enable more strict checking")),
	OPT_BOOL(0, "lost-found", &write_lost_and_found,
		 N_("write dangling objects in .git/lost-found")),
	OPT_BOOL(0, "progress", &show_progress, N_("show progress")),
	OPT_BOOL(0, "name-objects", &name_objects, N_("show verbose names for reachable objects")),
	OPT_END(),
};
|
2007-03-05 09:22:06 +01:00
|
|
|
|
2007-07-15 01:14:45 +02:00
|
|
|
int cmd_fsck(int argc, const char **argv, const char *prefix)
|
2005-04-09 00:02:42 +02:00
|
|
|
{
|
2017-07-26 03:34:56 +02:00
|
|
|
int i;
|
2018-11-12 15:48:47 +01:00
|
|
|
struct object_directory *odb;
|
2005-04-09 00:02:42 +02:00
|
|
|
|
2017-12-08 16:27:14 +01:00
|
|
|
/* fsck knows how to handle missing promisor objects */
|
|
|
|
fetch_if_missing = 0;
|
|
|
|
|
2007-03-05 09:22:06 +01:00
|
|
|
errors_found = 0;
|
2018-07-18 22:45:20 +02:00
|
|
|
read_replace_refs = 0;
|
2005-11-26 08:52:04 +01:00
|
|
|
|
2009-05-23 20:53:12 +02:00
|
|
|
argc = parse_options(argc, argv, prefix, fsck_opts, fsck_usage, 0);
|
2011-11-07 03:59:26 +01:00
|
|
|
|
2015-06-22 17:25:00 +02:00
|
|
|
fsck_walk_options.walk = mark_object;
|
|
|
|
fsck_obj_options.walk = mark_used;
|
|
|
|
fsck_obj_options.error_func = fsck_error_func;
|
|
|
|
if (check_strict)
|
|
|
|
fsck_obj_options.strict = 1;
|
|
|
|
|
2011-11-07 03:59:26 +01:00
|
|
|
if (show_progress == -1)
|
|
|
|
show_progress = isatty(2);
|
|
|
|
if (verbose)
|
|
|
|
show_progress = 0;
|
|
|
|
|
2007-10-15 22:34:05 +02:00
|
|
|
if (write_lost_and_found) {
|
|
|
|
check_full = 1;
|
|
|
|
include_reflogs = 0;
|
2005-04-26 01:31:13 +02:00
|
|
|
}
|
|
|
|
|
2016-07-17 13:00:02 +02:00
|
|
|
if (name_objects)
|
|
|
|
fsck_walk_options.object_names =
|
|
|
|
xcalloc(1, sizeof(struct decoration));
|
|
|
|
|
2015-06-22 17:27:06 +02:00
|
|
|
git_config(fsck_config, NULL);
|
|
|
|
|
fsck: prepare dummy objects for --connectivity-check
Normally fsck makes a pass over all objects to check their
integrity, and then follows up with a reachability check to
make sure we have all of the referenced objects (and to know
which ones are dangling). The latter checks for the HAS_OBJ
flag in obj->flags to see if we found the object in the
first pass.
Commit 02976bf85 (fsck: introduce `git fsck --connectivity-only`,
2015-06-22) taught fsck to skip the initial pass, and to
fallback to has_sha1_file() instead of the HAS_OBJ check.
However, it converted only one HAS_OBJ check to use
has_sha1_file(). But there are many other places in
builtin/fsck.c that assume that the flag is set (or that
lookup_object() will return an object at all). This leads to
several bugs with --connectivity-only:
1. mark_object() will not queue objects for examination,
so recursively following links from commits to trees,
etc, did nothing. I.e., we were checking the
reachability of hardly anything at all.
2. When a set of heads is given on the command-line, we
use lookup_object() to see if they exist. But without
the initial pass, we assume nothing exists.
3. When loading reflog entries, we do a similar
lookup_object() check, and complain that the reflog is
broken if the object doesn't exist in our hash.
So in short, --connectivity-only is broken pretty badly, and
will claim that your repository is fine when it's not.
Presumably nobody noticed for a few reasons.
One is that the embedded test does not actually test the
recursive nature of the reachability check. All of the
missing objects are still in the index, and we directly
check items from the index. This patch modifies the test to
delete the index, which shows off breakage (1).
Another is that --connectivity-only just skips the initial
pass for loose objects. So on a real repository, the packed
objects were still checked correctly. But on the flipside,
it means that "git fsck --connectivity-only" still checks
the sha1 of all of the packed objects, nullifying its
original purpose of being a faster git-fsck.
And of course the final problem is that the bug only shows
up when there _is_ corruption, which is rare. So anybody
running "git fsck --connectivity-only" proactively would
assume it was being thorough, when it was not.
One possibility for fixing this is to find all of the spots
that rely on HAS_OBJ and tweak them for the connectivity-only
case. But besides the risk that we might miss a spot (and I
found three already, corresponding to the three bugs above),
there are other parts of fsck that _can't_ work without a
full list of objects. E.g., the list of dangling objects.
Instead, let's make the connectivity-only case look more
like the normal case. Rather than skip the initial pass
completely, we'll do an abbreviated one that sets up the
HAS_OBJ flag for each object, without actually loading the
object data.
That's simple and fast, and we don't have to care about the
connectivity_only flag in the rest of the code at all.
While we're at it, let's make sure we treat loose and packed
objects the same (i.e., setting up dummy objects for both
and skipping the actual sha1 check). That makes the
connectivity-only check actually fast on a real repo (40
seconds versus 180 seconds on my copy of linux.git).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-17 22:32:57 +01:00
|
|
|
if (connectivity_only) {
|
|
|
|
for_each_loose_object(mark_loose_for_connectivity, NULL, 0);
|
|
|
|
for_each_packed_object(mark_packed_for_connectivity, NULL, 0);
|
|
|
|
} else {
|
2018-03-23 18:21:07 +01:00
|
|
|
prepare_alt_odb(the_repository);
|
sha1-file: use an object_directory for the main object dir
Our handling of alternate object directories is needlessly different
from the main object directory. As a result, many places in the code
basically look like this:
do_something(r->objects->objdir);
for (odb = r->objects->alt_odb_list; odb; odb = odb->next)
do_something(odb->path);
That gets annoying when do_something() is non-trivial, and we've
resorted to gross hacks like creating fake alternates (see
find_short_object_filename()).
Instead, let's give each raw_object_store a unified list of
object_directory structs. The first will be the main store, and
everything after is an alternate. Very few callers even care about the
distinction, and can just loop over the whole list (and those who care
can just treat the first element differently).
A few observations:
- we don't need r->objects->objectdir anymore, and can just
mechanically convert that to r->objects->odb->path
- object_directory's path field needs to become a real pointer rather
than a FLEX_ARRAY, in order to fill it with expand_base_dir()
- we'll call prepare_alt_odb() earlier in many functions (i.e.,
outside of the loop). This may result in us calling it even when our
function would be satisfied looking only at the main odb.
But this doesn't matter in practice. It's not a very expensive
operation in the first place, and in the majority of cases it will
be a noop. We call it already (and cache its results) in
prepare_packed_git(), and we'll generally check packs before loose
objects. So essentially every program is going to call it
immediately once per program.
Arguably we should just prepare_alt_odb() immediately upon setting
up the repository's object directory, which would save us sprinkling
calls throughout the code base (and forgetting to do so has been a
source of subtle bugs in the past). But I've stopped short of that
here, since there are already a lot of other moving parts in this
patch.
- Most call sites just get shorter. The check_and_freshen() functions
are an exception, because they have entry points to handle local and
nonlocal directories separately.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-11-12 15:50:39 +01:00
|
|
|
for (odb = the_repository->objects->odb; odb; odb = odb->next)
|
2018-11-12 15:48:47 +01:00
|
|
|
fsck_object_dir(odb->path);
|
2009-01-30 09:50:54 +01:00
|
|
|
|
fsck: prepare dummy objects for --connectivity-check
Normally fsck makes a pass over all objects to check their
integrity, and then follows up with a reachability check to
make sure we have all of the referenced objects (and to know
which ones are dangling). The latter checks for the HAS_OBJ
flag in obj->flags to see if we found the object in the
first pass.
Commit 02976bf85 (fsck: introduce `git fsck --connectivity-only`,
2015-06-22) taught fsck to skip the initial pass, and to
fallback to has_sha1_file() instead of the HAS_OBJ check.
However, it converted only one HAS_OBJ check to use
has_sha1_file(). But there are many other places in
builtin/fsck.c that assume that the flag is set (or that
lookup_object() will return an object at all). This leads to
several bugs with --connectivity-only:
1. mark_object() will not queue objects for examination,
so recursively following links from commits to trees,
etc, did nothing. I.e., we were checking the
reachability of hardly anything at all.
2. When a set of heads is given on the command-line, we
use lookup_object() to see if they exist. But without
the initial pass, we assume nothing exists.
3. When loading reflog entries, we do a similar
lookup_object() check, and complain that the reflog is
broken if the object doesn't exist in our hash.
So in short, --connectivity-only is broken pretty badly, and
will claim that your repository is fine when it's not.
Presumably nobody noticed for a few reasons.
One is that the embedded test does not actually test the
recursive nature of the reachability check. All of the
missing objects are still in the index, and we directly
check items from the index. This patch modifies the test to
delete the index, which shows off breakage (1).
Another is that --connectivity-only just skips the initial
pass for loose objects. So on a real repository, the packed
objects were still checked correctly. But on the flipside,
it means that "git fsck --connectivity-only" still checks
the sha1 of all of the packed objects, nullifying its
original purpose of being a faster git-fsck.
And of course the final problem is that the bug only shows
up when there _is_ corruption, which is rare. So anybody
running "git fsck --connectivity-only" proactively would
assume it was being thorough, when it was not.
One possibility for fixing this is to find all of the spots
that rely on HAS_OBJ and tweak them for the connectivity-only
case. But besides the risk that we might miss a spot (and I
found three already, corresponding to the three bugs above),
there are other parts of fsck that _can't_ work without a
full list of objects. E.g., the list of dangling objects.
Instead, let's make the connectivity-only case look more
like the normal case. Rather than skip the initial pass
completely, we'll do an abbreviated one that sets up the
HAS_OBJ flag for each object, without actually loading the
object data.
That's simple and fast, and we don't have to care about the
connectivity_only flag in the rest of the code at all.
While we're at it, let's make sure we treat loose and packed
objects the same (i.e., setting up dummy objects for both
and skipping the actual sha1 check). That makes the
connectivity-only check actually fast on a real repo (40
seconds versus 180 seconds on my copy of linux.git).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-17 22:32:57 +01:00
|
|
|
if (check_full) {
|
|
|
|
struct packed_git *p;
|
|
|
|
uint32_t total = 0, count = 0;
|
|
|
|
struct progress *progress = NULL;
|
2009-01-30 09:50:54 +01:00
|
|
|
|
fsck: prepare dummy objects for --connectivity-check
Normally fsck makes a pass over all objects to check their
integrity, and then follows up with a reachability check to
make sure we have all of the referenced objects (and to know
which ones are dangling). The latter checks for the HAS_OBJ
flag in obj->flags to see if we found the object in the
first pass.
Commit 02976bf85 (fsck: introduce `git fsck --connectivity-only`,
2015-06-22) taught fsck to skip the initial pass, and to
fallback to has_sha1_file() instead of the HAS_OBJ check.
However, it converted only one HAS_OBJ check to use
has_sha1_file(). But there are many other places in
builtin/fsck.c that assume that the flag is set (or that
lookup_object() will return an object at all). This leads to
several bugs with --connectivity-only:
1. mark_object() will not queue objects for examination,
so recursively following links from commits to trees,
etc, did nothing. I.e., we were checking the
reachability of hardly anything at all.
2. When a set of heads is given on the command-line, we
use lookup_object() to see if they exist. But without
the initial pass, we assume nothing exists.
3. When loading reflog entries, we do a similar
lookup_object() check, and complain that the reflog is
broken if the object doesn't exist in our hash.
So in short, --connectivity-only is broken pretty badly, and
will claim that your repository is fine when it's not.
Presumably nobody noticed for a few reasons.
One is that the embedded test does not actually test the
recursive nature of the reachability check. All of the
missing objects are still in the index, and we directly
check items from the index. This patch modifies the test to
delete the index, which shows off breakage (1).
Another is that --connectivity-only just skips the initial
pass for loose objects. So on a real repository, the packed
objects were still checked correctly. But on the flipside,
it means that "git fsck --connectivity-only" still checks
the sha1 of all of the packed objects, nullifying its
original purpose of being a faster git-fsck.
And of course the final problem is that the bug only shows
up when there _is_ corruption, which is rare. So anybody
running "git fsck --connectivity-only" proactively would
assume it was being thorough, when it was not.
One possibility for fixing this is to find all of the spots
that rely on HAS_OBJ and tweak them for the connectivity-only
case. But besides the risk that we might miss a spot (and I
found three already, corresponding to the three bugs above),
there are other parts of fsck that _can't_ work without a
full list of objects. E.g., the list of dangling objects.
Instead, let's make the connectivity-only case look more
like the normal case. Rather than skip the initial pass
completely, we'll do an abbreviated one that sets up the
HAS_OBJ flag for each object, without actually loading the
object data.
That's simple and fast, and we don't have to care about the
connectivity_only flag in the rest of the code at all.
While we're at it, let's make sure we treat loose and packed
objects the same (i.e., setting up dummy objects for both
and skipping the actual sha1 check). That makes the
connectivity-only check actually fast on a real repo (40
seconds versus 180 seconds on my copy of linux.git).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-17 22:32:57 +01:00
|
|
|
if (show_progress) {
|
2018-08-20 18:52:04 +02:00
|
|
|
for (p = get_all_packs(the_repository); p;
|
2018-03-23 18:20:59 +01:00
|
|
|
p = p->next) {
|
fsck: prepare dummy objects for --connectivity-check
Normally fsck makes a pass over all objects to check their
integrity, and then follows up with a reachability check to
make sure we have all of the referenced objects (and to know
which ones are dangling). The latter checks for the HAS_OBJ
flag in obj->flags to see if we found the object in the
first pass.
Commit 02976bf85 (fsck: introduce `git fsck --connectivity-only`,
2015-06-22) taught fsck to skip the initial pass, and to
fallback to has_sha1_file() instead of the HAS_OBJ check.
However, it converted only one HAS_OBJ check to use
has_sha1_file(). But there are many other places in
builtin/fsck.c that assume that the flag is set (or that
lookup_object() will return an object at all). This leads to
several bugs with --connectivity-only:
1. mark_object() will not queue objects for examination,
so recursively following links from commits to trees,
etc, did nothing. I.e., we were checking the
reachability of hardly anything at all.
2. When a set of heads is given on the command-line, we
use lookup_object() to see if they exist. But without
the initial pass, we assume nothing exists.
3. When loading reflog entries, we do a similar
lookup_object() check, and complain that the reflog is
broken if the object doesn't exist in our hash.
So in short, --connectivity-only is broken pretty badly, and
will claim that your repository is fine when it's not.
Presumably nobody noticed for a few reasons.
One is that the embedded test does not actually test the
recursive nature of the reachability check. All of the
missing objects are still in the index, and we directly
check items from the index. This patch modifies the test to
delete the index, which shows off breakage (1).
Another is that --connectivity-only just skips the initial
pass for loose objects. So on a real repository, the packed
objects were still checked correctly. But on the flipside,
it means that "git fsck --connectivity-only" still checks
the sha1 of all of the packed objects, nullifying its
original purpose of being a faster git-fsck.
And of course the final problem is that the bug only shows
up when there _is_ corruption, which is rare. So anybody
running "git fsck --connectivity-only" proactively would
assume it was being thorough, when it was not.
One possibility for fixing this is to find all of the spots
that rely on HAS_OBJ and tweak them for the connectivity-only
case. But besides the risk that we might miss a spot (and I
found three already, corresponding to the three bugs above),
there are other parts of fsck that _can't_ work without a
full list of objects. E.g., the list of dangling objects.
Instead, let's make the connectivity-only case look more
like the normal case. Rather than skip the initial pass
completely, we'll do an abbreviated one that sets up the
HAS_OBJ flag for each object, without actually loading the
object data.
That's simple and fast, and we don't have to care about the
connectivity_only flag in the rest of the code at all.
While we're at it, let's make sure we treat loose and packed
objects the same (i.e., setting up dummy objects for both
and skipping the actual sha1 check). That makes the
connectivity-only check actually fast on a real repo (40
seconds versus 180 seconds on my copy of linux.git).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-17 22:32:57 +01:00
|
|
|
if (open_pack_index(p))
|
|
|
|
continue;
|
|
|
|
total += p->num_objects;
|
|
|
|
}
|
2011-11-07 03:59:26 +01:00
|
|
|
|
fsck: prepare dummy objects for --connectivity-check
Normally fsck makes a pass over all objects to check their
integrity, and then follows up with a reachability check to
make sure we have all of the referenced objects (and to know
which ones are dangling). The latter checks for the HAS_OBJ
flag in obj->flags to see if we found the object in the
first pass.
Commit 02976bf85 (fsck: introduce `git fsck --connectivity-only`,
2015-06-22) taught fsck to skip the initial pass, and to
fallback to has_sha1_file() instead of the HAS_OBJ check.
However, it converted only one HAS_OBJ check to use
has_sha1_file(). But there are many other places in
builtin/fsck.c that assume that the flag is set (or that
lookup_object() will return an object at all). This leads to
several bugs with --connectivity-only:
1. mark_object() will not queue objects for examination,
so recursively following links from commits to trees,
etc, did nothing. I.e., we were checking the
reachability of hardly anything at all.
2. When a set of heads is given on the command-line, we
use lookup_object() to see if they exist. But without
the initial pass, we assume nothing exists.
3. When loading reflog entries, we do a similar
lookup_object() check, and complain that the reflog is
broken if the object doesn't exist in our hash.
So in short, --connectivity-only is broken pretty badly, and
will claim that your repository is fine when it's not.
Presumably nobody noticed for a few reasons.
One is that the embedded test does not actually test the
recursive nature of the reachability check. All of the
missing objects are still in the index, and we directly
check items from the index. This patch modifies the test to
delete the index, which shows off breakage (1).
Another is that --connectivity-only just skips the initial
pass for loose objects. So on a real repository, the packed
objects were still checked correctly. But on the flipside,
it means that "git fsck --connectivity-only" still checks
the sha1 of all of the packed objects, nullifying its
original purpose of being a faster git-fsck.
And of course the final problem is that the bug only shows
up when there _is_ corruption, which is rare. So anybody
running "git fsck --connectivity-only" proactively would
assume it was being thorough, when it was not.
One possibility for fixing this is to find all of the spots
that rely on HAS_OBJ and tweak them for the connectivity-only
case. But besides the risk that we might miss a spot (and I
found three already, corresponding to the three bugs above),
there are other parts of fsck that _can't_ work without a
full list of objects. E.g., the list of dangling objects.
Instead, let's make the connectivity-only case look more
like the normal case. Rather than skip the initial pass
completely, we'll do an abbreviated one that sets up the
HAS_OBJ flag for each object, without actually loading the
object data.
That's simple and fast, and we don't have to care about the
connectivity_only flag in the rest of the code at all.
While we're at it, let's make sure we treat loose and packed
objects the same (i.e., setting up dummy objects for both
and skipping the actual sha1 check). That makes the
connectivity-only check actually fast on a real repo (40
seconds versus 180 seconds on my copy of linux.git).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-17 22:32:57 +01:00
|
|
|
progress = start_progress(_("Checking objects"), total);
|
|
|
|
}
|
2018-08-20 18:52:04 +02:00
|
|
|
for (p = get_all_packs(the_repository); p;
|
2018-03-23 18:20:59 +01:00
|
|
|
p = p->next) {
|
fsck: prepare dummy objects for --connectivity-check
Normally fsck makes a pass over all objects to check their
integrity, and then follows up with a reachability check to
make sure we have all of the referenced objects (and to know
which ones are dangling). The latter checks for the HAS_OBJ
flag in obj->flags to see if we found the object in the
first pass.
Commit 02976bf85 (fsck: introduce `git fsck --connectivity-only`,
2015-06-22) taught fsck to skip the initial pass, and to
fallback to has_sha1_file() instead of the HAS_OBJ check.
However, it converted only one HAS_OBJ check to use
has_sha1_file(). But there are many other places in
builtin/fsck.c that assume that the flag is set (or that
lookup_object() will return an object at all). This leads to
several bugs with --connectivity-only:
1. mark_object() will not queue objects for examination,
so recursively following links from commits to trees,
etc, did nothing. I.e., we were checking the
reachability of hardly anything at all.
2. When a set of heads is given on the command-line, we
use lookup_object() to see if they exist. But without
the initial pass, we assume nothing exists.
3. When loading reflog entries, we do a similar
lookup_object() check, and complain that the reflog is
broken if the object doesn't exist in our hash.
So in short, --connectivity-only is broken pretty badly, and
will claim that your repository is fine when it's not.
Presumably nobody noticed for a few reasons.
One is that the embedded test does not actually test the
recursive nature of the reachability check. All of the
missing objects are still in the index, and we directly
check items from the index. This patch modifies the test to
delete the index, which shows off breakage (1).
Another is that --connectivity-only just skips the initial
pass for loose objects. So on a real repository, the packed
objects were still checked correctly. But on the flipside,
it means that "git fsck --connectivity-only" still checks
the sha1 of all of the packed objects, nullifying its
original purpose of being a faster git-fsck.
And of course the final problem is that the bug only shows
up when there _is_ corruption, which is rare. So anybody
running "git fsck --connectivity-only" proactively would
assume it was being thorough, when it was not.
One possibility for fixing this is to find all of the spots
that rely on HAS_OBJ and tweak them for the connectivity-only
case. But besides the risk that we might miss a spot (and I
found three already, corresponding to the three bugs above),
there are other parts of fsck that _can't_ work without a
full list of objects. E.g., the list of dangling objects.
Instead, let's make the connectivity-only case look more
like the normal case. Rather than skip the initial pass
completely, we'll do an abbreviated one that sets up the
HAS_OBJ flag for each object, without actually loading the
object data.
That's simple and fast, and we don't have to care about the
connectivity_only flag in the rest of the code at all.
While we're at it, let's make sure we treat loose and packed
objects the same (i.e., setting up dummy objects for both
and skipping the actual sha1 check). That makes the
connectivity-only check actually fast on a real repo (40
seconds versus 180 seconds on my copy of linux.git).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-17 22:32:57 +01:00
|
|
|
/* verify gives error messages itself */
|
2018-11-10 06:49:07 +01:00
|
|
|
if (verify_pack(the_repository,
|
|
|
|
p, fsck_obj_buffer,
|
fsck: prepare dummy objects for --connectivity-check
Normally fsck makes a pass over all objects to check their
integrity, and then follows up with a reachability check to
make sure we have all of the referenced objects (and to know
which ones are dangling). The latter checks for the HAS_OBJ
flag in obj->flags to see if we found the object in the
first pass.
Commit 02976bf85 (fsck: introduce `git fsck --connectivity-only`,
2015-06-22) taught fsck to skip the initial pass, and to
fallback to has_sha1_file() instead of the HAS_OBJ check.
However, it converted only one HAS_OBJ check to use
has_sha1_file(). But there are many other places in
builtin/fsck.c that assume that the flag is set (or that
lookup_object() will return an object at all). This leads to
several bugs with --connectivity-only:
1. mark_object() will not queue objects for examination,
so recursively following links from commits to trees,
etc, did nothing. I.e., we were checking the
reachability of hardly anything at all.
2. When a set of heads is given on the command-line, we
use lookup_object() to see if they exist. But without
the initial pass, we assume nothing exists.
3. When loading reflog entries, we do a similar
lookup_object() check, and complain that the reflog is
broken if the object doesn't exist in our hash.
So in short, --connectivity-only is broken pretty badly, and
will claim that your repository is fine when it's not.
Presumably nobody noticed for a few reasons.
One is that the embedded test does not actually test the
recursive nature of the reachability check. All of the
missing objects are still in the index, and we directly
check items from the index. This patch modifies the test to
delete the index, which shows off breakage (1).
Another is that --connectivity-only just skips the initial
pass for loose objects. So on a real repository, the packed
objects were still checked correctly. But on the flipside,
it means that "git fsck --connectivity-only" still checks
the sha1 of all of the packed objects, nullifying its
original purpose of being a faster git-fsck.
And of course the final problem is that the bug only shows
up when there _is_ corruption, which is rare. So anybody
running "git fsck --connectivity-only" proactively would
assume it was being thorough, when it was not.
One possibility for fixing this is to find all of the spots
that rely on HAS_OBJ and tweak them for the connectivity-only
case. But besides the risk that we might miss a spot (and I
found three already, corresponding to the three bugs above),
there are other parts of fsck that _can't_ work without a
full list of objects. E.g., the list of dangling objects.
Instead, let's make the connectivity-only case look more
like the normal case. Rather than skip the initial pass
completely, we'll do an abbreviated one that sets up the
HAS_OBJ flag for each object, without actually loading the
object data.
That's simple and fast, and we don't have to care about the
connectivity_only flag in the rest of the code at all.
While we're at it, let's make sure we treat loose and packed
objects the same (i.e., setting up dummy objects for both
and skipping the actual sha1 check). That makes the
connectivity-only check actually fast on a real repo (40
seconds versus 180 seconds on my copy of linux.git).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-17 22:32:57 +01:00
|
|
|
progress, count))
|
|
|
|
errors_found |= ERROR_PACK;
|
|
|
|
count += p->num_objects;
|
2011-11-07 03:59:26 +01:00
|
|
|
}
|
fsck: prepare dummy objects for --connectivity-check
Normally fsck makes a pass over all objects to check their
integrity, and then follows up with a reachability check to
make sure we have all of the referenced objects (and to know
which ones are dangling). The latter checks for the HAS_OBJ
flag in obj->flags to see if we found the object in the
first pass.
Commit 02976bf85 (fsck: introduce `git fsck --connectivity-only`,
2015-06-22) taught fsck to skip the initial pass, and to
fallback to has_sha1_file() instead of the HAS_OBJ check.
However, it converted only one HAS_OBJ check to use
has_sha1_file(). But there are many other places in
builtin/fsck.c that assume that the flag is set (or that
lookup_object() will return an object at all). This leads to
several bugs with --connectivity-only:
1. mark_object() will not queue objects for examination,
so recursively following links from commits to trees,
etc, did nothing. I.e., we were checking the
reachability of hardly anything at all.
2. When a set of heads is given on the command-line, we
use lookup_object() to see if they exist. But without
the initial pass, we assume nothing exists.
3. When loading reflog entries, we do a similar
lookup_object() check, and complain that the reflog is
broken if the object doesn't exist in our hash.
So in short, --connectivity-only is broken pretty badly, and
will claim that your repository is fine when it's not.
Presumably nobody noticed for a few reasons.
One is that the embedded test does not actually test the
recursive nature of the reachability check. All of the
missing objects are still in the index, and we directly
check items from the index. This patch modifies the test to
delete the index, which shows off breakage (1).
Another is that --connectivity-only just skips the initial
pass for loose objects. So on a real repository, the packed
objects were still checked correctly. But on the flipside,
it means that "git fsck --connectivity-only" still checks
the sha1 of all of the packed objects, nullifying its
original purpose of being a faster git-fsck.
And of course the final problem is that the bug only shows
up when there _is_ corruption, which is rare. So anybody
running "git fsck --connectivity-only" proactively would
assume it was being thorough, when it was not.
One possibility for fixing this is to find all of the spots
that rely on HAS_OBJ and tweak them for the connectivity-only
case. But besides the risk that we might miss a spot (and I
found three already, corresponding to the three bugs above),
there are other parts of fsck that _can't_ work without a
full list of objects. E.g., the list of dangling objects.
Instead, let's make the connectivity-only case look more
like the normal case. Rather than skip the initial pass
completely, we'll do an abbreviated one that sets up the
HAS_OBJ flag for each object, without actually loading the
object data.
That's simple and fast, and we don't have to care about the
connectivity_only flag in the rest of the code at all.
While we're at it, let's make sure we treat loose and packed
objects the same (i.e., setting up dummy objects for both
and skipping the actual sha1 check). That makes the
connectivity-only check actually fast on a real repo (40
seconds versus 180 seconds on my copy of linux.git).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-01-17 22:32:57 +01:00
|
|
|
stop_progress(&progress);
|
2011-11-07 03:59:26 +01:00
|
|
|
}
|
2018-05-02 23:20:35 +02:00
|
|
|
|
|
|
|
if (fsck_finish(&fsck_obj_options))
|
|
|
|
errors_found |= ERROR_OBJECT;
|
2005-04-14 01:42:09 +02:00
|
|
|
}
|
|
|
|
|
2009-01-18 04:46:09 +01:00
|
|
|
for (i = 0; i < argc; i++) {
|
2007-06-07 09:04:01 +02:00
|
|
|
const char *arg = argv[i];
|
2017-07-14 01:49:18 +02:00
|
|
|
struct object_id oid;
|
|
|
|
if (!get_oid(arg, &oid)) {
|
2018-06-29 03:21:52 +02:00
|
|
|
struct object *obj = lookup_object(the_repository,
|
2019-06-20 09:41:14 +02:00
|
|
|
&oid);
|
2005-04-30 05:00:40 +02:00
|
|
|
|
2017-01-16 22:34:57 +01:00
|
|
|
if (!obj || !(obj->flags & HAS_OBJ)) {
|
2017-12-05 17:58:47 +01:00
|
|
|
if (is_promisor_object(&oid))
|
|
|
|
continue;
|
2018-11-10 06:16:15 +01:00
|
|
|
error(_("%s: object missing"), oid_to_hex(&oid));
|
2017-01-16 22:33:29 +01:00
|
|
|
errors_found |= ERROR_OBJECT;
|
2005-04-30 05:00:40 +02:00
|
|
|
continue;
|
2017-01-16 22:33:29 +01:00
|
|
|
}
|
2005-04-30 05:00:40 +02:00
|
|
|
|
2017-07-20 02:21:44 +02:00
|
|
|
obj->flags |= USED;
|
2016-07-17 13:00:02 +02:00
|
|
|
if (name_objects)
|
|
|
|
add_decoration(fsck_walk_options.object_names,
|
|
|
|
obj, xstrdup(arg));
|
2008-02-25 22:46:05 +01:00
|
|
|
mark_object_reachable(obj);
|
2005-04-13 18:57:30 +02:00
|
|
|
continue;
|
|
|
|
}
|
2018-11-10 06:16:15 +01:00
|
|
|
error(_("invalid parameter: expected sha1, got '%s'"), arg);
|
2017-01-16 22:33:29 +01:00
|
|
|
errors_found |= ERROR_OBJECT;
|
2005-04-13 18:57:30 +02:00
|
|
|
}
|
|
|
|
|
2005-05-18 19:16:14 +02:00
|
|
|
/*
|
2005-05-20 22:59:17 +02:00
|
|
|
* If we've not been given any explicit head information, do the
|
2005-05-18 19:19:59 +02:00
|
|
|
* default ones from .git/refs. We also consider the index file
|
|
|
|
* in this case (ie this implies --cache).
|
2005-05-18 19:16:14 +02:00
|
|
|
*/
|
2017-01-16 22:34:21 +01:00
|
|
|
if (!argc) {
|
2005-05-18 19:16:14 +02:00
|
|
|
get_default_heads();
|
|
|
|
keep_cache_objects = 1;
|
|
|
|
}
|
|
|
|
|
2005-05-04 10:33:33 +02:00
|
|
|
if (keep_cache_objects) {
|
2017-04-14 22:32:21 +02:00
|
|
|
verify_index_checksum = 1;
|
2017-10-18 16:27:25 +02:00
|
|
|
verify_ce_order = 1;
|
2005-05-04 10:33:33 +02:00
|
|
|
read_cache();
|
|
|
|
for (i = 0; i < active_nr; i++) {
|
2007-04-10 06:15:29 +02:00
|
|
|
unsigned int mode;
|
|
|
|
struct blob *blob;
|
2005-05-04 10:33:33 +02:00
|
|
|
struct object *obj;
|
2007-04-10 06:15:29 +02:00
|
|
|
|
2008-01-15 01:03:17 +01:00
|
|
|
mode = active_cache[i]->ce_mode;
|
2007-05-21 22:08:28 +02:00
|
|
|
if (S_ISGITLINK(mode))
|
2007-04-10 06:15:29 +02:00
|
|
|
continue;
|
2018-06-29 03:21:55 +02:00
|
|
|
blob = lookup_blob(the_repository,
|
|
|
|
&active_cache[i]->oid);
|
2005-05-04 10:33:33 +02:00
|
|
|
if (!blob)
|
|
|
|
continue;
|
|
|
|
obj = &blob->object;
|
2017-07-20 02:21:44 +02:00
|
|
|
obj->flags |= USED;
|
2016-07-17 13:00:02 +02:00
|
|
|
if (name_objects)
|
|
|
|
add_decoration(fsck_walk_options.object_names,
|
|
|
|
obj,
|
|
|
|
xstrfmt(":%s", active_cache[i]->name));
|
2008-02-25 22:46:05 +01:00
|
|
|
mark_object_reachable(obj);
|
2005-05-04 10:33:33 +02:00
|
|
|
}
|
2006-04-26 01:37:08 +02:00
|
|
|
if (active_cache_tree)
|
|
|
|
fsck_cache_tree(active_cache_tree);
|
2005-05-04 10:33:33 +02:00
|
|
|
}
|
|
|
|
|
2005-04-11 08:13:09 +02:00
|
|
|
check_connectivity();
|
2018-06-27 15:24:43 +02:00
|
|
|
|
2018-07-12 00:42:42 +02:00
|
|
|
if (!git_config_get_bool("core.commitgraph", &i) && i) {
|
2018-06-27 15:24:43 +02:00
|
|
|
struct child_process commit_graph_verify = CHILD_PROCESS_INIT;
|
|
|
|
const char *verify_argv[] = { "commit-graph", "verify", NULL, NULL, NULL };
|
|
|
|
|
|
|
|
prepare_alt_odb(the_repository);
|
sha1-file: use an object_directory for the main object dir
Our handling of alternate object directories is needlessly different
from the main object directory. As a result, many places in the code
basically look like this:
do_something(r->objects->objdir);
for (odb = r->objects->alt_odb_list; odb; odb = odb->next)
do_something(odb->path);
That gets annoying when do_something() is non-trivial, and we've
resorted to gross hacks like creating fake alternates (see
find_short_object_filename()).
Instead, let's give each raw_object_store a unified list of
object_directory structs. The first will be the main store, and
everything after is an alternate. Very few callers even care about the
distinction, and can just loop over the whole list (and those who care
can just treat the first element differently).
A few observations:
- we don't need r->objects->objectdir anymore, and can just
mechanically convert that to r->objects->odb->path
- object_directory's path field needs to become a real pointer rather
than a FLEX_ARRAY, in order to fill it with expand_base_dir()
- we'll call prepare_alt_odb() earlier in many functions (i.e.,
outside of the loop). This may result in us calling it even when our
function would be satisfied looking only at the main odb.
But this doesn't matter in practice. It's not a very expensive
operation in the first place, and in the majority of cases it will
be a noop. We call it already (and cache its results) in
prepare_packed_git(), and we'll generally check packs before loose
objects. So essentially every program is going to call it
immediately once per program.
Arguably we should just prepare_alt_odb() immediately upon setting
up the repository's object directory, which would save us sprinkling
calls throughout the code base (and forgetting to do so has been a
source of subtle bugs in the past). But I've stopped short of that
here, since there are already a lot of other moving parts in this
patch.
- Most call sites just get shorter. The check_and_freshen() functions
are an exception, because they have entry points to handle local and
nonlocal directories separately.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-11-12 15:50:39 +01:00
|
|
|
for (odb = the_repository->objects->odb; odb; odb = odb->next) {
|
2018-11-12 15:46:54 +01:00
|
|
|
child_process_init(&commit_graph_verify);
|
|
|
|
commit_graph_verify.argv = verify_argv;
|
|
|
|
commit_graph_verify.git_cmd = 1;
|
2018-06-27 15:24:43 +02:00
|
|
|
verify_argv[2] = "--object-dir";
|
2018-11-12 15:48:47 +01:00
|
|
|
verify_argv[3] = odb->path;
|
2018-06-27 15:24:43 +02:00
|
|
|
if (run_command(&commit_graph_verify))
|
|
|
|
errors_found |= ERROR_COMMIT_GRAPH;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-13 20:02:27 +02:00
|
|
|
if (!git_config_get_bool("core.multipackindex", &i) && i) {
|
|
|
|
struct child_process midx_verify = CHILD_PROCESS_INIT;
|
|
|
|
const char *midx_argv[] = { "multi-pack-index", "verify", NULL, NULL, NULL };
|
|
|
|
|
|
|
|
prepare_alt_odb(the_repository);
|
sha1-file: use an object_directory for the main object dir
Our handling of alternate object directories is needlessly different
from the main object directory. As a result, many places in the code
basically look like this:
do_something(r->objects->objdir);
for (odb = r->objects->alt_odb_list; odb; odb = odb->next)
do_something(odb->path);
That gets annoying when do_something() is non-trivial, and we've
resorted to gross hacks like creating fake alternates (see
find_short_object_filename()).
Instead, let's give each raw_object_store a unified list of
object_directory structs. The first will be the main store, and
everything after is an alternate. Very few callers even care about the
distinction, and can just loop over the whole list (and those who care
can just treat the first element differently).
A few observations:
- we don't need r->objects->objectdir anymore, and can just
mechanically convert that to r->objects->odb->path
- object_directory's path field needs to become a real pointer rather
than a FLEX_ARRAY, in order to fill it with expand_base_dir()
- we'll call prepare_alt_odb() earlier in many functions (i.e.,
outside of the loop). This may result in us calling it even when our
function would be satisfied looking only at the main odb.
But this doesn't matter in practice. It's not a very expensive
operation in the first place, and in the majority of cases it will
be a noop. We call it already (and cache its results) in
prepare_packed_git(), and we'll generally check packs before loose
objects. So essentially every program is going to call it
immediately once per program.
Arguably we should just prepare_alt_odb() immediately upon setting
up the repository's object directory, which would save us sprinkling
calls throughout the code base (and forgetting to do so has been a
source of subtle bugs in the past). But I've stopped short of that
here, since there are already a lot of other moving parts in this
patch.
- Most call sites just get shorter. The check_and_freshen() functions
are an exception, because they have entry points to handle local and
nonlocal directories separately.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-11-12 15:50:39 +01:00
|
|
|
for (odb = the_repository->objects->odb; odb; odb = odb->next) {
|
2018-11-12 15:46:54 +01:00
|
|
|
child_process_init(&midx_verify);
|
|
|
|
midx_verify.argv = midx_argv;
|
|
|
|
midx_verify.git_cmd = 1;
|
2018-09-13 20:02:27 +02:00
|
|
|
midx_argv[2] = "--object-dir";
|
2018-11-12 15:48:47 +01:00
|
|
|
midx_argv[3] = odb->path;
|
2018-09-13 20:02:27 +02:00
|
|
|
if (run_command(&midx_verify))
|
|
|
|
errors_found |= ERROR_COMMIT_GRAPH;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-03-05 09:22:06 +01:00
|
|
|
return errors_found;
|
2005-04-09 00:02:42 +02:00
|
|
|
}
|