bf3d70fe93
Don't redundantly run "git reflog expire --all" when gc.reflogExpire
and gc.reflogExpireUnreachable are set to "never", and die immediately
if those configuration values are bad.

As an earlier "assert lack of early exit" change to the tests for "git
reflog expire" shows, an early check of gc.reflogExpire{Unreachable,}
isn't wanted in general for "git reflog expire", but it makes sense
for "gc" because:

1) Similarly to 8ab5aa4bd8 ("parseopt: handle malformed --expire
   arguments more nicely", 2018-04-21) we'll now die early if the
   config variables are set to invalid values.

   We run "pack-refs" before "reflog expire", which can take a while,
   only to then die on an invalid gc.reflogExpire{Unreachable,}
   configuration.

2) Not invoking the command at all means it won't show up in trace
   output, which makes what's going on more obvious when the two are
   set to "never".

3) As a later change documents we lock the refs when looping over the
   refs to expire, even in cases where we end up doing nothing due to
   this config.

   For the reasons noted in the earlier "assert lack of early exit"
   change I don't think it's worth it to bend over backwards in "git
   reflog expire" itself to carefully detect if we'll really do
   nothing given the combination of all its possible options and skip
   that locking, but that's easy to detect here in "gc" where we'll
   only run "reflog expire" in a relatively simple mode.

Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
/*
 * git gc builtin command
 *
 * Cleanup unreachable files and optimize the repository.
 *
 * Copyright (c) 2007 James Bowes
 *
 * Based on git-gc.sh, which is
 *
 * Copyright (c) 2006 Shawn O. Pearce
 */

#include "builtin.h"
#include "repository.h"
#include "config.h"
#include "tempfile.h"
#include "lockfile.h"
#include "parse-options.h"
#include "run-command.h"
#include "sigchain.h"
#include "argv-array.h"
#include "commit.h"
#include "commit-graph.h"
#include "packfile.h"
#include "object-store.h"
#include "pack.h"
#include "pack-objects.h"
#include "blob.h"
#include "tree.h"

#define FAILED_RUN "failed to run %s"

static const char * const builtin_gc_usage[] = {
	N_("git gc [<options>]"),
	NULL
};

static int pack_refs = 1;
static int prune_reflogs = 1;
static int aggressive_depth = 50;
static int aggressive_window = 250;
static int gc_auto_threshold = 6700;
static int gc_auto_pack_limit = 50;
static int gc_write_commit_graph;
static int detach_auto = 1;
static timestamp_t gc_log_expire_time;
static const char *gc_log_expire = "1.day.ago";
static const char *prune_expire = "2.weeks.ago";
static const char *prune_worktrees_expire = "3.months.ago";
static unsigned long big_pack_threshold;
static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;

static struct argv_array pack_refs_cmd = ARGV_ARRAY_INIT;
static struct argv_array reflog = ARGV_ARRAY_INIT;
static struct argv_array repack = ARGV_ARRAY_INIT;
static struct argv_array prune = ARGV_ARRAY_INIT;
static struct argv_array prune_worktrees = ARGV_ARRAY_INIT;
static struct argv_array rerere = ARGV_ARRAY_INIT;

static struct tempfile *pidfile;
static struct lock_file log_lock;

static struct string_list pack_garbage = STRING_LIST_INIT_DUP;

static void clean_pack_garbage(void)
{
	int i;
	for (i = 0; i < pack_garbage.nr; i++)
		unlink_or_warn(pack_garbage.items[i].string);
	string_list_clear(&pack_garbage, 0);
}

static void report_pack_garbage(unsigned seen_bits, const char *path)
{
	if (seen_bits == PACKDIR_FILE_IDX)
		string_list_append(&pack_garbage, path);
}

static void process_log_file(void)
{
	struct stat st;
	if (fstat(get_lock_file_fd(&log_lock), &st)) {
		/*
		 * Perhaps there was an i/o error or another
		 * unlikely situation. Try to make a note of
		 * this in gc.log along with any existing
		 * messages.
		 */
		int saved_errno = errno;
		fprintf(stderr, _("Failed to fstat %s: %s"),
			get_tempfile_path(log_lock.tempfile),
			strerror(saved_errno));
		fflush(stderr);
		commit_lock_file(&log_lock);
		errno = saved_errno;
	} else if (st.st_size) {
		/* There was some error recorded in the lock file */
		commit_lock_file(&log_lock);
	} else {
		/* No error, clean up any old gc.log */
		unlink(git_path("gc.log"));
		rollback_lock_file(&log_lock);
	}
}

static void process_log_file_at_exit(void)
{
	fflush(stderr);
	process_log_file();
}

static void process_log_file_on_signal(int signo)
{
	process_log_file();
	sigchain_pop(signo);
	raise(signo);
}

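/*
 * Return 1 if the config variable 'var' is set and its value parses to
 * an expiry of 0 (i.e. "never"); die if the value cannot be parsed as
 * an expiry date at all.
 */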
static int gc_config_is_timestamp_never(const char *var)
{
	const char *value;
	timestamp_t expire;

	if (!git_config_get_value(var, &value) && value) {
		if (parse_expiry_date(value, &expire))
			die(_("failed to parse '%s' value '%s'"), var, value);
		return expire == 0;
	}
	return 0;
}

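/*
 * Read gc.* configuration into the file-scope defaults above. When both
 * gc.reflogExpire and gc.reflogExpireUnreachable are set to "never",
 * clear prune_reflogs so that gc_before_repack() skips running
 * "git reflog expire" entirely.
 */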
static void gc_config(void)
{
	const char *value;

	if (!git_config_get_value("gc.packrefs", &value)) {
		if (value && !strcmp(value, "notbare"))
			pack_refs = -1;
		else
			pack_refs = git_config_bool("gc.packrefs", value);
	}

	if (gc_config_is_timestamp_never("gc.reflogexpire") &&
	    gc_config_is_timestamp_never("gc.reflogexpireunreachable"))
		prune_reflogs = 0;

	git_config_get_int("gc.aggressivewindow", &aggressive_window);
	git_config_get_int("gc.aggressivedepth", &aggressive_depth);
	git_config_get_int("gc.auto", &gc_auto_threshold);
	git_config_get_int("gc.autopacklimit", &gc_auto_pack_limit);
	git_config_get_bool("gc.writecommitgraph", &gc_write_commit_graph);
	git_config_get_bool("gc.autodetach", &detach_auto);
	git_config_get_expiry("gc.pruneexpire", &prune_expire);
	git_config_get_expiry("gc.worktreepruneexpire", &prune_worktrees_expire);
	git_config_get_expiry("gc.logexpiry", &gc_log_expire);

	git_config_get_ulong("gc.bigpackthreshold", &big_pack_threshold);
	git_config_get_ulong("pack.deltacachesize", &max_delta_cache_size);

	git_config(git_default_config, NULL);
}

static int too_many_loose_objects(void)
{
	/*
	 * Quickly check if a "gc" is needed, by estimating how
	 * many loose objects there are. Because SHA-1 is evenly
	 * distributed, we can check only one and get a reasonable
	 * estimate.
	 */
	DIR *dir;
	struct dirent *ent;
	int auto_threshold;
	int num_loose = 0;
	int needed = 0;
	const unsigned hexsz_loose = the_hash_algo->hexsz - 2;

	dir = opendir(git_path("objects/17"));
	if (!dir)
		return 0;

	auto_threshold = DIV_ROUND_UP(gc_auto_threshold, 256);
	while ((ent = readdir(dir)) != NULL) {
		if (strspn(ent->d_name, "0123456789abcdef") != hexsz_loose ||
		    ent->d_name[hexsz_loose] != '\0')
			continue;
		if (++num_loose > auto_threshold) {
			needed = 1;
			break;
		}
	}
	closedir(dir);
	return needed;
}

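/*
 * With a non-zero 'limit', append the name of every local pack of at
 * least 'limit' bytes to 'packs' and return NULL; with a zero limit,
 * append only the largest local pack and return it (or NULL if there
 * is no local pack at all).
 */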
static struct packed_git *find_base_packs(struct string_list *packs,
					  unsigned long limit)
{
	struct packed_git *p, *base = NULL;

	for (p = get_all_packs(the_repository); p; p = p->next) {
		if (!p->pack_local)
			continue;
		if (limit) {
			if (p->pack_size >= limit)
				string_list_append(packs, p->pack_name);
		} else if (!base || base->pack_size < p->pack_size) {
			base = p;
		}
	}

	if (base)
		string_list_append(packs, base->pack_name);

	return base;
}

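/*
 * Return 1 if the number of local packs that are not marked to be kept
 * exceeds gc.autoPackLimit; a limit of 0 or less disables the check.
 */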
static int too_many_packs(void)
{
	struct packed_git *p;
	int cnt;

	if (gc_auto_pack_limit <= 0)
		return 0;

	for (cnt = 0, p = get_all_packs(the_repository); p; p = p->next) {
		if (!p->pack_local)
			continue;
		if (p->pack_keep)
			continue;
		/*
		 * Perhaps check the size of the pack and count only
		 * very small ones here?
		 */
		cnt++;
	}
	return gc_auto_pack_limit < cnt;
}

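/*
 * Best-effort detection of the total physical RAM, via sysinfo(),
 * sysctl() or GlobalMemoryStatusEx() depending on the platform;
 * returns 0 when it cannot be determined.
 */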
static uint64_t total_ram(void)
{
#if defined(HAVE_SYSINFO)
	struct sysinfo si;

	if (!sysinfo(&si))
		return si.totalram;
#elif defined(HAVE_BSD_SYSCTL) && (defined(HW_MEMSIZE) || defined(HW_PHYSMEM))
	int64_t physical_memory;
	int mib[2];
	size_t length;

	mib[0] = CTL_HW;
# if defined(HW_MEMSIZE)
	mib[1] = HW_MEMSIZE;
# else
	mib[1] = HW_PHYSMEM;
# endif
	length = sizeof(int64_t);
	if (!sysctl(mib, 2, &physical_memory, &length, NULL, 0))
		return physical_memory;
#elif defined(GIT_WINDOWS_NATIVE)
	MEMORYSTATUSEX memInfo;

	memInfo.dwLength = sizeof(MEMORYSTATUSEX);
	if (GlobalMemoryStatusEx(&memInfo))
		return memInfo.ullTotalPhys;
#endif
	return 0;
}

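/*
 * Rough estimate of the memory a full repack of 'pack' will need: OS
 * file cache to keep the pack and its index mapped, plus heap for
 * pack-objects book keeping, the object hash, the reverse index and
 * the delta caches.
 */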
static uint64_t estimate_repack_memory(struct packed_git *pack)
{
	unsigned long nr_objects = approximate_object_count();
	size_t os_cache, heap;

	if (!pack || !nr_objects)
		return 0;

	/*
	 * First we have to scan through at least one pack.
	 * Assume enough room in OS file cache to keep the entire pack
	 * or we may accidentally evict data of other processes from
	 * the cache.
	 */
	os_cache = pack->pack_size + pack->index_size;
	/* then pack-objects needs lots more for book keeping */
	heap = sizeof(struct object_entry) * nr_objects;
	/*
	 * internal rev-list --all --objects takes up some memory too,
	 * let's say half of it is for blobs
	 */
	heap += sizeof(struct blob) * nr_objects / 2;
	/*
	 * and the other half is for trees (commits and tags are
	 * usually insignificant)
	 */
	heap += sizeof(struct tree) * nr_objects / 2;
	/* and then obj_hash[], underestimated in fact */
	heap += sizeof(struct object *) * nr_objects;
	/* revindex is used also */
	heap += sizeof(struct revindex_entry) * nr_objects;
	/*
	 * read_sha1_file() (either at delta calculation phase, or
	 * writing phase) also fills up the delta base cache
	 */
	heap += delta_base_cache_limit;
	/* and of course pack-objects has its own delta cache */
	heap += max_delta_cache_size;

	return os_cache + heap;
}

static int keep_one_pack(struct string_list_item *item, void *data)
{
	argv_array_pushf(&repack, "--keep-pack=%s", basename(item->string));
	return 0;
}

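/*
 * Ask for a full repack: plain "-a" when pruning happens immediately
 * ("now"), otherwise "-A", passing --unpack-unreachable when an expiry
 * is configured, plus a --keep-pack option for every pack listed in
 * 'keep_pack'.
 */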
static void add_repack_all_option(struct string_list *keep_pack)
{
	if (prune_expire && !strcmp(prune_expire, "now"))
		argv_array_push(&repack, "-a");
	else {
		argv_array_push(&repack, "-A");
		if (prune_expire)
			argv_array_pushf(&repack, "--unpack-unreachable=%s", prune_expire);
	}

	if (keep_pack)
		for_each_string_list(keep_pack, keep_one_pack, NULL);
}

static void add_repack_incremental_option(void)
{
	argv_array_push(&repack, "--no-write-bitmap-index");
}

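/*
 * Decide whether "gc --auto" has any work to do. Returns 1, after
 * setting up the repack command line, if we are over the pack or loose
 * object limits and the pre-auto-gc hook does not object; 0 otherwise.
 */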
static int need_to_gc(void)
{
	/*
	 * Setting gc.auto to 0 or negative can disable the
	 * automatic gc.
	 */
	if (gc_auto_threshold <= 0)
		return 0;

	/*
	 * If there are too many loose objects, but not too many
	 * packs, we run "repack -d -l". If there are too many packs,
	 * we run "repack -A -d -l". Otherwise we tell the caller
	 * there is no need.
	 */
	if (too_many_packs()) {
		struct string_list keep_pack = STRING_LIST_INIT_NODUP;

		if (big_pack_threshold) {
			find_base_packs(&keep_pack, big_pack_threshold);
			if (keep_pack.nr >= gc_auto_pack_limit) {
				big_pack_threshold = 0;
				string_list_clear(&keep_pack, 0);
				find_base_packs(&keep_pack, 0);
			}
		} else {
			struct packed_git *p = find_base_packs(&keep_pack, 0);
			uint64_t mem_have, mem_want;

			mem_have = total_ram();
			mem_want = estimate_repack_memory(p);

			/*
			 * Only allow 1/2 of memory for pack-objects, leave
			 * the rest for the OS and other processes in the
			 * system.
			 */
			if (!mem_have || mem_want < mem_have / 2)
				string_list_clear(&keep_pack, 0);
		}

		add_repack_all_option(&keep_pack);
		string_list_clear(&keep_pack, 0);
	} else if (too_many_loose_objects())
		add_repack_incremental_option();
	else
		return 0;

	if (run_hook_le(NULL, "pre-auto-gc", NULL))
		return 0;
	return 1;
}

/* return NULL on success, else hostname running the gc */
static const char *lock_repo_for_gc(int force, pid_t* ret_pid)
{
	struct lock_file lock = LOCK_INIT;
	char my_host[HOST_NAME_MAX + 1];
	struct strbuf sb = STRBUF_INIT;
	struct stat st;
	uintmax_t pid;
	FILE *fp;
	int fd;
	char *pidfile_path;

	if (is_tempfile_active(pidfile))
		/* already locked */
		return NULL;

	if (xgethostname(my_host, sizeof(my_host)))
		xsnprintf(my_host, sizeof(my_host), "unknown");

	pidfile_path = git_pathdup("gc.pid");
	fd = hold_lock_file_for_update(&lock, pidfile_path,
				       LOCK_DIE_ON_ERROR);
	if (!force) {
		static char locking_host[HOST_NAME_MAX + 1];
		static char *scan_fmt;
		int should_exit;

		if (!scan_fmt)
			scan_fmt = xstrfmt("%s %%%ds", "%"SCNuMAX, HOST_NAME_MAX);
		fp = fopen(pidfile_path, "r");
		memset(locking_host, 0, sizeof(locking_host));
		should_exit =
			fp != NULL &&
			!fstat(fileno(fp), &st) &&
			/*
			 * 12 hour limit is very generous as gc should
			 * never take that long. On the other hand we
			 * don't really need a strict limit here,
			 * running gc --auto one day late is not a big
			 * problem. --force can be used in manual gc
			 * after the user verifies that no gc is
			 * running.
			 */
			time(NULL) - st.st_mtime <= 12 * 3600 &&
			fscanf(fp, scan_fmt, &pid, locking_host) == 2 &&
			/* be gentle to concurrent "gc" on remote hosts */
			(strcmp(locking_host, my_host) || !kill(pid, 0) || errno == EPERM);
		if (fp != NULL)
			fclose(fp);
		if (should_exit) {
			if (fd >= 0)
				rollback_lock_file(&lock);
			*ret_pid = pid;
			free(pidfile_path);
			return locking_host;
		}
	}

	strbuf_addf(&sb, "%"PRIuMAX" %s",
		    (uintmax_t) getpid(), my_host);
	write_in_full(fd, sb.buf, sb.len);
	strbuf_release(&sb);
	commit_lock_file(&lock);
	pidfile = register_tempfile(pidfile_path);
	free(pidfile_path);
	return NULL;
}

/*
 * Returns 0 if there was no previous error and gc can proceed, 1 if
 * gc should not proceed due to an error in the last run. Prints a
 * message and returns -1 if an error occurred while reading gc.log
 */
static int report_last_gc_error(void)
{
	struct strbuf sb = STRBUF_INIT;
	int ret = 0;
	ssize_t len;
	struct stat st;
	char *gc_log_path = git_pathdup("gc.log");

	if (stat(gc_log_path, &st)) {
		if (errno == ENOENT)
			goto done;

		ret = error_errno(_("cannot stat '%s'"), gc_log_path);
		goto done;
	}

	if (st.st_mtime < gc_log_expire_time)
		goto done;

	len = strbuf_read_file(&sb, gc_log_path, 0);
	if (len < 0)
		ret = error_errno(_("cannot read '%s'"), gc_log_path);
	else if (len > 0) {
		/*
		 * A previous gc failed. Report the error, and don't
		 * bother with an automatic gc run since it is likely
		 * to fail in the same way.
		 */
		warning(_("The last gc run reported the following. "
			  "Please correct the root cause\n"
			  "and remove %s.\n"
			  "Automatic cleanup will not be performed "
			  "until the file is removed.\n\n"
			  "%s"),
			gc_log_path, sb.buf);
		ret = 1;
	}
	strbuf_release(&sb);
done:
	free(gc_log_path);
	return ret;
}

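/*
 * Run the "pack-refs" and "reflog expire" children before repacking.
 * "reflog expire" is skipped entirely when gc_config() cleared
 * prune_reflogs because both gc.reflogExpire and
 * gc.reflogExpireUnreachable are set to "never".
 */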
static void gc_before_repack(void)
{
	/*
	 * We may be called twice, as both the pre- and
	 * post-daemonized phases will call us, but running these
	 * commands more than once is pointless and wasteful.
	 */
	static int done = 0;
	if (done++)
		return;

	if (pack_refs && run_command_v_opt(pack_refs_cmd.argv, RUN_GIT_CMD))
		die(FAILED_RUN, pack_refs_cmd.argv[0]);

	if (prune_reflogs && run_command_v_opt(reflog.argv, RUN_GIT_CMD))
		die(FAILED_RUN, reflog.argv[0]);
}

int cmd_gc(int argc, const char **argv, const char *prefix)
{
	int aggressive = 0;
	int auto_gc = 0;
	int quiet = 0;
	int force = 0;
	const char *name;
	pid_t pid;
	int daemonized = 0;
	int keep_base_pack = -1;
	timestamp_t dummy;

	struct option builtin_gc_options[] = {
		OPT__QUIET(&quiet, N_("suppress progress reporting")),
		{ OPTION_STRING, 0, "prune", &prune_expire, N_("date"),
			N_("prune unreferenced objects"),
			PARSE_OPT_OPTARG, NULL, (intptr_t)prune_expire },
		OPT_BOOL(0, "aggressive", &aggressive, N_("be more thorough (increased runtime)")),
		OPT_BOOL_F(0, "auto", &auto_gc, N_("enable auto-gc mode"),
			   PARSE_OPT_NOCOMPLETE),
		OPT_BOOL_F(0, "force", &force,
			   N_("force running gc even if there may be another gc running"),
			   PARSE_OPT_NOCOMPLETE),
		OPT_BOOL(0, "keep-largest-pack", &keep_base_pack,
			 N_("repack all other packs except the largest pack")),
		OPT_END()
	};

	if (argc == 2 && !strcmp(argv[1], "-h"))
		usage_with_options(builtin_gc_usage, builtin_gc_options);

	argv_array_pushl(&pack_refs_cmd, "pack-refs", "--all", "--prune", NULL);
	argv_array_pushl(&reflog, "reflog", "expire", "--all", NULL);
	argv_array_pushl(&repack, "repack", "-d", "-l", NULL);
	argv_array_pushl(&prune, "prune", "--expire", NULL);
	argv_array_pushl(&prune_worktrees, "worktree", "prune", "--expire", NULL);
	argv_array_pushl(&rerere, "rerere", "gc", NULL);

	/* default expiry time, overwritten in gc_config */
	gc_config();
	if (parse_expiry_date(gc_log_expire, &gc_log_expire_time))
		die(_("failed to parse gc.logexpiry value %s"), gc_log_expire);

	if (pack_refs < 0)
		pack_refs = !is_bare_repository();

	argc = parse_options(argc, argv, prefix, builtin_gc_options,
			     builtin_gc_usage, 0);
	if (argc > 0)
		usage_with_options(builtin_gc_usage, builtin_gc_options);

	if (prune_expire && parse_expiry_date(prune_expire, &dummy))
		die(_("failed to parse prune expiry value %s"), prune_expire);

	if (aggressive) {
		argv_array_push(&repack, "-f");
		if (aggressive_depth > 0)
			argv_array_pushf(&repack, "--depth=%d", aggressive_depth);
		if (aggressive_window > 0)
			argv_array_pushf(&repack, "--window=%d", aggressive_window);
	}
	if (quiet)
		argv_array_push(&repack, "-q");

	if (auto_gc) {
		/*
		 * Auto-gc should be as unintrusive as possible.
		 */
		if (!need_to_gc())
			return 0;
		if (!quiet) {
			if (detach_auto)
				fprintf(stderr, _("Auto packing the repository in background for optimum performance.\n"));
			else
				fprintf(stderr, _("Auto packing the repository for optimum performance.\n"));
			fprintf(stderr, _("See \"git help gc\" for manual housekeeping.\n"));
		}
		if (detach_auto) {
			int ret = report_last_gc_error();
			if (ret < 0)
				/* an I/O error occurred, already reported */
				exit(128);
			if (ret == 1)
				/* Last gc --auto failed. Skip this one. */
				return 0;

			if (lock_repo_for_gc(force, &pid))
				return 0;
			gc_before_repack(); /* dies on failure */
			delete_tempfile(&pidfile);

			/*
			 * failure to daemonize is ok, we'll continue
			 * in foreground
			 */
			daemonized = !daemonize();
		}
	} else {
		struct string_list keep_pack = STRING_LIST_INIT_NODUP;

		if (keep_base_pack != -1) {
			if (keep_base_pack)
				find_base_packs(&keep_pack, 0);
		} else if (big_pack_threshold) {
			find_base_packs(&keep_pack, big_pack_threshold);
		}

		add_repack_all_option(&keep_pack);
		string_list_clear(&keep_pack, 0);
	}

	name = lock_repo_for_gc(force, &pid);
	if (name) {
		if (auto_gc)
			return 0; /* be quiet on --auto */
		die(_("gc is already running on machine '%s' pid %"PRIuMAX" (use --force if not)"),
		    name, (uintmax_t)pid);
	}

	if (daemonized) {
		hold_lock_file_for_update(&log_lock,
					  git_path("gc.log"),
					  LOCK_DIE_ON_ERROR);
		dup2(get_lock_file_fd(&log_lock), 2);
		sigchain_push_common(process_log_file_on_signal);
		atexit(process_log_file_at_exit);
	}

	gc_before_repack();

	if (!repository_format_precious_objects) {
		close_all_packs(the_repository->objects);
		if (run_command_v_opt(repack.argv, RUN_GIT_CMD))
			die(FAILED_RUN, repack.argv[0]);

		if (prune_expire) {
			argv_array_push(&prune, prune_expire);
			if (quiet)
				argv_array_push(&prune, "--no-progress");
			if (repository_format_partial_clone)
				argv_array_push(&prune,
						"--exclude-promisor-objects");
			if (run_command_v_opt(prune.argv, RUN_GIT_CMD))
				die(FAILED_RUN, prune.argv[0]);
		}
	}

	if (prune_worktrees_expire) {
		argv_array_push(&prune_worktrees, prune_worktrees_expire);
		if (run_command_v_opt(prune_worktrees.argv, RUN_GIT_CMD))
			die(FAILED_RUN, prune_worktrees.argv[0]);
	}

	if (run_command_v_opt(rerere.argv, RUN_GIT_CMD))
		die(FAILED_RUN, rerere.argv[0]);

	report_garbage = report_pack_garbage;
	reprepare_packed_git(the_repository);
	if (pack_garbage.nr > 0) {
		close_all_packs(the_repository->objects);
		clean_pack_garbage();
	}

	if (gc_write_commit_graph)
		write_commit_graph_reachable(get_object_directory(), 0,
					     !quiet && !daemonized);

	if (auto_gc && too_many_loose_objects())
		warning(_("There are too many unreachable loose objects; "
			  "run 'git prune' to remove them."));

	if (!daemonized)
		unlink(git_path("gc.log"));

	return 0;
}