Diffstat (limited to 'refs')
 refs/debug.c            |   80
 refs/files-backend.c    | 1422
 refs/iterator.c         |  223
 refs/packed-backend.c   |  577
 refs/packed-backend.h   |   13
 refs/ref-cache.c        |  103
 refs/ref-cache.h        |    4
 refs/refs-internal.h    |  224
 refs/reftable-backend.c | 2700
 9 files changed, 4604 insertions(+), 742 deletions(-)
diff --git a/refs/debug.c b/refs/debug.c
index 634681ca44..5390fa9c18 100644
--- a/refs/debug.c
+++ b/refs/debug.c
@@ -33,11 +33,18 @@ struct ref_store *maybe_debug_wrap_ref_store(const char *gitdir, struct ref_stor
return (struct ref_store *)res;
}
-static int debug_init_db(struct ref_store *refs, int flags, struct strbuf *err)
+static void debug_release(struct ref_store *refs)
{
struct debug_ref_store *drefs = (struct debug_ref_store *)refs;
- int res = drefs->refs->be->init_db(drefs->refs, flags, err);
- trace_printf_key(&trace_refs, "init_db: %d\n", res);
+ drefs->refs->be->release(drefs->refs);
+ trace_printf_key(&trace_refs, "release\n");
+}
+
+static int debug_create_on_disk(struct ref_store *refs, int flags, struct strbuf *err)
+{
+ struct debug_ref_store *drefs = (struct debug_ref_store *)refs;
+ int res = drefs->refs->be->create_on_disk(drefs->refs, flags, err);
+ trace_printf_key(&trace_refs, "create_on_disk: %d\n", res);
return res;
}
@@ -76,9 +83,8 @@ static void print_update(int i, const char *refname,
static void print_transaction(struct ref_transaction *transaction)
{
- int i;
trace_printf_key(&trace_refs, "transaction {\n");
- for (i = 0; i < transaction->nr; i++) {
+ for (size_t i = 0; i < transaction->nr; i++) {
struct ref_update *u = transaction->updates[i];
print_update(i, u->refname, &u->old_oid, &u->new_oid, u->flags,
u->type, u->msg);
@@ -111,18 +117,6 @@ static int debug_transaction_abort(struct ref_store *refs,
return res;
}
-static int debug_initial_transaction_commit(struct ref_store *refs,
- struct ref_transaction *transaction,
- struct strbuf *err)
-{
- struct debug_ref_store *drefs = (struct debug_ref_store *)refs;
- int res;
- transaction->ref_store = drefs->refs;
- res = drefs->refs->be->initial_transaction_commit(drefs->refs,
- transaction, err);
- return res;
-}
-
static int debug_pack_refs(struct ref_store *ref_store, struct pack_refs_opts *opts)
{
struct debug_ref_store *drefs = (struct debug_ref_store *)ref_store;
@@ -131,18 +125,6 @@ static int debug_pack_refs(struct ref_store *ref_store, struct pack_refs_opts *o
return res;
}
-static int debug_create_symref(struct ref_store *ref_store,
- const char *ref_name, const char *target,
- const char *logmsg)
-{
- struct debug_ref_store *drefs = (struct debug_ref_store *)ref_store;
- int res = drefs->refs->be->create_symref(drefs->refs, ref_name, target,
- logmsg);
- trace_printf_key(&trace_refs, "create_symref: %s -> %s \"%s\": %d\n", ref_name,
- target, logmsg, res);
- return res;
-}
-
static int debug_rename_ref(struct ref_store *ref_store, const char *oldref,
const char *newref, const char *logmsg)
{
@@ -181,13 +163,22 @@ static int debug_ref_iterator_advance(struct ref_iterator *ref_iterator)
trace_printf_key(&trace_refs, "iterator_advance: %s (0)\n",
diter->iter->refname);
- diter->base.ordered = diter->iter->ordered;
diter->base.refname = diter->iter->refname;
diter->base.oid = diter->iter->oid;
diter->base.flags = diter->iter->flags;
return res;
}
+static int debug_ref_iterator_seek(struct ref_iterator *ref_iterator,
+ const char *prefix)
+{
+ struct debug_ref_iterator *diter =
+ (struct debug_ref_iterator *)ref_iterator;
+ int res = diter->iter->vtable->seek(diter->iter, prefix);
+ trace_printf_key(&trace_refs, "iterator_seek: %s: %d\n", prefix ? prefix : "", res);
+ return res;
+}
+
static int debug_ref_iterator_peel(struct ref_iterator *ref_iterator,
struct object_id *peeled)
{
@@ -198,19 +189,19 @@ static int debug_ref_iterator_peel(struct ref_iterator *ref_iterator,
return res;
}
-static int debug_ref_iterator_abort(struct ref_iterator *ref_iterator)
+static void debug_ref_iterator_release(struct ref_iterator *ref_iterator)
{
struct debug_ref_iterator *diter =
(struct debug_ref_iterator *)ref_iterator;
- int res = diter->iter->vtable->abort(diter->iter);
- trace_printf_key(&trace_refs, "iterator_abort: %d\n", res);
- return res;
+ diter->iter->vtable->release(diter->iter);
+ trace_printf_key(&trace_refs, "iterator_release\n");
}
static struct ref_iterator_vtable debug_ref_iterator_vtable = {
.advance = debug_ref_iterator_advance,
+ .seek = debug_ref_iterator_seek,
.peel = debug_ref_iterator_peel,
- .abort = debug_ref_iterator_abort,
+ .release = debug_ref_iterator_release,
};
static struct ref_iterator *
@@ -222,7 +213,7 @@ debug_ref_iterator_begin(struct ref_store *ref_store, const char *prefix,
drefs->refs->be->iterator_begin(drefs->refs, prefix,
exclude_patterns, flags);
struct debug_ref_iterator *diter = xcalloc(1, sizeof(*diter));
- base_ref_iterator_init(&diter->base, &debug_ref_iterator_vtable, 1);
+ base_ref_iterator_init(&diter->base, &debug_ref_iterator_vtable);
diter->iter = res;
trace_printf_key(&trace_refs, "ref_iterator_begin: \"%s\" (0x%x)\n",
prefix, flags);
@@ -425,10 +416,21 @@ static int debug_reflog_expire(struct ref_store *ref_store, const char *refname,
return res;
}
+static int debug_fsck(struct ref_store *ref_store,
+ struct fsck_options *o,
+ struct worktree *wt)
+{
+ struct debug_ref_store *drefs = (struct debug_ref_store *)ref_store;
+ int res = drefs->refs->be->fsck(drefs->refs, o, wt);
+ trace_printf_key(&trace_refs, "fsck: %d\n", res);
+ return res;
+}
+
struct ref_storage_be refs_be_debug = {
.name = "debug",
.init = NULL,
- .init_db = debug_init_db,
+ .release = debug_release,
+ .create_on_disk = debug_create_on_disk,
/*
* None of these should be NULL. If the "files" backend (in
@@ -439,10 +441,8 @@ struct ref_storage_be refs_be_debug = {
.transaction_prepare = debug_transaction_prepare,
.transaction_finish = debug_transaction_finish,
.transaction_abort = debug_transaction_abort,
- .initial_transaction_commit = debug_initial_transaction_commit,
.pack_refs = debug_pack_refs,
- .create_symref = debug_create_symref,
.rename_ref = debug_rename_ref,
.copy_ref = debug_copy_ref,
@@ -457,4 +457,6 @@ struct ref_storage_be refs_be_debug = {
.create_reflog = debug_create_reflog,
.delete_reflog = debug_delete_reflog,
.reflog_expire = debug_reflog_expire,
+
+ .fsck = debug_fsck,
};
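
The debug-backend changes above follow the reworked ref-iterator lifecycle: the vtable's .abort callback is replaced by .release, a .seek callback is added, and consumers now free the iterator unconditionally once iteration ends instead of aborting it. A minimal consumer sketch under those assumptions (handle_ref() is a hypothetical callback; the begin call stands in for whichever backend's iterator_begin is in use):

	struct ref_iterator *iter =
		refs->be->iterator_begin(refs, "refs/heads/", NULL, 0);
	int ok;

	while ((ok = ref_iterator_advance(iter)) == ITER_OK)
		handle_ref(iter->refname, iter->oid, iter->flags); /* hypothetical */

	if (ok != ITER_DONE)
		die("error while iterating over references");
	ref_iterator_free(iter); /* invokes the vtable's ->release() */
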
diff --git a/refs/files-backend.c b/refs/files-backend.c
index 75dcc21ecb..ff54a4bb7e 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -1,10 +1,17 @@
+#define USE_THE_REPOSITORY_VARIABLE
+#define DISABLE_SIGN_COMPARE_WARNINGS
+
#include "../git-compat-util.h"
+#include "../abspath.h"
+#include "../config.h"
#include "../copy.h"
#include "../environment.h"
#include "../gettext.h"
#include "../hash.h"
#include "../hex.h"
+#include "../fsck.h"
#include "../refs.h"
+#include "../repo-settings.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "packed-backend.h"
@@ -18,6 +25,7 @@
#include "../dir.h"
#include "../chdir-notify.h"
#include "../setup.h"
+#include "../worktree.h"
#include "../wrapper.h"
#include "../write-or-die.h"
#include "../revision.h"
@@ -64,6 +72,7 @@ struct ref_lock {
char *ref_name;
struct lock_file lk;
struct object_id old_oid;
+ unsigned int count; /* track users of the lock (ref update + reflog updates) */
};
struct files_ref_store {
@@ -71,6 +80,8 @@ struct files_ref_store {
unsigned int store_flags;
char *gitcommondir;
+ enum log_refs_config log_all_ref_updates;
+ int prefer_symlink_refs;
struct ref_cache *loose;
@@ -89,9 +100,9 @@ static void clear_loose_ref_cache(struct files_ref_store *refs)
* Create a new submodule ref cache and add it to the internal
* set of caches.
*/
-static struct ref_store *files_ref_store_create(struct repository *repo,
- const char *gitdir,
- unsigned int flags)
+static struct ref_store *files_ref_store_init(struct repository *repo,
+ const char *gitdir,
+ unsigned int flags)
{
struct files_ref_store *refs = xcalloc(1, sizeof(*refs));
struct ref_store *ref_store = (struct ref_store *)refs;
@@ -102,7 +113,9 @@ static struct ref_store *files_ref_store_create(struct repository *repo,
get_common_dir_noenv(&sb, gitdir);
refs->gitcommondir = strbuf_detach(&sb, NULL);
refs->packed_ref_store =
- packed_ref_store_create(repo, refs->gitcommondir, flags);
+ packed_ref_store_init(repo, refs->gitcommondir, flags);
+ refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo);
+ repo_config_get_bool(repo, "core.prefersymlinkrefs", &refs->prefer_symlink_refs);
chdir_notify_reparent("files-backend $GIT_DIR", &refs->base.gitdir);
chdir_notify_reparent("files-backend $GIT_COMMONDIR",
@@ -149,6 +162,15 @@ static struct files_ref_store *files_downcast(struct ref_store *ref_store,
return refs;
}
+static void files_ref_store_release(struct ref_store *ref_store)
+{
+ struct files_ref_store *refs = files_downcast(ref_store, 0, "release");
+ free_ref_cache(refs->loose);
+ free(refs->gitcommondir);
+ ref_store_release(refs->packed_ref_store);
+ free(refs->packed_ref_store);
+}
+
static void files_reflog_path(struct files_ref_store *refs,
struct strbuf *sb,
const char *refname)
@@ -229,6 +251,45 @@ static void add_per_worktree_entries_to_dir(struct ref_dir *dir, const char *dir
}
}
+static void loose_fill_ref_dir_regular_file(struct files_ref_store *refs,
+ const char *refname,
+ struct ref_dir *dir)
+{
+ struct object_id oid;
+ int flag;
+ const char *referent = refs_resolve_ref_unsafe(&refs->base,
+ refname,
+ RESOLVE_REF_READING,
+ &oid, &flag);
+
+ if (!referent) {
+ oidclr(&oid, refs->base.repo->hash_algo);
+ flag |= REF_ISBROKEN;
+ } else if (is_null_oid(&oid)) {
+ /*
+ * It is so astronomically unlikely
+ * that null_oid is the OID of an
+ * actual object that we consider its
+ * appearance in a loose reference
+ * file to be repo corruption
+ * (probably due to a software bug).
+ */
+ flag |= REF_ISBROKEN;
+ }
+
+ if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
+ if (!refname_is_safe(refname))
+ die("loose refname is dangerous: %s", refname);
+ oidclr(&oid, refs->base.repo->hash_algo);
+ flag |= REF_BAD_NAME | REF_ISBROKEN;
+ }
+
+ if (!(flag & REF_ISSYMREF))
+ referent = NULL;
+
+ add_entry_to_dir(dir, create_ref_entry(refname, referent, &oid, flag));
+}
+
/*
* Read the loose references from the namespace dirname into dir
* (without recursing). dirname must end with '/'. dir must be the
@@ -257,8 +318,6 @@ static void loose_fill_ref_dir(struct ref_store *ref_store,
strbuf_add(&refname, dirname, dirnamelen);
while ((de = readdir(d)) != NULL) {
- struct object_id oid;
- int flag;
unsigned char dtype;
if (de->d_name[0] == '.')
@@ -274,33 +333,7 @@ static void loose_fill_ref_dir(struct ref_store *ref_store,
create_dir_entry(dir->cache, refname.buf,
refname.len));
} else if (dtype == DT_REG) {
- if (!refs_resolve_ref_unsafe(&refs->base,
- refname.buf,
- RESOLVE_REF_READING,
- &oid, &flag)) {
- oidclr(&oid);
- flag |= REF_ISBROKEN;
- } else if (is_null_oid(&oid)) {
- /*
- * It is so astronomically unlikely
- * that null_oid is the OID of an
- * actual object that we consider its
- * appearance in a loose reference
- * file to be repo corruption
- * (probably due to a software bug).
- */
- flag |= REF_ISBROKEN;
- }
-
- if (check_refname_format(refname.buf,
- REFNAME_ALLOW_ONELEVEL)) {
- if (!refname_is_safe(refname.buf))
- die("loose refname is dangerous: %s", refname.buf);
- oidclr(&oid);
- flag |= REF_BAD_NAME | REF_ISBROKEN;
- }
- add_entry_to_dir(dir,
- create_ref_entry(refname.buf, &oid, flag));
+ loose_fill_ref_dir_regular_file(refs, refname.buf, dir);
}
strbuf_setlen(&refname, dirnamelen);
}
@@ -311,9 +344,89 @@ static void loose_fill_ref_dir(struct ref_store *ref_store,
add_per_worktree_entries_to_dir(dir, dirname);
}
-static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
+static int for_each_root_ref(struct files_ref_store *refs,
+ int (*cb)(const char *refname, void *cb_data),
+ void *cb_data)
+{
+ struct strbuf path = STRBUF_INIT, refname = STRBUF_INIT;
+ const char *dirname = refs->loose->root->name;
+ struct dirent *de;
+ size_t dirnamelen;
+ int ret;
+ DIR *d;
+
+ files_ref_path(refs, &path, dirname);
+
+ d = opendir(path.buf);
+ if (!d) {
+ strbuf_release(&path);
+ return -1;
+ }
+
+ strbuf_addstr(&refname, dirname);
+ dirnamelen = refname.len;
+
+ while ((de = readdir(d)) != NULL) {
+ unsigned char dtype;
+
+ if (de->d_name[0] == '.')
+ continue;
+ if (ends_with(de->d_name, ".lock"))
+ continue;
+ strbuf_addstr(&refname, de->d_name);
+
+ dtype = get_dtype(de, &path, 1);
+ if (dtype == DT_REG && is_root_ref(de->d_name)) {
+ ret = cb(refname.buf, cb_data);
+ if (ret)
+ goto done;
+ }
+
+ strbuf_setlen(&refname, dirnamelen);
+ }
+
+ ret = 0;
+
+done:
+ strbuf_release(&refname);
+ strbuf_release(&path);
+ closedir(d);
+ return ret;
+}
+
+struct fill_root_ref_data {
+ struct files_ref_store *refs;
+ struct ref_dir *dir;
+};
+
+static int fill_root_ref(const char *refname, void *cb_data)
+{
+ struct fill_root_ref_data *data = cb_data;
+ loose_fill_ref_dir_regular_file(data->refs, refname, data->dir);
+ return 0;
+}
+
+/*
+ * Add root refs to the ref dir by parsing the directory for any files which
+ * follow the root ref syntax.
+ */
+static void add_root_refs(struct files_ref_store *refs,
+ struct ref_dir *dir)
+{
+ struct fill_root_ref_data data = {
+ .refs = refs,
+ .dir = dir,
+ };
+
+ for_each_root_ref(refs, fill_root_ref, &data);
+}
+
+static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs,
+ unsigned int flags)
{
if (!refs->loose) {
+ struct ref_dir *dir;
+
/*
* Mark the top-level directory complete because we
* are about to read the only subdirectory that can
@@ -324,12 +437,16 @@ static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
/* We're going to fill the top level ourselves: */
refs->loose->root->flag &= ~REF_INCOMPLETE;
+ dir = get_ref_dir(refs->loose->root);
+
+ if (flags & DO_FOR_EACH_INCLUDE_ROOT_REFS)
+ add_root_refs(refs, dir);
+
/*
* Add an incomplete entry for "refs/" (to be filled
* lazily):
*/
- add_entry_to_dir(get_ref_dir(refs->loose->root),
- create_dir_entry(refs->loose, "refs/", 5));
+ add_entry_to_dir(dir, create_dir_entry(refs->loose, "refs/", 5));
}
return refs->loose;
}
@@ -454,7 +571,8 @@ stat_ref:
strbuf_rtrim(&sb_contents);
buf = sb_contents.buf;
- ret = parse_loose_ref_contents(buf, oid, referent, type, &myerr);
+ ret = parse_loose_ref_contents(ref_store->repo->hash_algo, buf,
+ oid, referent, type, NULL, &myerr);
out:
if (ret && !myerr)
@@ -482,15 +600,15 @@ static int files_read_symbolic_ref(struct ref_store *ref_store, const char *refn
unsigned int type;
ret = read_ref_internal(ref_store, refname, &oid, referent, &type, &failure_errno, 1);
- if (ret)
- return ret;
-
- return !(type & REF_ISSYMREF);
+ if (!ret && !(type & REF_ISSYMREF))
+ return NOT_A_SYMREF;
+ return ret;
}
-int parse_loose_ref_contents(const char *buf, struct object_id *oid,
+int parse_loose_ref_contents(const struct git_hash_algo *algop,
+ const char *buf, struct object_id *oid,
struct strbuf *referent, unsigned int *type,
- int *failure_errno)
+ const char **trailing, int *failure_errno)
{
const char *p;
if (skip_prefix(buf, "ref:", &buf)) {
@@ -506,20 +624,27 @@ int parse_loose_ref_contents(const char *buf, struct object_id *oid,
/*
* FETCH_HEAD has additional data after the sha.
*/
- if (parse_oid_hex(buf, oid, &p) ||
+ if (parse_oid_hex_algop(buf, oid, &p, algop) ||
(*p != '\0' && !isspace(*p))) {
*type |= REF_ISBROKEN;
*failure_errno = EINVAL;
return -1;
}
+
+ if (trailing)
+ *trailing = p;
+
return 0;
}
static void unlock_ref(struct ref_lock *lock)
{
- rollback_lock_file(&lock->lk);
- free(lock->ref_name);
- free(lock);
+ lock->count--;
+ if (!lock->count) {
+ rollback_lock_file(&lock->lk);
+ free(lock->ref_name);
+ free(lock);
+ }
}
/*
@@ -553,6 +678,7 @@ static void unlock_ref(struct ref_lock *lock)
*/
static int lock_raw_ref(struct files_ref_store *refs,
const char *refname, int mustexist,
+ struct string_list *refnames_to_check,
const struct string_list *extras,
struct ref_lock **lock_p,
struct strbuf *referent,
@@ -575,6 +701,7 @@ static int lock_raw_ref(struct files_ref_store *refs,
*lock_p = CALLOC_ARRAY(lock, 1);
lock->ref_name = xstrdup(refname);
+ lock->count = 1;
files_ref_path(refs, &ref_file, refname);
retry:
@@ -591,7 +718,7 @@ retry:
* reason to expect this error to be transitory.
*/
if (refs_verify_refname_available(&refs->base, refname,
- extras, NULL, err)) {
+ extras, NULL, 0, err)) {
if (mustexist) {
/*
* To the user the relevant error is
@@ -698,7 +825,7 @@ retry:
REMOVE_DIR_EMPTY_ONLY)) {
if (refs_verify_refname_available(
&refs->base, refname,
- extras, NULL, err)) {
+ extras, NULL, 0, err)) {
/*
* The error message set by
* verify_refname_available() is OK.
@@ -729,14 +856,11 @@ retry:
}
/*
- * If the ref did not exist and we are creating it,
- * make sure there is no existing packed ref that
- * conflicts with refname:
+ * If the ref did not exist and we are creating it, we have to
+ * make sure there is no existing packed ref that conflicts
+ * with refname. This check is deferred so that we can batch it.
*/
- if (refs_verify_refname_available(
- refs->packed_ref_store, refname,
- extras, NULL, err))
- goto error_return;
+ string_list_append(refnames_to_check, refname);
}
ret = 0;
@@ -786,16 +910,22 @@ static int files_ref_iterator_advance(struct ref_iterator *ref_iterator)
iter->base.refname = iter->iter0->refname;
iter->base.oid = iter->iter0->oid;
iter->base.flags = iter->iter0->flags;
+ iter->base.referent = iter->iter0->referent;
+
return ITER_OK;
}
- iter->iter0 = NULL;
- if (ref_iterator_abort(ref_iterator) != ITER_DONE)
- ok = ITER_ERROR;
-
return ok;
}
+static int files_ref_iterator_seek(struct ref_iterator *ref_iterator,
+ const char *prefix)
+{
+ struct files_ref_iterator *iter =
+ (struct files_ref_iterator *)ref_iterator;
+ return ref_iterator_seek(iter->iter0, prefix);
+}
+
static int files_ref_iterator_peel(struct ref_iterator *ref_iterator,
struct object_id *peeled)
{
@@ -805,23 +935,18 @@ static int files_ref_iterator_peel(struct ref_iterator *ref_iterator,
return ref_iterator_peel(iter->iter0, peeled);
}
-static int files_ref_iterator_abort(struct ref_iterator *ref_iterator)
+static void files_ref_iterator_release(struct ref_iterator *ref_iterator)
{
struct files_ref_iterator *iter =
(struct files_ref_iterator *)ref_iterator;
- int ok = ITER_DONE;
-
- if (iter->iter0)
- ok = ref_iterator_abort(iter->iter0);
-
- base_ref_iterator_free(ref_iterator);
- return ok;
+ ref_iterator_free(iter->iter0);
}
static struct ref_iterator_vtable files_ref_iterator_vtable = {
.advance = files_ref_iterator_advance,
+ .seek = files_ref_iterator_seek,
.peel = files_ref_iterator_peel,
- .abort = files_ref_iterator_abort,
+ .release = files_ref_iterator_release,
};
static struct ref_iterator *files_ref_iterator_begin(
@@ -857,7 +982,7 @@ static struct ref_iterator *files_ref_iterator_begin(
* disk, and re-reads it if not.
*/
- loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs),
+ loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, flags),
prefix, ref_store->repo, 1);
/*
@@ -879,8 +1004,7 @@ static struct ref_iterator *files_ref_iterator_begin(
CALLOC_ARRAY(iter, 1);
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable,
- overlay_iter->ordered);
+ base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);
iter->iter0 = overlay_iter;
iter->repo = ref_store->repo;
iter->flags = flags;
@@ -1041,10 +1165,11 @@ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
*/
if (is_null_oid(&lock->old_oid) &&
refs_verify_refname_available(refs->packed_ref_store, refname,
- NULL, NULL, err))
+ NULL, NULL, 0, err))
goto error_return;
lock->ref_name = xstrdup(refname);
+ lock->count = 1;
if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) {
unable_to_lock_message(ref_file.buf, errno, err);
@@ -1053,7 +1178,7 @@ static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
if (!refs_resolve_ref_unsafe(&refs->base, lock->ref_name, 0,
&lock->old_oid, NULL))
- oidclr(&lock->old_oid);
+ oidclr(&lock->old_oid, refs->base.repo->hash_algo);
goto out;
error_return:
@@ -1134,13 +1259,13 @@ static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
if (check_refname_format(r->name, 0))
return;
- transaction = ref_store_transaction_begin(&refs->base, &err);
+ transaction = ref_store_transaction_begin(&refs->base, 0, &err);
if (!transaction)
goto cleanup;
ref_transaction_add_update(
transaction, r->name,
REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD | REF_IS_PRUNING,
- null_oid(), &r->oid, NULL);
+ null_oid(), &r->oid, NULL, NULL, NULL, NULL);
if (ref_transaction_commit(transaction, &err))
goto cleanup;
@@ -1171,7 +1296,8 @@ static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_
/*
* Return true if the specified reference should be packed.
*/
-static int should_pack_ref(const char *refname,
+static int should_pack_ref(struct files_ref_store *refs,
+ const char *refname,
const struct object_id *oid, unsigned int ref_flags,
struct pack_refs_opts *opts)
{
@@ -1187,7 +1313,7 @@ static int should_pack_ref(const char *refname,
return 0;
/* Do not pack broken refs: */
- if (!ref_resolves_to_object(refname, the_repository, oid, ref_flags))
+ if (!ref_resolves_to_object(refname, refs->base.repo, oid, ref_flags))
return 0;
if (ref_excluded(opts->exclusions, refname))
@@ -1200,6 +1326,69 @@ static int should_pack_ref(const char *refname,
return 0;
}
+static int should_pack_refs(struct files_ref_store *refs,
+ struct pack_refs_opts *opts)
+{
+ struct ref_iterator *iter;
+ size_t packed_size;
+ size_t refcount = 0;
+ size_t limit;
+ int ret;
+
+ if (!(opts->flags & PACK_REFS_AUTO))
+ return 1;
+
+ ret = packed_refs_size(refs->packed_ref_store, &packed_size);
+ if (ret < 0)
+ die("cannot determine packed-refs size");
+
+ /*
+ * Packing loose references into the packed-refs file scales with the
+ * number of references we're about to write. We thus decide whether we
+ * repack refs by weighing the current size of the packed-refs file
+ * against the number of loose references. This is done such that we do
+ * not repack too often on repositories with a huge number of
+ * references, where we can expect a lot of churn in the number of
+ * references.
+ *
+ * As a heuristic, we repack if the number of loose references in the
+ * repository exceeds `log2(nr_packed_refs) * 5`, where we estimate
+ * `nr_packed_refs = packed_size / 100`, which scales as follows:
+ *
+ * - 1kB ~ 10 packed refs: 16 refs
+ * - 10kB ~ 100 packed refs: 33 refs
+ * - 100kB ~ 1k packed refs: 49 refs
+ * - 1MB ~ 10k packed refs: 66 refs
+ * - 10MB ~ 100k packed refs: 82 refs
+ * - 100MB ~ 1m packed refs: 99 refs
+ *
+ * We thus allow roughly 16 additional loose refs per factor of ten of
+ * packed refs. This heuristic may be tweaked in the future, but should
+ * serve as a sufficiently good first iteration.
+ */
+ limit = log2u(packed_size / 100) * 5;
+ if (limit < 16)
+ limit = 16;
+
+ iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, 0), NULL,
+ refs->base.repo, 0);
+ while ((ret = ref_iterator_advance(iter)) == ITER_OK) {
+ if (should_pack_ref(refs, iter->refname, iter->oid,
+ iter->flags, opts))
+ refcount++;
+ if (refcount >= limit) {
+ ref_iterator_free(iter);
+ return 1;
+ }
+ }
+
+ if (ret != ITER_DONE)
+ die("error while iterating over references");
+
+ ref_iterator_free(iter);
+ return 0;
+}
+
static int files_pack_refs(struct ref_store *ref_store,
struct pack_refs_opts *opts)
{
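
As a worked example of the heuristic above (sizes illustrative): a packed-refs file of roughly 1 MB estimates to packed_size / 100 = 10000 packed refs, log2u(10000) is 13 with integer flooring, so limit = 65 loose refs before an auto-repack triggers; the in-code table quotes 66 because it was computed with the exact logarithm. Small repositories bottom out at the clamp of 16:

	size_t packed_size = 1000 * 1000;            /* assumed ~1MB packed-refs file */
	size_t limit = log2u(packed_size / 100) * 5; /* log2u(10000) == 13, so 65 */
	if (limit < 16)
		limit = 16;                          /* floor for small repositories */
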
@@ -1212,21 +1401,25 @@ static int files_pack_refs(struct ref_store *ref_store,
struct strbuf err = STRBUF_INIT;
struct ref_transaction *transaction;
- transaction = ref_store_transaction_begin(refs->packed_ref_store, &err);
+ if (!should_pack_refs(refs, opts))
+ return 0;
+
+ transaction = ref_store_transaction_begin(refs->packed_ref_store,
+ 0, &err);
if (!transaction)
return -1;
packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err);
- iter = cache_ref_iterator_begin(get_loose_ref_cache(refs), NULL,
- the_repository, 0);
+ iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, 0), NULL,
+ refs->base.repo, 0);
while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
/*
* If the loose reference can be packed, add an entry
* in the packed ref cache. If the reference should be
* pruned, also add it to refs_to_prune.
*/
- if (!should_pack_ref(iter->refname, iter->oid, iter->flags, opts))
+ if (!should_pack_ref(refs, iter->refname, iter->oid, iter->flags, opts))
continue;
/*
@@ -1234,7 +1427,7 @@ static int files_pack_refs(struct ref_store *ref_store,
* packed-refs transaction:
*/
if (ref_transaction_update(transaction, iter->refname,
- iter->oid, NULL,
+ iter->oid, NULL, NULL, NULL,
REF_NO_DEREF, NULL, &err))
die("failure preparing to create packed reference %s: %s",
iter->refname, err.buf);
@@ -1259,6 +1452,7 @@ static int files_pack_refs(struct ref_store *ref_store,
packed_refs_unlock(refs->packed_ref_store);
prune_refs(refs, &refs_to_prune);
+ ref_iterator_free(iter);
strbuf_release(&err);
return 0;
}
@@ -1323,12 +1517,14 @@ static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname)
return ret;
}
-static int write_ref_to_lockfile(struct ref_lock *lock,
+static int write_ref_to_lockfile(struct files_ref_store *refs,
+ struct ref_lock *lock,
const struct object_id *oid,
int skip_oid_verification, struct strbuf *err);
static int commit_ref_update(struct files_ref_store *refs,
struct ref_lock *lock,
const struct object_id *oid, const char *logmsg,
+ int flags,
struct strbuf *err);
/*
@@ -1351,7 +1547,7 @@ static int refs_rename_ref_available(struct ref_store *refs,
string_list_insert(&skip, old_refname);
ok = !refs_verify_refname_available(refs, new_refname,
- NULL, &skip, &err);
+ NULL, &skip, 0, &err);
if (!ok)
error("%s", err.buf);
@@ -1471,8 +1667,8 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store,
}
oidcpy(&lock->old_oid, &orig_oid);
- if (write_ref_to_lockfile(lock, &orig_oid, 0, &err) ||
- commit_ref_update(refs, lock, &orig_oid, logmsg, &err)) {
+ if (write_ref_to_lockfile(refs, lock, &orig_oid, 0, &err) ||
+ commit_ref_update(refs, lock, &orig_oid, logmsg, 0, &err)) {
error("unable to write current sha1 into %s: %s", newrefname, err.buf);
strbuf_release(&err);
goto rollback;
@@ -1489,14 +1685,11 @@ static int files_copy_or_rename_ref(struct ref_store *ref_store,
goto rollbacklog;
}
- flag = log_all_ref_updates;
- log_all_ref_updates = LOG_REFS_NONE;
- if (write_ref_to_lockfile(lock, &orig_oid, 0, &err) ||
- commit_ref_update(refs, lock, &orig_oid, NULL, &err)) {
+ if (write_ref_to_lockfile(refs, lock, &orig_oid, 0, &err) ||
+ commit_ref_update(refs, lock, &orig_oid, NULL, REF_SKIP_CREATE_REFLOG, &err)) {
error("unable to write current sha1 into %s: %s", oldrefname, err.buf);
strbuf_release(&err);
}
- log_all_ref_updates = flag;
rollbacklog:
if (logmoved && rename(sb_newref.buf, sb_oldref.buf))
@@ -1591,13 +1784,17 @@ static int log_ref_setup(struct files_ref_store *refs,
const char *refname, int force_create,
int *logfd, struct strbuf *err)
{
+ enum log_refs_config log_refs_cfg = refs->log_all_ref_updates;
struct strbuf logfile_sb = STRBUF_INIT;
char *logfile;
+ if (log_refs_cfg == LOG_REFS_UNSET)
+ log_refs_cfg = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
+
files_reflog_path(refs, &logfile_sb, refname);
logfile = strbuf_detach(&logfile_sb, NULL);
- if (force_create || should_autocreate_reflog(refname)) {
+ if (force_create || should_autocreate_reflog(log_refs_cfg, refname)) {
if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) {
if (errno == ENOENT)
strbuf_addf(err, "unable to create directory for '%s': "
@@ -1631,7 +1828,7 @@ static int log_ref_setup(struct files_ref_store *refs,
}
if (*logfd >= 0)
- adjust_shared_perm(logfile);
+ adjust_shared_perm(the_repository, logfile);
free(logfile);
return 0;
@@ -1664,6 +1861,9 @@ static int log_ref_write_fd(int fd, const struct object_id *old_oid,
struct strbuf sb = STRBUF_INIT;
int ret = 0;
+ if (!committer)
+ committer = git_committer_info(0);
+
strbuf_addf(&sb, "%s %s %s", oid_to_hex(old_oid), oid_to_hex(new_oid), committer);
if (msg && *msg) {
strbuf_addch(&sb, '\t');
@@ -1677,14 +1877,16 @@ static int log_ref_write_fd(int fd, const struct object_id *old_oid,
}
static int files_log_ref_write(struct files_ref_store *refs,
- const char *refname, const struct object_id *old_oid,
- const struct object_id *new_oid, const char *msg,
+ const char *refname,
+ const struct object_id *old_oid,
+ const struct object_id *new_oid,
+ const char *committer_info, const char *msg,
int flags, struct strbuf *err)
{
int logfd, result;
- if (log_all_ref_updates == LOG_REFS_UNSET)
- log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
+ if (flags & REF_SKIP_CREATE_REFLOG)
+ return 0;
result = log_ref_setup(refs, refname,
flags & REF_FORCE_CREATE_REFLOG,
@@ -1695,8 +1897,7 @@ static int files_log_ref_write(struct files_ref_store *refs,
if (logfd < 0)
return 0;
- result = log_ref_write_fd(logfd, old_oid, new_oid,
- git_committer_info(0), msg);
+ result = log_ref_write_fd(logfd, old_oid, new_oid, committer_info, msg);
if (result) {
struct strbuf sb = STRBUF_INIT;
int save_errno = errno;
@@ -1725,7 +1926,8 @@ static int files_log_ref_write(struct files_ref_store *refs,
* Write oid into the open lockfile, then close the lockfile. On
* errors, rollback the lockfile, fill in *err and return -1.
*/
-static int write_ref_to_lockfile(struct ref_lock *lock,
+static int write_ref_to_lockfile(struct files_ref_store *refs,
+ struct ref_lock *lock,
const struct object_id *oid,
int skip_oid_verification, struct strbuf *err)
{
@@ -1734,7 +1936,7 @@ static int write_ref_to_lockfile(struct ref_lock *lock,
int fd;
if (!skip_oid_verification) {
- o = parse_object(the_repository, oid);
+ o = parse_object(refs->base.repo, oid);
if (!o) {
strbuf_addf(
err,
@@ -1753,7 +1955,7 @@ static int write_ref_to_lockfile(struct ref_lock *lock,
}
}
fd = get_lock_file_fd(&lock->lk);
- if (write_in_full(fd, oid_to_hex(oid), the_hash_algo->hexsz) < 0 ||
+ if (write_in_full(fd, oid_to_hex(oid), refs->base.repo->hash_algo->hexsz) < 0 ||
write_in_full(fd, &term, 1) < 0 ||
fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&lock->lk)) < 0 ||
close_ref_gently(lock) < 0) {
@@ -1773,14 +1975,14 @@ static int write_ref_to_lockfile(struct ref_lock *lock,
static int commit_ref_update(struct files_ref_store *refs,
struct ref_lock *lock,
const struct object_id *oid, const char *logmsg,
+ int flags,
struct strbuf *err)
{
files_assert_main_repository(refs, "commit_ref_update");
clear_loose_ref_cache(refs);
- if (files_log_ref_write(refs, lock->ref_name,
- &lock->old_oid, oid,
- logmsg, 0, err)) {
+ if (files_log_ref_write(refs, lock->ref_name, &lock->old_oid, oid, NULL,
+ logmsg, flags, err)) {
char *old_msg = strbuf_detach(err, NULL);
strbuf_addf(err, "cannot update the ref '%s': %s",
lock->ref_name, old_msg);
@@ -1811,9 +2013,9 @@ static int commit_ref_update(struct files_ref_store *refs,
if (head_ref && (head_flag & REF_ISSYMREF) &&
!strcmp(head_ref, lock->ref_name)) {
struct strbuf log_err = STRBUF_INIT;
- if (files_log_ref_write(refs, "HEAD",
- &lock->old_oid, oid,
- logmsg, 0, &log_err)) {
+ if (files_log_ref_write(refs, "HEAD", &lock->old_oid,
+ oid, NULL, logmsg, flags,
+ &log_err)) {
error("%s", log_err.buf);
strbuf_release(&log_err);
}
@@ -1830,10 +2032,13 @@ static int commit_ref_update(struct files_ref_store *refs,
return 0;
}
+#ifdef NO_SYMLINK_HEAD
+#define create_ref_symlink(a, b) (-1)
+#else
static int create_ref_symlink(struct ref_lock *lock, const char *target)
{
int ret = -1;
-#ifndef NO_SYMLINK_HEAD
+
char *ref_path = get_locked_file_path(&lock->lk);
unlink(ref_path);
ret = symlink(target, ref_path);
@@ -1841,70 +2046,26 @@ static int create_ref_symlink(struct ref_lock *lock, const char *target)
if (ret)
fprintf(stderr, "no symlink - falling back to symbolic ref\n");
-#endif
return ret;
}
+#endif
-static void update_symref_reflog(struct files_ref_store *refs,
- struct ref_lock *lock, const char *refname,
- const char *target, const char *logmsg)
-{
- struct strbuf err = STRBUF_INIT;
- struct object_id new_oid;
-
- if (logmsg &&
- refs_resolve_ref_unsafe(&refs->base, target,
- RESOLVE_REF_READING, &new_oid, NULL) &&
- files_log_ref_write(refs, refname, &lock->old_oid,
- &new_oid, logmsg, 0, &err)) {
- error("%s", err.buf);
- strbuf_release(&err);
- }
-}
-
-static int create_symref_locked(struct files_ref_store *refs,
- struct ref_lock *lock, const char *refname,
- const char *target, const char *logmsg)
+static int create_symref_lock(struct ref_lock *lock, const char *target,
+ struct strbuf *err)
{
- if (prefer_symlink_refs && !create_ref_symlink(lock, target)) {
- update_symref_reflog(refs, lock, refname, target, logmsg);
- return 0;
+ if (!fdopen_lock_file(&lock->lk, "w")) {
+ strbuf_addf(err, "unable to fdopen %s: %s",
+ get_lock_file_path(&lock->lk), strerror(errno));
+ return -1;
}
- if (!fdopen_lock_file(&lock->lk, "w"))
- return error("unable to fdopen %s: %s",
+ if (fprintf(get_lock_file_fp(&lock->lk), "ref: %s\n", target) < 0) {
+ strbuf_addf(err, "unable to write to %s: %s",
get_lock_file_path(&lock->lk), strerror(errno));
-
- update_symref_reflog(refs, lock, refname, target, logmsg);
-
- /* no error check; commit_ref will check ferror */
- fprintf(get_lock_file_fp(&lock->lk), "ref: %s\n", target);
- if (commit_ref(lock) < 0)
- return error("unable to write symref for %s: %s", refname,
- strerror(errno));
- return 0;
-}
-
-static int files_create_symref(struct ref_store *ref_store,
- const char *refname, const char *target,
- const char *logmsg)
-{
- struct files_ref_store *refs =
- files_downcast(ref_store, REF_STORE_WRITE, "create_symref");
- struct strbuf err = STRBUF_INIT;
- struct ref_lock *lock;
- int ret;
-
- lock = lock_ref_oid_basic(refs, refname, &err);
- if (!lock) {
- error("%s", err.buf);
- strbuf_release(&err);
return -1;
}
- ret = create_symref_locked(refs, lock, refname, target, logmsg);
- unlock_ref(lock);
- return ret;
+ return 0;
}
static int files_reflog_exists(struct ref_store *ref_store,
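
With files_create_symref() deleted above, symbolic refs are now written through the generic transaction machinery by passing a new_target string instead of an object ID. A hedged caller sketch (the nine-argument ref_transaction_update() shape is taken from other calls in this patch; the refname and log message are illustrative):

	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *t =
		ref_store_transaction_begin(ref_store, 0, &err);

	if (!t ||
	    ref_transaction_update(t, "HEAD", NULL, NULL,
				   "refs/heads/main", NULL, /* new/old target */
				   REF_NO_DEREF, "checkout: moving to main", &err) ||
	    ref_transaction_commit(t, &err))
		die("%s", err.buf);
	ref_transaction_free(t);
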
@@ -1936,7 +2097,8 @@ static int files_delete_reflog(struct ref_store *ref_store,
return ret;
}
-static int show_one_reflog_ent(struct strbuf *sb, each_reflog_ent_fn fn, void *cb_data)
+static int show_one_reflog_ent(struct files_ref_store *refs, struct strbuf *sb,
+ each_reflog_ent_fn fn, void *cb_data)
{
struct object_id ooid, noid;
char *email_end, *message;
@@ -1946,8 +2108,8 @@ static int show_one_reflog_ent(struct strbuf *sb, each_reflog_ent_fn fn, void *c
/* old SP new SP name <email> SP time TAB msg LF */
if (!sb->len || sb->buf[sb->len - 1] != '\n' ||
- parse_oid_hex(p, &ooid, &p) || *p++ != ' ' ||
- parse_oid_hex(p, &noid, &p) || *p++ != ' ' ||
+ parse_oid_hex_algop(p, &ooid, &p, refs->base.repo->hash_algo) || *p++ != ' ' ||
+ parse_oid_hex_algop(p, &noid, &p, refs->base.repo->hash_algo) || *p++ != ' ' ||
!(email_end = strchr(p, '>')) ||
email_end[1] != ' ' ||
!(timestamp = parse_timestamp(email_end + 2, &message, 10)) ||
@@ -2046,7 +2208,7 @@ static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1));
scanp = bp;
endp = bp + 1;
- ret = show_one_reflog_ent(&sb, fn, cb_data);
+ ret = show_one_reflog_ent(refs, &sb, fn, cb_data);
strbuf_reset(&sb);
if (ret)
break;
@@ -2058,7 +2220,7 @@ static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
* Process it, and we can end the loop.
*/
strbuf_splice(&sb, 0, 0, buf, endp - buf);
- ret = show_one_reflog_ent(&sb, fn, cb_data);
+ ret = show_one_reflog_ent(refs, &sb, fn, cb_data);
strbuf_reset(&sb);
break;
}
@@ -2108,7 +2270,7 @@ static int files_for_each_reflog_ent(struct ref_store *ref_store,
return -1;
while (!ret && !strbuf_getwholeline(&sb, logfp, '\n'))
- ret = show_one_reflog_ent(&sb, fn, cb_data);
+ ret = show_one_reflog_ent(refs, &sb, fn, cb_data);
fclose(logfp);
strbuf_release(&sb);
return ret;
@@ -2116,10 +2278,8 @@ static int files_for_each_reflog_ent(struct ref_store *ref_store,
struct files_reflog_iterator {
struct ref_iterator base;
-
struct ref_store *ref_store;
struct dir_iterator *dir_iterator;
- struct object_id oid;
};
static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
@@ -2130,57 +2290,43 @@ static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
int ok;
while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
- int flags;
-
if (!S_ISREG(diter->st.st_mode))
continue;
- if (diter->basename[0] == '.')
- continue;
- if (ends_with(diter->basename, ".lock"))
+ if (check_refname_format(diter->basename,
+ REFNAME_ALLOW_ONELEVEL))
continue;
- if (!refs_resolve_ref_unsafe(iter->ref_store,
- diter->relative_path, 0,
- &iter->oid, &flags)) {
- error("bad ref for %s", diter->path.buf);
- continue;
- }
-
iter->base.refname = diter->relative_path;
- iter->base.oid = &iter->oid;
- iter->base.flags = flags;
return ITER_OK;
}
- iter->dir_iterator = NULL;
- if (ref_iterator_abort(ref_iterator) == ITER_ERROR)
- ok = ITER_ERROR;
return ok;
}
+static int files_reflog_iterator_seek(struct ref_iterator *ref_iterator UNUSED,
+ const char *prefix UNUSED)
+{
+ BUG("ref_iterator_seek() called for reflog_iterator");
+}
+
static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
struct object_id *peeled UNUSED)
{
BUG("ref_iterator_peel() called for reflog_iterator");
}
-static int files_reflog_iterator_abort(struct ref_iterator *ref_iterator)
+static void files_reflog_iterator_release(struct ref_iterator *ref_iterator)
{
struct files_reflog_iterator *iter =
(struct files_reflog_iterator *)ref_iterator;
- int ok = ITER_DONE;
-
- if (iter->dir_iterator)
- ok = dir_iterator_abort(iter->dir_iterator);
-
- base_ref_iterator_free(ref_iterator);
- return ok;
+ dir_iterator_free(iter->dir_iterator);
}
static struct ref_iterator_vtable files_reflog_iterator_vtable = {
.advance = files_reflog_iterator_advance,
+ .seek = files_reflog_iterator_seek,
.peel = files_reflog_iterator_peel,
- .abort = files_reflog_iterator_abort,
+ .release = files_reflog_iterator_release,
};
static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
@@ -2193,7 +2339,7 @@ static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
strbuf_addf(&sb, "%s/logs", gitdir);
- diter = dir_iterator_begin(sb.buf, 0);
+ diter = dir_iterator_begin(sb.buf, DIR_ITERATOR_SORTED);
if (!diter) {
strbuf_release(&sb);
return empty_ref_iterator_begin();
@@ -2202,7 +2348,7 @@ static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
CALLOC_ARRAY(iter, 1);
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0);
+ base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
iter->dir_iterator = diter;
iter->ref_store = ref_store;
strbuf_release(&sb);
@@ -2210,32 +2356,6 @@ static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
return ref_iterator;
}
-static enum iterator_selection reflog_iterator_select(
- struct ref_iterator *iter_worktree,
- struct ref_iterator *iter_common,
- void *cb_data UNUSED)
-{
- if (iter_worktree) {
- /*
- * We're a bit loose here. We probably should ignore
- * common refs if they are accidentally added as
- * per-worktree refs.
- */
- return ITER_SELECT_0;
- } else if (iter_common) {
- if (parse_worktree_ref(iter_common->refname, NULL, NULL,
- NULL) == REF_WORKTREE_SHARED)
- return ITER_SELECT_1;
-
- /*
- * The main ref store may contain main worktree's
- * per-worktree refs, which should be ignored
- */
- return ITER_SKIP_1;
- } else
- return ITER_DONE;
-}
-
static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
{
struct files_ref_store *refs =
@@ -2246,9 +2366,9 @@ static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_st
return reflog_iterator_begin(ref_store, refs->gitcommondir);
} else {
return merge_ref_iterator_begin(
- 0, reflog_iterator_begin(ref_store, refs->base.gitdir),
+ reflog_iterator_begin(ref_store, refs->base.gitdir),
reflog_iterator_begin(ref_store, refs->gitcommondir),
- reflog_iterator_select, refs);
+ ref_iterator_select, refs);
}
}
@@ -2266,6 +2386,7 @@ static int split_head_update(struct ref_update *update,
struct ref_update *new_update;
if ((update->flags & REF_LOG_ONLY) ||
+ (update->flags & REF_SKIP_CREATE_REFLOG) ||
(update->flags & REF_IS_PRUNING) ||
(update->flags & REF_UPDATE_VIA_HEAD))
return 0;
@@ -2291,7 +2412,7 @@ static int split_head_update(struct ref_update *update,
transaction, "HEAD",
update->flags | REF_LOG_ONLY | REF_NO_DEREF,
&update->new_oid, &update->old_oid,
- update->msg);
+ NULL, NULL, update->committer_info, update->msg);
/*
* Add "HEAD". This insertion is O(N) in the transaction
@@ -2353,7 +2474,9 @@ static int split_symref_update(struct ref_update *update,
new_update = ref_transaction_add_update(
transaction, referent, new_flags,
- &update->new_oid, &update->old_oid,
+ update->new_target ? NULL : &update->new_oid,
+ update->old_target ? NULL : &update->old_oid,
+ update->new_target, update->old_target, NULL,
update->msg);
new_update->parent_update = update;
@@ -2383,17 +2506,6 @@ static int split_symref_update(struct ref_update *update,
}
/*
- * Return the refname under which update was originally requested.
- */
-static const char *original_update_refname(struct ref_update *update)
-{
- while (update->parent_update)
- update = update->parent_update;
-
- return update->refname;
-}
-
-/*
* Check whether the REF_HAVE_OLD and old_oid values stored in update
* are consistent with oid, which is the reference's current value. If
* everything is OK, return 0; otherwise, write an error message to
@@ -2402,29 +2514,39 @@ static const char *original_update_refname(struct ref_update *update)
static int check_old_oid(struct ref_update *update, struct object_id *oid,
struct strbuf *err)
{
+ int ret = TRANSACTION_GENERIC_ERROR;
+
if (!(update->flags & REF_HAVE_OLD) ||
oideq(oid, &update->old_oid))
return 0;
- if (is_null_oid(&update->old_oid))
+ if (is_null_oid(&update->old_oid)) {
strbuf_addf(err, "cannot lock ref '%s': "
"reference already exists",
- original_update_refname(update));
+ ref_update_original_update_refname(update));
+ ret = TRANSACTION_CREATE_EXISTS;
+ }
else if (is_null_oid(oid))
strbuf_addf(err, "cannot lock ref '%s': "
"reference is missing but expected %s",
- original_update_refname(update),
+ ref_update_original_update_refname(update),
oid_to_hex(&update->old_oid));
else
strbuf_addf(err, "cannot lock ref '%s': "
"is at %s but expected %s",
- original_update_refname(update),
+ ref_update_original_update_refname(update),
oid_to_hex(oid),
oid_to_hex(&update->old_oid));
- return -1;
+ return ret;
}
+struct files_transaction_backend_data {
+ struct ref_transaction *packed_transaction;
+ int packed_refs_locked;
+ struct strmap ref_locks;
+};
+
/*
* Prepare for carrying out update:
* - Lock the reference referred to by update.
@@ -2442,18 +2564,21 @@ static int lock_ref_for_update(struct files_ref_store *refs,
struct ref_update *update,
struct ref_transaction *transaction,
const char *head_ref,
+ struct string_list *refnames_to_check,
struct string_list *affected_refnames,
struct strbuf *err)
{
struct strbuf referent = STRBUF_INIT;
- int mustexist = (update->flags & REF_HAVE_OLD) &&
- !is_null_oid(&update->old_oid);
+ int mustexist = ref_update_expects_existing_old_ref(update);
+ struct files_transaction_backend_data *backend_data;
int ret = 0;
struct ref_lock *lock;
files_assert_main_repository(refs, "lock_ref_for_update");
- if ((update->flags & REF_HAVE_NEW) && is_null_oid(&update->new_oid))
+ backend_data = transaction->backend_data;
+
+ if ((update->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(update))
update->flags |= REF_DELETING;
if (head_ref) {
@@ -2463,18 +2588,25 @@ static int lock_ref_for_update(struct files_ref_store *refs,
goto out;
}
- ret = lock_raw_ref(refs, update->refname, mustexist,
- affected_refnames,
- &lock, &referent,
- &update->type, err);
- if (ret) {
- char *reason;
+ lock = strmap_get(&backend_data->ref_locks, update->refname);
+ if (lock) {
+ lock->count++;
+ } else {
+ ret = lock_raw_ref(refs, update->refname, mustexist,
+ refnames_to_check, affected_refnames,
+ &lock, &referent,
+ &update->type, err);
+ if (ret) {
+ char *reason;
+
+ reason = strbuf_detach(err, NULL);
+ strbuf_addf(err, "cannot lock ref '%s': %s",
+ ref_update_original_update_refname(update), reason);
+ free(reason);
+ goto out;
+ }
- reason = strbuf_detach(err, NULL);
- strbuf_addf(err, "cannot lock ref '%s': %s",
- original_update_refname(update), reason);
- free(reason);
- goto out;
+ strmap_put(&backend_data->ref_locks, update->refname, lock);
}
update->backend_data = lock;
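
The strmap lookup above pairs with the refcounting added to struct ref_lock and unlock_ref() earlier in this patch: an update touching an already-locked refname (for example a REF_LOG_ONLY companion update for HEAD) bumps lock->count instead of taking the lockfile twice, and each unlock_ref() decrements it, so the lockfile is rolled back only when the last user lets go. A condensed sketch of the pairing, assuming two updates against the same refname (acquire_lock() is a hypothetical stand-in for the lock_raw_ref() path):

	struct ref_lock *lock = acquire_lock(refs, "refs/heads/main"); /* count == 1 */
	lock->count++;    /* second update shares the lock, count == 2 */

	unlock_ref(lock); /* count drops to 1, lockfile stays held */
	unlock_ref(lock); /* count drops to 0, lockfile rolled back and freed */
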
@@ -2492,13 +2624,22 @@ static int lock_ref_for_update(struct files_ref_store *refs,
if (update->flags & REF_HAVE_OLD) {
strbuf_addf(err, "cannot lock ref '%s': "
"error reading reference",
- original_update_refname(update));
+ ref_update_original_update_refname(update));
ret = TRANSACTION_GENERIC_ERROR;
goto out;
}
- } else if (check_old_oid(update, &lock->old_oid, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
- goto out;
+ }
+
+ if (update->old_target) {
+ if (ref_update_check_old_target(referent.buf, update, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
+ }
+ } else {
+ ret = check_old_oid(update, &lock->old_oid, err);
+ if (ret) {
+ goto out;
+ }
}
} else {
/*
@@ -2517,9 +2658,23 @@ static int lock_ref_for_update(struct files_ref_store *refs,
} else {
struct ref_update *parent_update;
- if (check_old_oid(update, &lock->old_oid, err)) {
+ /*
+ * Even if the ref is a regular ref, if `old_target` is set, we
+ * fail with an error.
+ */
+ if (update->old_target) {
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "expected symref with target '%s': "
+ "but is a regular ref"),
+ ref_update_original_update_refname(update),
+ update->old_target);
ret = TRANSACTION_GENERIC_ERROR;
goto out;
+ } else {
+ ret = check_old_oid(update, &lock->old_oid, err);
+ if (ret) {
+ goto out;
+ }
}
/*
@@ -2535,9 +2690,27 @@ static int lock_ref_for_update(struct files_ref_store *refs,
}
}
- if ((update->flags & REF_HAVE_NEW) &&
- !(update->flags & REF_DELETING) &&
- !(update->flags & REF_LOG_ONLY)) {
+ if (update->new_target && !(update->flags & REF_LOG_ONLY)) {
+ if (create_symref_lock(lock, update->new_target, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
+ }
+
+ if (close_ref_gently(lock)) {
+ strbuf_addf(err, "couldn't close '%s.lock'",
+ update->refname);
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
+ }
+
+ /*
+ * Once we have created the symref lock, the commit
+ * phase of the transaction only needs to commit the lock.
+ */
+ update->flags |= REF_NEEDS_COMMIT;
+ } else if ((update->flags & REF_HAVE_NEW) &&
+ !(update->flags & REF_DELETING) &&
+ !(update->flags & REF_LOG_ONLY)) {
if (!(update->type & REF_ISSYMREF) &&
oideq(&lock->old_oid, &update->new_oid)) {
/*
@@ -2545,7 +2718,7 @@ static int lock_ref_for_update(struct files_ref_store *refs,
* value, so we don't need to write it.
*/
} else if (write_ref_to_lockfile(
- lock, &update->new_oid,
+ refs, lock, &update->new_oid,
update->flags & REF_SKIP_OID_VERIFICATION,
err)) {
char *write_err = strbuf_detach(err, NULL);
@@ -2584,11 +2757,6 @@ out:
return ret;
}
-struct files_transaction_backend_data {
- struct ref_transaction *packed_transaction;
- int packed_refs_locked;
-};
-
/*
* Unlock any references in `transaction` that are still locked, and
* mark the transaction closed.
@@ -2621,6 +2789,8 @@ static void files_transaction_cleanup(struct files_ref_store *refs,
if (backend_data->packed_refs_locked)
packed_refs_unlock(refs->packed_ref_store);
+ strmap_clear(&backend_data->ref_locks, 0);
+
free(backend_data);
}
@@ -2637,6 +2807,7 @@ static int files_transaction_prepare(struct ref_store *ref_store,
size_t i;
int ret = 0;
struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
+ struct string_list refnames_to_check = STRING_LIST_INIT_NODUP;
char *head_ref = NULL;
int head_type;
struct files_transaction_backend_data *backend_data;
@@ -2644,10 +2815,13 @@ static int files_transaction_prepare(struct ref_store *ref_store,
assert(err);
+ if (transaction->flags & REF_TRANSACTION_FLAG_INITIAL)
+ goto cleanup;
if (!transaction->nr)
goto cleanup;
CALLOC_ARRAY(backend_data, 1);
+ strmap_init(&backend_data->ref_locks);
transaction->backend_data = backend_data;
/*
@@ -2660,13 +2834,16 @@ static int files_transaction_prepare(struct ref_store *ref_store,
*/
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
- struct string_list_item *item =
- string_list_append(&affected_refnames, update->refname);
+ struct string_list_item *item;
if ((update->flags & REF_IS_PRUNING) &&
!(update->flags & REF_NO_DEREF))
BUG("REF_IS_PRUNING set without REF_NO_DEREF");
+ if (update->flags & REF_LOG_ONLY)
+ continue;
+
+ item = string_list_append(&affected_refnames, update->refname);
/*
* We store a pointer to update in item->util, but at
* the moment we never use the value of this field
@@ -2718,7 +2895,8 @@ static int files_transaction_prepare(struct ref_store *ref_store,
struct ref_update *update = transaction->updates[i];
ret = lock_ref_for_update(refs, update, transaction,
- head_ref, &affected_refnames, err);
+ head_ref, &refnames_to_check,
+ &affected_refnames, err);
if (ret)
goto cleanup;
@@ -2731,7 +2909,8 @@ static int files_transaction_prepare(struct ref_store *ref_store,
*/
if (!packed_transaction) {
packed_transaction = ref_store_transaction_begin(
- refs->packed_ref_store, err);
+ refs->packed_ref_store,
+ transaction->flags, err);
if (!packed_transaction) {
ret = TRANSACTION_GENERIC_ERROR;
goto cleanup;
@@ -2745,10 +2924,30 @@ static int files_transaction_prepare(struct ref_store *ref_store,
packed_transaction, update->refname,
REF_HAVE_NEW | REF_NO_DEREF,
&update->new_oid, NULL,
- NULL);
+ NULL, NULL, NULL, NULL);
}
}
+ /*
+ * Verify that none of the loose reference that we're about to write
+ * conflict with any existing packed references. Ideally, we'd do this
+ * check after the packed-refs are locked so that the file cannot
+ * change underneath our feet. But introducing such a lock now would
+ * probably do more harm than good as users rely on there not being a
+ * global lock with the "files" backend.
+ *
+ * Another alternative would be to do the check after the (optional)
+ * lock, but that would extend the time we spend in the globally-locked
+ * state.
+ *
+ * So instead, we accept the race for now.
+ */
+ if (refs_verify_refnames_available(refs->packed_ref_store, &refnames_to_check,
+ &affected_refnames, NULL, 0, err)) {
+ ret = TRANSACTION_NAME_CONFLICT;
+ goto cleanup;
+ }
+
if (packed_transaction) {
if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
ret = TRANSACTION_GENERIC_ERROR;
@@ -2791,6 +2990,7 @@ static int files_transaction_prepare(struct ref_store *ref_store,
cleanup:
free(head_ref);
string_list_clear(&affected_refnames, 0);
+ string_list_clear(&refnames_to_check, 0);
if (ret)
files_transaction_cleanup(refs, transaction);
@@ -2800,6 +3000,184 @@ cleanup:
return ret;
}
+static int parse_and_write_reflog(struct files_ref_store *refs,
+ struct ref_update *update,
+ struct ref_lock *lock,
+ struct strbuf *err)
+{
+ if (update->new_target) {
+ /*
+ * We want to get the resolved OID for the target, to ensure
+ * that the correct value is added to the reflog.
+ */
+ if (!refs_resolve_ref_unsafe(&refs->base, update->new_target,
+ RESOLVE_REF_READING,
+ &update->new_oid, NULL)) {
+ /*
+ * TODO: currently we skip creating reflogs for dangling
+ * symref updates. It would be nice to capture this as
+ * zero oid updates however.
+ */
+ return 0;
+ }
+ }
+
+ if (files_log_ref_write(refs, lock->ref_name, &lock->old_oid,
+ &update->new_oid, update->committer_info,
+ update->msg, update->flags, err)) {
+ char *old_msg = strbuf_detach(err, NULL);
+
+ strbuf_addf(err, "cannot update the ref '%s': %s",
+ lock->ref_name, old_msg);
+ free(old_msg);
+ unlock_ref(lock);
+ update->backend_data = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int ref_present(const char *refname, const char *referent UNUSED,
+ const struct object_id *oid UNUSED,
+ int flags UNUSED,
+ void *cb_data)
+{
+ struct string_list *affected_refnames = cb_data;
+
+ return string_list_has_string(affected_refnames, refname);
+}
+
+static int files_transaction_finish_initial(struct files_ref_store *refs,
+ struct ref_transaction *transaction,
+ struct strbuf *err)
+{
+ size_t i;
+ int ret = 0;
+ struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
+ struct string_list refnames_to_check = STRING_LIST_INIT_NODUP;
+ struct ref_transaction *packed_transaction = NULL;
+ struct ref_transaction *loose_transaction = NULL;
+
+ assert(err);
+
+ if (transaction->state != REF_TRANSACTION_PREPARED)
+ BUG("commit called for transaction that is not prepared");
+
+ /* Fail if a refname appears more than once in the transaction: */
+ for (i = 0; i < transaction->nr; i++)
+ if (!(transaction->updates[i]->flags & REF_LOG_ONLY))
+ string_list_append(&affected_refnames,
+ transaction->updates[i]->refname);
+ string_list_sort(&affected_refnames);
+ if (ref_update_reject_duplicates(&affected_refnames, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+
+ /*
+ * It's really undefined to call this function in an active
+ * repository or when there are existing references: we are
+ * only locking and changing packed-refs, so (1) any
+ * simultaneous processes might try to change a reference at
+ * the same time we do, and (2) any existing loose versions of
+ * the references that we are setting would have precedence
+ * over our values. But some remote helpers create the remote
+ * "HEAD" and "master" branches before calling this function,
+ * so here we really only check that none of the references
+ * that we are creating already exists.
+ */
+ if (refs_for_each_rawref(&refs->base, ref_present,
+ &affected_refnames))
+ BUG("initial ref transaction called with existing refs");
+
+ packed_transaction = ref_store_transaction_begin(refs->packed_ref_store,
+ transaction->flags, err);
+ if (!packed_transaction) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *update = transaction->updates[i];
+
+ if ((update->flags & REF_HAVE_OLD) &&
+ !is_null_oid(&update->old_oid))
+ BUG("initial ref transaction with old_sha1 set");
+
+ string_list_append(&refnames_to_check, update->refname);
+
+ /*
+ * packed-refs don't support symbolic refs, root refs and reflogs,
+ * so we have to queue these references via the loose transaction.
+ */
+ if (update->new_target ||
+ is_root_ref(update->refname) ||
+ (update->flags & REF_LOG_ONLY)) {
+ if (!loose_transaction) {
+ loose_transaction = ref_store_transaction_begin(&refs->base, 0, err);
+ if (!loose_transaction) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+ }
+
+ if (update->flags & REF_LOG_ONLY)
+ ref_transaction_add_update(loose_transaction, update->refname,
+ update->flags, &update->new_oid,
+ &update->old_oid, NULL, NULL,
+ update->committer_info, update->msg);
+ else
+ ref_transaction_add_update(loose_transaction, update->refname,
+ update->flags & ~REF_HAVE_OLD,
+ update->new_target ? NULL : &update->new_oid, NULL,
+ update->new_target, NULL, update->committer_info,
+ NULL);
+ } else {
+ ref_transaction_add_update(packed_transaction, update->refname,
+ update->flags & ~REF_HAVE_OLD,
+ &update->new_oid, &update->old_oid,
+ NULL, NULL, update->committer_info, NULL);
+ }
+ }
+
+ if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+
+ if (refs_verify_refnames_available(&refs->base, &refnames_to_check,
+ &affected_refnames, NULL, 1, err)) {
+ packed_refs_unlock(refs->packed_ref_store);
+ ret = TRANSACTION_NAME_CONFLICT;
+ goto cleanup;
+ }
+
+ if (ref_transaction_commit(packed_transaction, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+ packed_refs_unlock(refs->packed_ref_store);
+
+ if (loose_transaction) {
+ if (ref_transaction_prepare(loose_transaction, err) ||
+ ref_transaction_commit(loose_transaction, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ if (loose_transaction)
+ ref_transaction_free(loose_transaction);
+ if (packed_transaction)
+ ref_transaction_free(packed_transaction);
+ transaction->state = REF_TRANSACTION_CLOSED;
+ string_list_clear(&affected_refnames, 0);
+ string_list_clear(&refnames_to_check, 0);
+ return ret;
+}
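
As a caller-side illustration of the flow above — a minimal sketch, assuming the
internal helpers with the signatures used in this patch; `ref_store` and
`new_oid` are illustrative names, not part of the change:

    struct strbuf err = STRBUF_INIT;
    struct ref_transaction *tx;

    /* Begin a transaction flagged as initial, e.g. while cloning. */
    tx = ref_store_transaction_begin(ref_store,
                                     REF_TRANSACTION_FLAG_INITIAL, &err);
    if (!tx)
        die("%s", err.buf);

    /*
     * Queue a creation. Initial transactions must not carry an old
     * value, which is why neither REF_HAVE_OLD nor an old_oid is given.
     */
    ref_transaction_add_update(tx, "refs/heads/main", REF_HAVE_NEW,
                               &new_oid, NULL, NULL, NULL, NULL, NULL);

    /* Committing ends up in files_transaction_finish_initial(). */
    if (ref_transaction_commit(tx, &err))
        die("%s", err.buf);
    ref_transaction_free(tx);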
+
static int files_transaction_finish(struct ref_store *ref_store,
struct ref_transaction *transaction,
struct strbuf *err)
@@ -2815,6 +3193,8 @@ static int files_transaction_finish(struct ref_store *ref_store,
assert(err);
+ if (transaction->flags & REF_TRANSACTION_FLAG_INITIAL)
+ return files_transaction_finish_initial(refs, transaction, err);
if (!transaction->nr) {
transaction->state = REF_TRANSACTION_CLOSED;
return 0;
@@ -2830,23 +3210,20 @@ static int files_transaction_finish(struct ref_store *ref_store,
if (update->flags & REF_NEEDS_COMMIT ||
update->flags & REF_LOG_ONLY) {
- if (files_log_ref_write(refs,
- lock->ref_name,
- &lock->old_oid,
- &update->new_oid,
- update->msg, update->flags,
- err)) {
- char *old_msg = strbuf_detach(err, NULL);
-
- strbuf_addf(err, "cannot update the ref '%s': %s",
- lock->ref_name, old_msg);
- free(old_msg);
- unlock_ref(lock);
- update->backend_data = NULL;
+ if (parse_and_write_reflog(refs, update, lock, err)) {
ret = TRANSACTION_GENERIC_ERROR;
goto cleanup;
}
}
+
+ /*
+	 * We first try to create a symlink. If that succeeds, we continue
+	 * to the next update; if not, we fall back to creating a regular
+	 * symref.
+ */
+ if (update->new_target && refs->prefer_symlink_refs)
+ if (!create_ref_symlink(lock, update->new_target))
+ continue;
+
if (update->flags & REF_NEEDS_COMMIT) {
clear_loose_ref_cache(refs);
if (commit_ref(lock)) {
@@ -2951,106 +3328,6 @@ static int files_transaction_abort(struct ref_store *ref_store,
return 0;
}
-static int ref_present(const char *refname,
- const struct object_id *oid UNUSED,
- int flags UNUSED,
- void *cb_data)
-{
- struct string_list *affected_refnames = cb_data;
-
- return string_list_has_string(affected_refnames, refname);
-}
-
-static int files_initial_transaction_commit(struct ref_store *ref_store,
- struct ref_transaction *transaction,
- struct strbuf *err)
-{
- struct files_ref_store *refs =
- files_downcast(ref_store, REF_STORE_WRITE,
- "initial_ref_transaction_commit");
- size_t i;
- int ret = 0;
- struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
- struct ref_transaction *packed_transaction = NULL;
-
- assert(err);
-
- if (transaction->state != REF_TRANSACTION_OPEN)
- BUG("commit called for transaction that is not open");
-
- /* Fail if a refname appears more than once in the transaction: */
- for (i = 0; i < transaction->nr; i++)
- string_list_append(&affected_refnames,
- transaction->updates[i]->refname);
- string_list_sort(&affected_refnames);
- if (ref_update_reject_duplicates(&affected_refnames, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
- goto cleanup;
- }
-
- /*
- * It's really undefined to call this function in an active
- * repository or when there are existing references: we are
- * only locking and changing packed-refs, so (1) any
- * simultaneous processes might try to change a reference at
- * the same time we do, and (2) any existing loose versions of
- * the references that we are setting would have precedence
- * over our values. But some remote helpers create the remote
- * "HEAD" and "master" branches before calling this function,
- * so here we really only check that none of the references
- * that we are creating already exists.
- */
- if (refs_for_each_rawref(&refs->base, ref_present,
- &affected_refnames))
- BUG("initial ref transaction called with existing refs");
-
- packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, err);
- if (!packed_transaction) {
- ret = TRANSACTION_GENERIC_ERROR;
- goto cleanup;
- }
-
- for (i = 0; i < transaction->nr; i++) {
- struct ref_update *update = transaction->updates[i];
-
- if ((update->flags & REF_HAVE_OLD) &&
- !is_null_oid(&update->old_oid))
- BUG("initial ref transaction with old_sha1 set");
- if (refs_verify_refname_available(&refs->base, update->refname,
- &affected_refnames, NULL,
- err)) {
- ret = TRANSACTION_NAME_CONFLICT;
- goto cleanup;
- }
-
- /*
- * Add a reference creation for this reference to the
- * packed-refs transaction:
- */
- ref_transaction_add_update(packed_transaction, update->refname,
- update->flags & ~REF_HAVE_OLD,
- &update->new_oid, &update->old_oid,
- NULL);
- }
-
- if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
- goto cleanup;
- }
-
- if (initial_ref_transaction_commit(packed_transaction, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
- }
-
- packed_refs_unlock(refs->packed_ref_store);
-cleanup:
- if (packed_transaction)
- ref_transaction_free(packed_transaction);
- transaction->state = REF_TRANSACTION_CLOSED;
- string_list_clear(&affected_refnames, 0);
- return ret;
-}
-
struct expire_reflog_cb {
reflog_expiry_should_prune_fn *should_prune_fn;
void *policy_cb;
@@ -3194,7 +3471,7 @@ static int files_reflog_expire(struct ref_store *ref_store,
rollback_lock_file(&reflog_lock);
} else if (update &&
(write_in_full(get_lock_file_fd(&lock->lk),
- oid_to_hex(&cb.last_kept_oid), the_hash_algo->hexsz) < 0 ||
+ oid_to_hex(&cb.last_kept_oid), refs->base.repo->hash_algo->hexsz) < 0 ||
write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 ||
close_ref_gently(lock) < 0)) {
status |= error("couldn't write %s",
@@ -3218,12 +3495,12 @@ static int files_reflog_expire(struct ref_store *ref_store,
return -1;
}
-static int files_init_db(struct ref_store *ref_store,
- int flags,
- struct strbuf *err UNUSED)
+static int files_ref_store_create_on_disk(struct ref_store *ref_store,
+ int flags,
+ struct strbuf *err UNUSED)
{
struct files_ref_store *refs =
- files_downcast(ref_store, REF_STORE_WRITE, "init_db");
+ files_downcast(ref_store, REF_STORE_WRITE, "create");
struct strbuf sb = STRBUF_INIT;
/*
@@ -3239,41 +3516,370 @@ static int files_init_db(struct ref_store *ref_store,
* they do not understand the reference format extension.
*/
strbuf_addf(&sb, "%s/refs", ref_store->gitdir);
- safe_create_dir(sb.buf, 1);
- adjust_shared_perm(sb.buf);
+ safe_create_dir(the_repository, sb.buf, 1);
+ adjust_shared_perm(the_repository, sb.buf);
/*
* There is no need to create directories for common refs when creating
* a worktree ref store.
*/
- if (!(flags & REFS_INIT_DB_IS_WORKTREE)) {
+ if (!(flags & REF_STORE_CREATE_ON_DISK_IS_WORKTREE)) {
/*
* Create .git/refs/{heads,tags}
*/
strbuf_reset(&sb);
files_ref_path(refs, &sb, "refs/heads");
- safe_create_dir(sb.buf, 1);
+ safe_create_dir(the_repository, sb.buf, 1);
strbuf_reset(&sb);
files_ref_path(refs, &sb, "refs/tags");
- safe_create_dir(sb.buf, 1);
+ safe_create_dir(the_repository, sb.buf, 1);
}
strbuf_release(&sb);
return 0;
}
+struct remove_one_root_ref_data {
+ const char *gitdir;
+ struct strbuf *err;
+};
+
+static int remove_one_root_ref(const char *refname,
+ void *cb_data)
+{
+ struct remove_one_root_ref_data *data = cb_data;
+ struct strbuf buf = STRBUF_INIT;
+ int ret = 0;
+
+ strbuf_addf(&buf, "%s/%s", data->gitdir, refname);
+
+ ret = unlink(buf.buf);
+ if (ret < 0)
+ strbuf_addf(data->err, "could not delete %s: %s\n",
+ refname, strerror(errno));
+
+ strbuf_release(&buf);
+ return ret;
+}
+
+static int files_ref_store_remove_on_disk(struct ref_store *ref_store,
+ struct strbuf *err)
+{
+ struct files_ref_store *refs =
+ files_downcast(ref_store, REF_STORE_WRITE, "remove");
+ struct remove_one_root_ref_data data = {
+ .gitdir = refs->base.gitdir,
+ .err = err,
+ };
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 0;
+
+ strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
+ if (remove_dir_recursively(&sb, 0) < 0) {
+ strbuf_addf(err, "could not delete refs: %s",
+ strerror(errno));
+ ret = -1;
+ }
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/logs", refs->base.gitdir);
+ if (remove_dir_recursively(&sb, 0) < 0) {
+ strbuf_addf(err, "could not delete logs: %s",
+ strerror(errno));
+ ret = -1;
+ }
+ strbuf_reset(&sb);
+
+ if (for_each_root_ref(refs, remove_one_root_ref, &data) < 0)
+ ret = -1;
+
+ if (ref_store_remove_on_disk(refs->packed_ref_store, err) < 0)
+ ret = -1;
+
+ strbuf_release(&sb);
+ return ret;
+}
+
+/*
+ * Refs and reflogs share a unified interface for scanning a whole
+ * directory. A function of this type is used as the callback for each
+ * regular file or symlink found in the directory.
+ */
+typedef int (*files_fsck_refs_fn)(struct ref_store *ref_store,
+ struct fsck_options *o,
+ const char *refname,
+ struct dir_iterator *iter);
+
+static int files_fsck_symref_target(struct fsck_options *o,
+ struct fsck_ref_report *report,
+ struct strbuf *referent,
+ unsigned int symbolic_link)
+{
+ int is_referent_root;
+ char orig_last_byte;
+ size_t orig_len;
+ int ret = 0;
+
+ orig_len = referent->len;
+ orig_last_byte = referent->buf[orig_len - 1];
+ if (!symbolic_link)
+ strbuf_rtrim(referent);
+
+ is_referent_root = is_root_ref(referent->buf);
+ if (!is_referent_root &&
+ !starts_with(referent->buf, "refs/") &&
+ !starts_with(referent->buf, "worktrees/")) {
+ ret = fsck_report_ref(o, report,
+ FSCK_MSG_SYMREF_TARGET_IS_NOT_A_REF,
+ "points to non-ref target '%s'", referent->buf);
+	}
+
+ if (!is_referent_root && check_refname_format(referent->buf, 0)) {
+ ret = fsck_report_ref(o, report,
+ FSCK_MSG_BAD_REFERENT_NAME,
+ "points to invalid refname '%s'", referent->buf);
+ goto out;
+ }
+
+ if (symbolic_link)
+ goto out;
+
+ if (referent->len == orig_len ||
+ (referent->len < orig_len && orig_last_byte != '\n')) {
+ ret = fsck_report_ref(o, report,
+ FSCK_MSG_REF_MISSING_NEWLINE,
+ "misses LF at the end");
+ }
+
+ if (referent->len != orig_len && referent->len != orig_len - 1) {
+ ret = fsck_report_ref(o, report,
+ FSCK_MSG_TRAILING_REF_CONTENT,
+ "has trailing whitespaces or newlines");
+ }
+
+out:
+ return ret;
+}
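
To make the target checks above concrete, here are a few illustrative referent
values and the reports they trigger ("\n" marks the trailing newline):

    refs/heads/main\n     -> passes all checks
    refs/heads/ma in\n    -> FSCK_MSG_BAD_REFERENT_NAME (invalid refname)
    objects/info\n        -> FSCK_MSG_SYMREF_TARGET_IS_NOT_A_REF (non-ref target)
    refs/heads/main       -> FSCK_MSG_REF_MISSING_NEWLINE (no trailing LF)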
+
+static int files_fsck_refs_content(struct ref_store *ref_store,
+ struct fsck_options *o,
+ const char *target_name,
+ struct dir_iterator *iter)
+{
+ struct strbuf ref_content = STRBUF_INIT;
+ struct strbuf abs_gitdir = STRBUF_INIT;
+ struct strbuf referent = STRBUF_INIT;
+ struct fsck_ref_report report = { 0 };
+ const char *trailing = NULL;
+ unsigned int type = 0;
+ int failure_errno = 0;
+ struct object_id oid;
+ int ret = 0;
+
+ report.path = target_name;
+
+ if (S_ISLNK(iter->st.st_mode)) {
+ const char *relative_referent_path = NULL;
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_SYMLINK_REF,
+ "use deprecated symbolic link for symref");
+
+ strbuf_add_absolute_path(&abs_gitdir, ref_store->repo->gitdir);
+ strbuf_normalize_path(&abs_gitdir);
+ if (!is_dir_sep(abs_gitdir.buf[abs_gitdir.len - 1]))
+ strbuf_addch(&abs_gitdir, '/');
+
+ strbuf_add_real_path(&ref_content, iter->path.buf);
+ skip_prefix(ref_content.buf, abs_gitdir.buf,
+ &relative_referent_path);
+
+ if (relative_referent_path)
+ strbuf_addstr(&referent, relative_referent_path);
+ else
+ strbuf_addbuf(&referent, &ref_content);
+
+ ret |= files_fsck_symref_target(o, &report, &referent, 1);
+ goto cleanup;
+ }
+
+ if (strbuf_read_file(&ref_content, iter->path.buf, 0) < 0) {
+ /*
+		 * The ref file could have been removed by another, concurrent
+		 * process. Ignore this error and continue to the next ref.
+ */
+ if (errno == ENOENT)
+ goto cleanup;
+
+ ret = error_errno(_("cannot read ref file '%s'"), iter->path.buf);
+ goto cleanup;
+ }
+
+ if (parse_loose_ref_contents(ref_store->repo->hash_algo,
+ ref_content.buf, &oid, &referent,
+ &type, &trailing, &failure_errno)) {
+ strbuf_rtrim(&ref_content);
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_REF_CONTENT,
+ "%s", ref_content.buf);
+ goto cleanup;
+ }
+
+ if (!(type & REF_ISSYMREF)) {
+ if (!*trailing) {
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_REF_MISSING_NEWLINE,
+ "misses LF at the end");
+ goto cleanup;
+ }
+ if (*trailing != '\n' || *(trailing + 1)) {
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_TRAILING_REF_CONTENT,
+ "has trailing garbage: '%s'", trailing);
+ goto cleanup;
+ }
+ } else {
+ ret = files_fsck_symref_target(o, &report, &referent, 0);
+ goto cleanup;
+ }
+
+cleanup:
+ strbuf_release(&ref_content);
+ strbuf_release(&referent);
+ strbuf_release(&abs_gitdir);
+ return ret;
+}
+
+static int files_fsck_refs_name(struct ref_store *ref_store UNUSED,
+ struct fsck_options *o,
+ const char *refname,
+ struct dir_iterator *iter)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 0;
+
+ /*
+	 * Ignore files ending with ".lock", as they may be lock files.
+	 * However, do not allow a bare ".lock" file.
+ */
+ if (iter->basename[0] != '.' && ends_with(iter->basename, ".lock"))
+ goto cleanup;
+
+ /*
+	 * This works for now because we never check the root refs here.
+ */
+ if (check_refname_format(refname, 0)) {
+ struct fsck_ref_report report = { 0 };
+
+ report.path = refname;
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_REF_NAME,
+ "invalid refname format");
+ }
+
+cleanup:
+ strbuf_release(&sb);
+ return ret;
+}
+
+static int files_fsck_refs_dir(struct ref_store *ref_store,
+ struct fsck_options *o,
+ const char *refs_check_dir,
+ struct worktree *wt,
+ files_fsck_refs_fn *fsck_refs_fn)
+{
+ struct strbuf refname = STRBUF_INIT;
+ struct strbuf sb = STRBUF_INIT;
+ struct dir_iterator *iter;
+ int iter_status;
+ int ret = 0;
+
+ strbuf_addf(&sb, "%s/%s", ref_store->gitdir, refs_check_dir);
+
+ iter = dir_iterator_begin(sb.buf, 0);
+ if (!iter) {
+ ret = error_errno(_("cannot open directory %s"), sb.buf);
+ goto out;
+ }
+
+ while ((iter_status = dir_iterator_advance(iter)) == ITER_OK) {
+ if (S_ISDIR(iter->st.st_mode)) {
+ continue;
+ } else if (S_ISREG(iter->st.st_mode) ||
+ S_ISLNK(iter->st.st_mode)) {
+ strbuf_reset(&refname);
+
+ if (!is_main_worktree(wt))
+ strbuf_addf(&refname, "worktrees/%s/", wt->id);
+ strbuf_addf(&refname, "%s/%s", refs_check_dir,
+ iter->relative_path);
+
+ if (o->verbose)
+ fprintf_ln(stderr, "Checking %s", refname.buf);
+
+ for (size_t i = 0; fsck_refs_fn[i]; i++) {
+ if (fsck_refs_fn[i](ref_store, o, refname.buf, iter))
+ ret = -1;
+ }
+ } else {
+ struct fsck_ref_report report = { .path = iter->basename };
+ if (fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_REF_FILETYPE,
+ "unexpected file type"))
+ ret = -1;
+ }
+ }
+
+ if (iter_status != ITER_DONE)
+ ret = error(_("failed to iterate over '%s'"), sb.buf);
+
+out:
+ dir_iterator_free(iter);
+ strbuf_release(&sb);
+ strbuf_release(&refname);
+ return ret;
+}
+
+static int files_fsck_refs(struct ref_store *ref_store,
+ struct fsck_options *o,
+ struct worktree *wt)
+{
+	files_fsck_refs_fn fsck_refs_fn[] = {
+ files_fsck_refs_name,
+ files_fsck_refs_content,
+ NULL,
+ };
+
+ if (o->verbose)
+ fprintf_ln(stderr, _("Checking references consistency"));
+ return files_fsck_refs_dir(ref_store, o, "refs", wt, fsck_refs_fn);
+}
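
The NULL-terminated function array keeps the check pipeline extensible: a
hypothetical additional check would simply be appended before the sentinel.

    files_fsck_refs_fn fsck_refs_fn[] = {
        files_fsck_refs_name,
        files_fsck_refs_content,
        files_fsck_refs_mycheck,  /* hypothetical extra check */
        NULL,
    };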
+
+static int files_fsck(struct ref_store *ref_store,
+ struct fsck_options *o,
+ struct worktree *wt)
+{
+ struct files_ref_store *refs =
+ files_downcast(ref_store, REF_STORE_READ, "fsck");
+
+ return files_fsck_refs(ref_store, o, wt) |
+ refs->packed_ref_store->be->fsck(refs->packed_ref_store, o, wt);
+}
+
struct ref_storage_be refs_be_files = {
.name = "files",
- .init = files_ref_store_create,
- .init_db = files_init_db,
+ .init = files_ref_store_init,
+ .release = files_ref_store_release,
+ .create_on_disk = files_ref_store_create_on_disk,
+ .remove_on_disk = files_ref_store_remove_on_disk,
+
.transaction_prepare = files_transaction_prepare,
.transaction_finish = files_transaction_finish,
.transaction_abort = files_transaction_abort,
- .initial_transaction_commit = files_initial_transaction_commit,
.pack_refs = files_pack_refs,
- .create_symref = files_create_symref,
.rename_ref = files_rename_ref,
.copy_ref = files_copy_ref,
@@ -3287,5 +3893,7 @@ struct ref_storage_be refs_be_files = {
.reflog_exists = files_reflog_exists,
.create_reflog = files_create_reflog,
.delete_reflog = files_delete_reflog,
- .reflog_expire = files_reflog_expire
+ .reflog_expire = files_reflog_expire,
+
+ .fsck = files_fsck,
};
diff --git a/refs/iterator.c b/refs/iterator.c
index 6b680f610e..766d96e795 100644
--- a/refs/iterator.c
+++ b/refs/iterator.c
@@ -3,6 +3,8 @@
* documentation about the design and use of reference iterators.
*/
+#define DISABLE_SIGN_COMPARE_WARNINGS
+
#include "git-compat-util.h"
#include "refs.h"
#include "refs/refs-internal.h"
@@ -13,42 +15,51 @@ int ref_iterator_advance(struct ref_iterator *ref_iterator)
return ref_iterator->vtable->advance(ref_iterator);
}
+int ref_iterator_seek(struct ref_iterator *ref_iterator,
+ const char *prefix)
+{
+ return ref_iterator->vtable->seek(ref_iterator, prefix);
+}
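
A minimal sketch of the resulting iterator lifecycle — seek, drain, free —
using only functions from this file; the counting wrapper itself is made up:

    static int count_refs_with_prefix(struct ref_iterator *iter,
                                      const char *prefix)
    {
        int ok, n = 0;

        if (ref_iterator_seek(iter, prefix) < 0) {
            ref_iterator_free(iter);
            return -1;
        }
        while ((ok = ref_iterator_advance(iter)) == ITER_OK)
            n++;
        ref_iterator_free(iter);
        return ok == ITER_ERROR ? -1 : n;
    }

Note that, unlike the old abort-based scheme, the iterator now has to be freed
explicitly even after ITER_DONE.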
+
int ref_iterator_peel(struct ref_iterator *ref_iterator,
struct object_id *peeled)
{
return ref_iterator->vtable->peel(ref_iterator, peeled);
}
-int ref_iterator_abort(struct ref_iterator *ref_iterator)
+void ref_iterator_free(struct ref_iterator *ref_iterator)
{
- return ref_iterator->vtable->abort(ref_iterator);
+ if (ref_iterator) {
+ ref_iterator->vtable->release(ref_iterator);
+ /* Help make use-after-free bugs fail quickly: */
+ ref_iterator->vtable = NULL;
+ free(ref_iterator);
+ }
}
void base_ref_iterator_init(struct ref_iterator *iter,
- struct ref_iterator_vtable *vtable,
- int ordered)
+ struct ref_iterator_vtable *vtable)
{
iter->vtable = vtable;
- iter->ordered = !!ordered;
iter->refname = NULL;
+ iter->referent = NULL;
iter->oid = NULL;
iter->flags = 0;
}
-void base_ref_iterator_free(struct ref_iterator *iter)
-{
- /* Help make use-after-free bugs fail quickly: */
- iter->vtable = NULL;
- free(iter);
-}
-
struct empty_ref_iterator {
struct ref_iterator base;
};
-static int empty_ref_iterator_advance(struct ref_iterator *ref_iterator)
+static int empty_ref_iterator_advance(struct ref_iterator *ref_iterator UNUSED)
{
- return ref_iterator_abort(ref_iterator);
+ return ITER_DONE;
+}
+
+static int empty_ref_iterator_seek(struct ref_iterator *ref_iterator UNUSED,
+ const char *prefix UNUSED)
+{
+ return 0;
}
static int empty_ref_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
@@ -57,16 +68,15 @@ static int empty_ref_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
BUG("peel called for empty iterator");
}
-static int empty_ref_iterator_abort(struct ref_iterator *ref_iterator)
+static void empty_ref_iterator_release(struct ref_iterator *ref_iterator UNUSED)
{
- base_ref_iterator_free(ref_iterator);
- return ITER_DONE;
}
static struct ref_iterator_vtable empty_ref_iterator_vtable = {
.advance = empty_ref_iterator_advance,
+ .seek = empty_ref_iterator_seek,
.peel = empty_ref_iterator_peel,
- .abort = empty_ref_iterator_abort,
+ .release = empty_ref_iterator_release,
};
struct ref_iterator *empty_ref_iterator_begin(void)
@@ -74,7 +84,7 @@ struct ref_iterator *empty_ref_iterator_begin(void)
struct empty_ref_iterator *iter = xcalloc(1, sizeof(*iter));
struct ref_iterator *ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable, 1);
+ base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable);
return ref_iterator;
}
@@ -86,7 +96,8 @@ int is_empty_ref_iterator(struct ref_iterator *ref_iterator)
struct merge_ref_iterator {
struct ref_iterator base;
- struct ref_iterator *iter0, *iter1;
+ struct ref_iterator *iter0, *iter0_owned;
+ struct ref_iterator *iter1, *iter1_owned;
ref_iterator_select_fn *select;
void *cb_data;
@@ -98,6 +109,49 @@ struct merge_ref_iterator {
struct ref_iterator **current;
};
+enum iterator_selection ref_iterator_select(struct ref_iterator *iter_worktree,
+ struct ref_iterator *iter_common,
+ void *cb_data UNUSED)
+{
+ if (iter_worktree && !iter_common) {
+ /*
+ * Return the worktree ref if there are no more common refs.
+ */
+ return ITER_SELECT_0;
+ } else if (iter_common) {
+ /*
+		 * When we have both pending worktree and common refs, we
+		 * need to yield them in lexicographical order. Worktree
+ * refs that have the same name as common refs shadow the
+ * latter.
+ */
+ if (iter_worktree) {
+ int cmp = strcmp(iter_worktree->refname,
+ iter_common->refname);
+ if (cmp < 0)
+ return ITER_SELECT_0;
+ else if (!cmp)
+ return ITER_SELECT_0_SKIP_1;
+ }
+
+ /*
+ * We now know that the lexicographically-next ref is a common
+		 * ref. When the common ref is a shared one, we return it.
+ */
+ if (parse_worktree_ref(iter_common->refname, NULL, NULL,
+ NULL) == REF_WORKTREE_SHARED)
+ return ITER_SELECT_1;
+
+ /*
+ * Otherwise, if the common ref is a per-worktree ref we skip
+ * it because it would belong to the main worktree, not ours.
+ */
+ return ITER_SKIP_1;
+ } else {
+ return ITER_DONE;
+ }
+}
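
This select callback is meant to be plugged into merge_ref_iterator_begin()
(whose updated signature appears below); a sketch with illustrative iterator
names:

    struct ref_iterator *iter =
        merge_ref_iterator_begin(worktree_iter, common_iter,
                                 ref_iterator_select, NULL);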
+
static int merge_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
struct merge_ref_iterator *iter =
@@ -135,9 +189,8 @@ static int merge_ref_iterator_advance(struct ref_iterator *ref_iterator)
iter->select(iter->iter0, iter->iter1, iter->cb_data);
if (selection == ITER_SELECT_DONE) {
- return ref_iterator_abort(ref_iterator);
+ return ITER_DONE;
} else if (selection == ITER_SELECT_ERROR) {
- ref_iterator_abort(ref_iterator);
return ITER_ERROR;
}
@@ -158,6 +211,7 @@ static int merge_ref_iterator_advance(struct ref_iterator *ref_iterator)
}
if (selection & ITER_YIELD_CURRENT) {
+ iter->base.referent = (*iter->current)->referent;
iter->base.refname = (*iter->current)->refname;
iter->base.oid = (*iter->current)->oid;
iter->base.flags = (*iter->current)->flags;
@@ -166,10 +220,31 @@ static int merge_ref_iterator_advance(struct ref_iterator *ref_iterator)
}
error:
- ref_iterator_abort(ref_iterator);
return ITER_ERROR;
}
+static int merge_ref_iterator_seek(struct ref_iterator *ref_iterator,
+ const char *prefix)
+{
+ struct merge_ref_iterator *iter =
+ (struct merge_ref_iterator *)ref_iterator;
+ int ret;
+
+ iter->current = NULL;
+ iter->iter0 = iter->iter0_owned;
+ iter->iter1 = iter->iter1_owned;
+
+ ret = ref_iterator_seek(iter->iter0, prefix);
+ if (ret < 0)
+ return ret;
+
+ ret = ref_iterator_seek(iter->iter1, prefix);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int merge_ref_iterator_peel(struct ref_iterator *ref_iterator,
struct object_id *peeled)
{
@@ -182,32 +257,22 @@ static int merge_ref_iterator_peel(struct ref_iterator *ref_iterator,
return ref_iterator_peel(*iter->current, peeled);
}
-static int merge_ref_iterator_abort(struct ref_iterator *ref_iterator)
+static void merge_ref_iterator_release(struct ref_iterator *ref_iterator)
{
struct merge_ref_iterator *iter =
(struct merge_ref_iterator *)ref_iterator;
- int ok = ITER_DONE;
-
- if (iter->iter0) {
- if (ref_iterator_abort(iter->iter0) != ITER_DONE)
- ok = ITER_ERROR;
- }
- if (iter->iter1) {
- if (ref_iterator_abort(iter->iter1) != ITER_DONE)
- ok = ITER_ERROR;
- }
- base_ref_iterator_free(ref_iterator);
- return ok;
+ ref_iterator_free(iter->iter0_owned);
+ ref_iterator_free(iter->iter1_owned);
}
static struct ref_iterator_vtable merge_ref_iterator_vtable = {
.advance = merge_ref_iterator_advance,
+ .seek = merge_ref_iterator_seek,
.peel = merge_ref_iterator_peel,
- .abort = merge_ref_iterator_abort,
+ .release = merge_ref_iterator_release,
};
struct ref_iterator *merge_ref_iterator_begin(
- int ordered,
struct ref_iterator *iter0, struct ref_iterator *iter1,
ref_iterator_select_fn *select, void *cb_data)
{
@@ -222,9 +287,9 @@ struct ref_iterator *merge_ref_iterator_begin(
* references through only if they exist in both iterators.
*/
- base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable, ordered);
- iter->iter0 = iter0;
- iter->iter1 = iter1;
+ base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable);
+ iter->iter0 = iter->iter0_owned = iter0;
+ iter->iter1 = iter->iter1_owned = iter1;
iter->select = select;
iter->cb_data = cb_data;
iter->current = NULL;
@@ -266,17 +331,14 @@ struct ref_iterator *overlay_ref_iterator_begin(
* them.
*/
if (is_empty_ref_iterator(front)) {
- ref_iterator_abort(front);
+ ref_iterator_free(front);
return back;
} else if (is_empty_ref_iterator(back)) {
- ref_iterator_abort(back);
+ ref_iterator_free(back);
return front;
- } else if (!front->ordered || !back->ordered) {
- BUG("overlay_ref_iterator requires ordered inputs");
}
- return merge_ref_iterator_begin(1, front, back,
- overlay_iterator_select, NULL);
+ return merge_ref_iterator_begin(front, back, overlay_iterator_select, NULL);
}
struct prefix_ref_iterator {
@@ -309,23 +371,15 @@ static int prefix_ref_iterator_advance(struct ref_iterator *ref_iterator)
while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
int cmp = compare_prefix(iter->iter0->refname, iter->prefix);
-
if (cmp < 0)
continue;
-
- if (cmp > 0) {
- /*
- * If the source iterator is ordered, then we
- * can stop the iteration as soon as we see a
- * refname that comes after the prefix:
- */
- if (iter->iter0->ordered) {
- ok = ref_iterator_abort(iter->iter0);
- break;
- } else {
- continue;
- }
- }
+ /*
+ * As the source iterator is ordered, we
+ * can stop the iteration as soon as we see a
+ * refname that comes after the prefix:
+ */
+ if (cmp > 0)
+ return ITER_DONE;
if (iter->trim) {
/*
@@ -349,12 +403,19 @@ static int prefix_ref_iterator_advance(struct ref_iterator *ref_iterator)
return ITER_OK;
}
- iter->iter0 = NULL;
- if (ref_iterator_abort(ref_iterator) != ITER_DONE)
- return ITER_ERROR;
return ok;
}
+static int prefix_ref_iterator_seek(struct ref_iterator *ref_iterator,
+ const char *prefix)
+{
+ struct prefix_ref_iterator *iter =
+ (struct prefix_ref_iterator *)ref_iterator;
+ free(iter->prefix);
+ iter->prefix = xstrdup_or_null(prefix);
+ return ref_iterator_seek(iter->iter0, prefix);
+}
+
static int prefix_ref_iterator_peel(struct ref_iterator *ref_iterator,
struct object_id *peeled)
{
@@ -364,23 +425,19 @@ static int prefix_ref_iterator_peel(struct ref_iterator *ref_iterator,
return ref_iterator_peel(iter->iter0, peeled);
}
-static int prefix_ref_iterator_abort(struct ref_iterator *ref_iterator)
+static void prefix_ref_iterator_release(struct ref_iterator *ref_iterator)
{
struct prefix_ref_iterator *iter =
(struct prefix_ref_iterator *)ref_iterator;
- int ok = ITER_DONE;
-
- if (iter->iter0)
- ok = ref_iterator_abort(iter->iter0);
+ ref_iterator_free(iter->iter0);
free(iter->prefix);
- base_ref_iterator_free(ref_iterator);
- return ok;
}
static struct ref_iterator_vtable prefix_ref_iterator_vtable = {
.advance = prefix_ref_iterator_advance,
+ .seek = prefix_ref_iterator_seek,
.peel = prefix_ref_iterator_peel,
- .abort = prefix_ref_iterator_abort,
+ .release = prefix_ref_iterator_release,
};
struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
@@ -396,7 +453,7 @@ struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
CALLOC_ARRAY(iter, 1);
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable, iter0->ordered);
+ base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable);
iter->iter0 = iter0;
iter->prefix = xstrdup(prefix);
@@ -407,29 +464,23 @@ struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
struct ref_iterator *current_ref_iter = NULL;
-int do_for_each_repo_ref_iterator(struct repository *r, struct ref_iterator *iter,
- each_repo_ref_fn fn, void *cb_data)
+int do_for_each_ref_iterator(struct ref_iterator *iter,
+ each_ref_fn fn, void *cb_data)
{
int retval = 0, ok;
struct ref_iterator *old_ref_iter = current_ref_iter;
current_ref_iter = iter;
while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
- retval = fn(r, iter->refname, iter->oid, iter->flags, cb_data);
- if (retval) {
- /*
- * If ref_iterator_abort() returns ITER_ERROR,
- * we ignore that error in deference to the
- * callback function's return value.
- */
- ref_iterator_abort(iter);
+ retval = fn(iter->refname, iter->referent, iter->oid, iter->flags, cb_data);
+ if (retval)
goto out;
- }
}
out:
current_ref_iter = old_ref_iter;
if (ok == ITER_ERROR)
- return -1;
+ retval = -1;
+ ref_iterator_free(iter);
return retval;
}
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index a499a91c7e..b4289a7d9c 100644
--- a/refs/packed-backend.c
+++ b/refs/packed-backend.c
@@ -1,5 +1,10 @@
+#define USE_THE_REPOSITORY_VARIABLE
+#define DISABLE_SIGN_COMPARE_WARNINGS
+
#include "../git-compat-util.h"
#include "../config.h"
+#include "../dir.h"
+#include "../fsck.h"
#include "../gettext.h"
#include "../hash.h"
#include "../hex.h"
@@ -10,6 +15,7 @@
#include "../lockfile.h"
#include "../chdir-notify.h"
#include "../statinfo.h"
+#include "../worktree.h"
#include "../wrapper.h"
#include "../write-or-die.h"
#include "../trace2.h"
@@ -200,9 +206,14 @@ static int release_snapshot(struct snapshot *snapshot)
}
}
-struct ref_store *packed_ref_store_create(struct repository *repo,
- const char *gitdir,
- unsigned int store_flags)
+static size_t snapshot_hexsz(const struct snapshot *snapshot)
+{
+ return snapshot->refs->base.repo->hash_algo->hexsz;
+}
+
+struct ref_store *packed_ref_store_init(struct repository *repo,
+ const char *gitdir,
+ unsigned int store_flags)
{
struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
struct ref_store *ref_store = (struct ref_store *)refs;
@@ -252,6 +263,15 @@ static void clear_snapshot(struct packed_ref_store *refs)
}
}
+static void packed_ref_store_release(struct ref_store *ref_store)
+{
+ struct packed_ref_store *refs = packed_downcast(ref_store, 0, "release");
+ clear_snapshot(refs);
+ rollback_lock_file(&refs->lock);
+ delete_tempfile(&refs->tempfile);
+ free(refs->path);
+}
+
static NORETURN void die_unterminated_line(const char *path,
const char *p, size_t len)
{
@@ -280,12 +300,9 @@ struct snapshot_record {
size_t len;
};
-static int cmp_packed_ref_records(const void *v1, const void *v2)
-{
- const struct snapshot_record *e1 = v1, *e2 = v2;
- const char *r1 = e1->start + the_hash_algo->hexsz + 1;
- const char *r2 = e2->start + the_hash_algo->hexsz + 1;
+static int cmp_packed_refname(const char *r1, const char *r2)
+{
while (1) {
if (*r1 == '\n')
return *r2 == '\n' ? 0 : -1;
@@ -300,14 +317,25 @@ static int cmp_packed_ref_records(const void *v1, const void *v2)
}
}
+static int cmp_packed_ref_records(const void *v1, const void *v2,
+ void *cb_data)
+{
+ const struct snapshot *snapshot = cb_data;
+ const struct snapshot_record *e1 = v1, *e2 = v2;
+ const char *r1 = e1->start + snapshot_hexsz(snapshot) + 1;
+ const char *r2 = e2->start + snapshot_hexsz(snapshot) + 1;
+
+ return cmp_packed_refname(r1, r2);
+}
+
/*
* Compare a snapshot record at `rec` to the specified NUL-terminated
* refname.
*/
static int cmp_record_to_refname(const char *rec, const char *refname,
- int start)
+ int start, const struct snapshot *snapshot)
{
- const char *r1 = rec + the_hash_algo->hexsz + 1;
+ const char *r1 = rec + snapshot_hexsz(snapshot) + 1;
const char *r2 = refname;
while (1) {
@@ -354,7 +382,7 @@ static void sort_snapshot(struct snapshot *snapshot)
if (!eol)
/* The safety check should prevent this. */
BUG("unterminated line found in packed-refs");
- if (eol - pos < the_hash_algo->hexsz + 2)
+ if (eol - pos < snapshot_hexsz(snapshot) + 2)
die_invalid_line(snapshot->refs->path,
pos, eof - pos);
eol++;
@@ -380,7 +408,7 @@ static void sort_snapshot(struct snapshot *snapshot)
if (sorted &&
nr > 1 &&
cmp_packed_ref_records(&records[nr - 2],
- &records[nr - 1]) >= 0)
+ &records[nr - 1], snapshot) >= 0)
sorted = 0;
pos = eol;
@@ -390,7 +418,7 @@ static void sort_snapshot(struct snapshot *snapshot)
goto cleanup;
/* We need to sort the memory. First we sort the records array: */
- QSORT(records, nr, cmp_packed_ref_records);
+ QSORT_S(records, nr, cmp_packed_ref_records, snapshot);
/*
* Allocate a new chunk of memory, and copy the old memory to
@@ -466,11 +494,27 @@ static void verify_buffer_safe(struct snapshot *snapshot)
return;
last_line = find_start_of_record(start, eof - 1);
- if (*(eof - 1) != '\n' || eof - last_line < the_hash_algo->hexsz + 2)
+ if (*(eof - 1) != '\n' ||
+ eof - last_line < snapshot_hexsz(snapshot) + 2)
die_invalid_line(snapshot->refs->path,
last_line, eof - last_line);
}
+/*
+ * We parse the "packed-refs" file line by line. Because we know both
+ * the start pointer of the refname and the pointer to the next
+ * newline, we can compute the length of the refname by subtracting
+ * the two pointers. However, there is a corner case where the refname
+ * contains corrupted, embedded NUL characters: `check_refname_format()`
+ * will not catch this when the truncated refname is still a valid
+ * refname. To guard against this, we need to check whether the
+ * refname contains a NUL character.
+ */
+static int refname_contains_nul(struct strbuf *refname)
+{
+ return !!memchr(refname->buf, '\0', refname->len);
+}
+
#define SMALL_FILE_SIZE (32*1024)
/*
@@ -561,7 +605,7 @@ static const char *find_reference_location_1(struct snapshot *snapshot,
mid = lo + (hi - lo) / 2;
rec = find_start_of_record(lo, mid);
- cmp = cmp_record_to_refname(rec, refname, start);
+ cmp = cmp_record_to_refname(rec, refname, start, snapshot);
if (cmp < 0) {
lo = find_end_of_record(mid, hi);
} else if (cmp > 0) {
@@ -671,7 +715,7 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);
- if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
+ if (!skip_prefix(tmp, "# pack-refs with: ", (const char **)&p))
die_invalid_line(refs->path,
snapshot->buf,
snapshot->eof - snapshot->buf);
@@ -774,7 +818,7 @@ static int packed_read_raw_ref(struct ref_store *ref_store, const char *refname,
return -1;
}
- if (get_oid_hex(rec, oid))
+ if (get_oid_hex_algop(rec, oid, ref_store->repo->hash_algo))
die_invalid_line(refs->path, rec, snapshot->eof - rec);
*type = REF_ISPACKED;
@@ -797,6 +841,8 @@ struct packed_ref_iterator {
struct snapshot *snapshot;
+ char *prefix;
+
/* The current position in the snapshot's buffer: */
const char *pos;
@@ -819,11 +865,9 @@ struct packed_ref_iterator {
};
/*
- * Move the iterator to the next record in the snapshot, without
- * respect for whether the record is actually required by the current
- * iteration. Adjust the fields in `iter` and return `ITER_OK` or
- * `ITER_DONE`. This function does not free the iterator in the case
- * of `ITER_DONE`.
+ * Move the iterator to the next record in the snapshot. Adjust the fields in
+ * `iter` and return `ITER_OK` or `ITER_DONE`. This function does not free the
+ * iterator in the case of `ITER_DONE`.
*/
static int next_record(struct packed_ref_iterator *iter)
{
@@ -858,8 +902,8 @@ static int next_record(struct packed_ref_iterator *iter)
iter->base.flags = REF_ISPACKED;
p = iter->pos;
- if (iter->eof - p < the_hash_algo->hexsz + 2 ||
- parse_oid_hex(p, &iter->oid, &p) ||
+ if (iter->eof - p < snapshot_hexsz(iter->snapshot) + 2 ||
+ parse_oid_hex_algop(p, &iter->oid, &p, iter->repo->hash_algo) ||
!isspace(*p++))
die_invalid_line(iter->snapshot->refs->path,
iter->pos, iter->eof - iter->pos);
@@ -872,11 +916,14 @@ static int next_record(struct packed_ref_iterator *iter)
strbuf_add(&iter->refname_buf, p, eol - p);
iter->base.refname = iter->refname_buf.buf;
+ if (refname_contains_nul(&iter->refname_buf))
+ die("packed refname contains embedded NULL: %s", iter->base.refname);
+
if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
if (!refname_is_safe(iter->base.refname))
die("packed refname is dangerous: %s",
iter->base.refname);
- oidclr(&iter->oid);
+ oidclr(&iter->oid, iter->repo->hash_algo);
iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
}
if (iter->snapshot->peeled == PEELED_FULLY ||
@@ -888,8 +935,8 @@ static int next_record(struct packed_ref_iterator *iter)
if (iter->pos < iter->eof && *iter->pos == '^') {
p = iter->pos + 1;
- if (iter->eof - p < the_hash_algo->hexsz + 1 ||
- parse_oid_hex(p, &iter->peeled, &p) ||
+ if (iter->eof - p < snapshot_hexsz(iter->snapshot) + 1 ||
+ parse_oid_hex_algop(p, &iter->peeled, &p, iter->repo->hash_algo) ||
*p++ != '\n')
die_invalid_line(iter->snapshot->refs->path,
iter->pos, iter->eof - iter->pos);
@@ -901,13 +948,13 @@ static int next_record(struct packed_ref_iterator *iter)
* we suppress it if the reference is broken:
*/
if ((iter->base.flags & REF_ISBROKEN)) {
- oidclr(&iter->peeled);
+ oidclr(&iter->peeled, iter->repo->hash_algo);
iter->base.flags &= ~REF_KNOWS_PEELED;
} else {
iter->base.flags |= REF_KNOWS_PEELED;
}
} else {
- oidclr(&iter->peeled);
+ oidclr(&iter->peeled, iter->repo->hash_algo);
}
return ITER_OK;
@@ -920,6 +967,9 @@ static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
int ok;
while ((ok = next_record(iter)) == ITER_OK) {
+ const char *refname = iter->base.refname;
+ const char *prefix = iter->prefix;
+
if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
!is_per_worktree_ref(iter->base.refname))
continue;
@@ -929,51 +979,72 @@ static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
&iter->oid, iter->flags))
continue;
+ while (prefix && *prefix) {
+ if (*refname < *prefix)
+ BUG("packed-refs backend yielded reference preceding its prefix");
+ else if (*refname > *prefix)
+ return ITER_DONE;
+ prefix++;
+ refname++;
+ }
+
return ITER_OK;
}
- if (ref_iterator_abort(ref_iterator) != ITER_DONE)
- ok = ITER_ERROR;
-
return ok;
}
+static int packed_ref_iterator_seek(struct ref_iterator *ref_iterator,
+ const char *prefix)
+{
+ struct packed_ref_iterator *iter =
+ (struct packed_ref_iterator *)ref_iterator;
+ const char *start;
+
+ if (prefix && *prefix)
+ start = find_reference_location(iter->snapshot, prefix, 0);
+ else
+ start = iter->snapshot->start;
+
+ free(iter->prefix);
+ iter->prefix = xstrdup_or_null(prefix);
+ iter->pos = start;
+ iter->eof = iter->snapshot->eof;
+
+ return 0;
+}
+
static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
struct object_id *peeled)
{
struct packed_ref_iterator *iter =
(struct packed_ref_iterator *)ref_iterator;
- if (iter->repo != the_repository)
- BUG("peeling for non-the_repository is not supported");
-
if ((iter->base.flags & REF_KNOWS_PEELED)) {
oidcpy(peeled, &iter->peeled);
return is_null_oid(&iter->peeled) ? -1 : 0;
} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
return -1;
} else {
- return peel_object(&iter->oid, peeled) ? -1 : 0;
+ return peel_object(iter->repo, &iter->oid, peeled) ? -1 : 0;
}
}
-static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
+static void packed_ref_iterator_release(struct ref_iterator *ref_iterator)
{
struct packed_ref_iterator *iter =
(struct packed_ref_iterator *)ref_iterator;
- int ok = ITER_DONE;
-
strbuf_release(&iter->refname_buf);
free(iter->jump);
+ free(iter->prefix);
release_snapshot(iter->snapshot);
- base_ref_iterator_free(ref_iterator);
- return ok;
}
static struct ref_iterator_vtable packed_ref_iterator_vtable = {
.advance = packed_ref_iterator_advance,
+ .seek = packed_ref_iterator_seek,
.peel = packed_ref_iterator_peel,
- .abort = packed_ref_iterator_abort
+ .release = packed_ref_iterator_release,
};
static int jump_list_entry_cmp(const void *va, const void *vb)
@@ -1085,7 +1156,6 @@ static struct ref_iterator *packed_ref_iterator_begin(
{
struct packed_ref_store *refs;
struct snapshot *snapshot;
- const char *start;
struct packed_ref_iterator *iter;
struct ref_iterator *ref_iterator;
unsigned int required_flags = REF_STORE_READ;
@@ -1101,36 +1171,24 @@ static struct ref_iterator *packed_ref_iterator_begin(
*/
snapshot = get_snapshot(refs);
- if (prefix && *prefix)
- start = find_reference_location(snapshot, prefix, 0);
- else
- start = snapshot->start;
-
- if (start == snapshot->eof)
- return empty_ref_iterator_begin();
-
CALLOC_ARRAY(iter, 1);
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);
+ base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);
if (exclude_patterns)
populate_excluded_jump_list(iter, snapshot, exclude_patterns);
iter->snapshot = snapshot;
acquire_snapshot(snapshot);
-
- iter->pos = start;
- iter->eof = snapshot->eof;
strbuf_init(&iter->refname_buf, 0);
-
iter->base.oid = &iter->oid;
-
iter->repo = ref_store->repo;
iter->flags = flags;
- if (prefix && *prefix)
- /* Stop iteration after we've gone *past* prefix: */
- ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);
+ if (packed_ref_iterator_seek(&iter->base, prefix) < 0) {
+ ref_iterator_free(&iter->base);
+ return NULL;
+ }
return ref_iterator;
}
@@ -1233,6 +1291,24 @@ int packed_refs_is_locked(struct ref_store *ref_store)
return is_lock_file_locked(&refs->lock);
}
+int packed_refs_size(struct ref_store *ref_store,
+ size_t *out)
+{
+ struct packed_ref_store *refs = packed_downcast(ref_store, REF_STORE_READ,
+ "packed_refs_size");
+ struct stat st;
+
+ if (stat(refs->path, &st) < 0) {
+ if (errno != ENOENT)
+ return -1;
+ *out = 0;
+ return 0;
+ }
+
+ *out = st.st_size;
+ return 0;
+}
+
/*
* The packed-refs header line that we write out. Perhaps other traits
* will be added later.
@@ -1244,14 +1320,27 @@ int packed_refs_is_locked(struct ref_store *ref_store)
static const char PACKED_REFS_HEADER[] =
"# pack-refs with: peeled fully-peeled sorted \n";
-static int packed_init_db(struct ref_store *ref_store UNUSED,
- int flags UNUSED,
- struct strbuf *err UNUSED)
+static int packed_ref_store_create_on_disk(struct ref_store *ref_store UNUSED,
+ int flags UNUSED,
+ struct strbuf *err UNUSED)
{
/* Nothing to do. */
return 0;
}
+static int packed_ref_store_remove_on_disk(struct ref_store *ref_store,
+ struct strbuf *err)
+{
+ struct packed_ref_store *refs = packed_downcast(ref_store, 0, "remove");
+
+ if (remove_path(refs->path) < 0) {
+ strbuf_addstr(err, "could not delete packed-refs");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* Write the packed refs from the current snapshot to the packed-refs
* tempfile, incorporating any changes from `updates`. `updates` must
@@ -1312,8 +1401,10 @@ static int write_with_updates(struct packed_ref_store *refs,
*/
iter = packed_ref_iterator_begin(&refs->base, "", NULL,
DO_FOR_EACH_INCLUDE_BROKEN);
- if ((ok = ref_iterator_advance(iter)) != ITER_OK)
+ if ((ok = ref_iterator_advance(iter)) != ITER_OK) {
+ ref_iterator_free(iter);
iter = NULL;
+ }
i = 0;
@@ -1361,8 +1452,10 @@ static int write_with_updates(struct packed_ref_store *refs,
* the iterator over the unneeded
* value.
*/
- if ((ok = ref_iterator_advance(iter)) != ITER_OK)
+ if ((ok = ref_iterator_advance(iter)) != ITER_OK) {
+ ref_iterator_free(iter);
iter = NULL;
+ }
cmp = +1;
} else {
/*
@@ -1399,8 +1492,10 @@ static int write_with_updates(struct packed_ref_store *refs,
peel_error ? NULL : &peeled))
goto write_error;
- if ((ok = ref_iterator_advance(iter)) != ITER_OK)
+ if ((ok = ref_iterator_advance(iter)) != ITER_OK) {
+ ref_iterator_free(iter);
iter = NULL;
+ }
} else if (is_null_oid(&update->new_oid)) {
/*
* The update wants to delete the reference,
@@ -1412,7 +1507,8 @@ static int write_with_updates(struct packed_ref_store *refs,
i++;
} else {
struct object_id peeled;
- int peel_error = peel_object(&update->new_oid,
+ int peel_error = peel_object(refs->base.repo,
+ &update->new_oid,
&peeled);
if (write_packed_entry(out, update->refname,
@@ -1448,9 +1544,7 @@ write_error:
get_tempfile_path(refs->tempfile), strerror(errno));
error:
- if (iter)
- ref_iterator_abort(iter);
-
+ ref_iterator_free(iter);
delete_tempfile(&refs->tempfile);
return -1;
}
@@ -1681,13 +1775,6 @@ cleanup:
return ret;
}
-static int packed_initial_transaction_commit(struct ref_store *ref_store UNUSED,
- struct ref_transaction *transaction,
- struct strbuf *err)
-{
- return ref_transaction_commit(transaction, err);
-}
-
static int packed_pack_refs(struct ref_store *ref_store UNUSED,
struct pack_refs_opts *pack_opts UNUSED)
{
@@ -1704,17 +1791,343 @@ static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_s
return empty_ref_iterator_begin();
}
+static int packed_fsck_ref_next_line(struct fsck_options *o,
+ unsigned long line_number, const char *start,
+ const char *eof, const char **eol)
+{
+ int ret = 0;
+
+ *eol = memchr(start, '\n', eof - start);
+ if (!*eol) {
+ struct strbuf packed_entry = STRBUF_INIT;
+ struct fsck_ref_report report = { 0 };
+
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_PACKED_REF_ENTRY_NOT_TERMINATED,
+ "'%.*s' is not terminated with a newline",
+ (int)(eof - start), start);
+
+ /*
+		 * There is no newline, but we still want to parse the entry
+		 * up to the end of the buffer.
+ */
+ *eol = eof;
+ strbuf_release(&packed_entry);
+ }
+
+ return ret;
+}
+
+static int packed_fsck_ref_header(struct fsck_options *o,
+ const char *start, const char *eol,
+ unsigned int *sorted)
+{
+ struct string_list traits = STRING_LIST_INIT_NODUP;
+ char *tmp_line;
+ int ret = 0;
+ char *p;
+
+ tmp_line = xmemdupz(start, eol - start);
+ if (!skip_prefix(tmp_line, "# pack-refs with: ", (const char **)&p)) {
+ struct fsck_ref_report report = { 0 };
+ report.path = "packed-refs.header";
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_PACKED_REF_HEADER,
+ "'%.*s' does not start with '# pack-refs with: '",
+ (int)(eol - start), start);
+ goto cleanup;
+ }
+
+ string_list_split_in_place(&traits, p, " ", -1);
+ *sorted = unsorted_string_list_has_string(&traits, "sorted");
+
+cleanup:
+ free(tmp_line);
+ string_list_clear(&traits, 0);
+ return ret;
+}
+
+static int packed_fsck_ref_peeled_line(struct fsck_options *o,
+ struct ref_store *ref_store,
+ unsigned long line_number,
+ const char *start, const char *eol)
+{
+ struct strbuf packed_entry = STRBUF_INIT;
+ struct fsck_ref_report report = { 0 };
+ struct object_id peeled;
+ const char *p;
+ int ret = 0;
+
+ /*
+ * Skip the '^' and parse the peeled oid.
+ */
+ start++;
+ if (parse_oid_hex_algop(start, &peeled, &p, ref_store->repo->hash_algo)) {
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_PACKED_REF_ENTRY,
+ "'%.*s' has invalid peeled oid",
+ (int)(eol - start), start);
+ goto cleanup;
+ }
+
+ if (p != eol) {
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_PACKED_REF_ENTRY,
+ "has trailing garbage after peeled oid '%.*s'",
+ (int)(eol - p), p);
+ goto cleanup;
+ }
+
+cleanup:
+ strbuf_release(&packed_entry);
+ return ret;
+}
+
+static int packed_fsck_ref_main_line(struct fsck_options *o,
+ struct ref_store *ref_store,
+ unsigned long line_number,
+ struct strbuf *refname,
+ const char *start, const char *eol)
+{
+ struct strbuf packed_entry = STRBUF_INIT;
+ struct fsck_ref_report report = { 0 };
+ struct object_id oid;
+ const char *p;
+ int ret = 0;
+
+ if (parse_oid_hex_algop(start, &oid, &p, ref_store->repo->hash_algo)) {
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_PACKED_REF_ENTRY,
+ "'%.*s' has invalid oid",
+ (int)(eol - start), start);
+ goto cleanup;
+ }
+
+ if (p == eol || !isspace(*p)) {
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_PACKED_REF_ENTRY,
+ "has no space after oid '%s' but with '%.*s'",
+ oid_to_hex(&oid), (int)(eol - p), p);
+ goto cleanup;
+ }
+
+ p++;
+ strbuf_reset(refname);
+ strbuf_add(refname, p, eol - p);
+ if (refname_contains_nul(refname)) {
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_PACKED_REF_ENTRY,
+ "refname '%s' contains NULL binaries",
+ refname->buf);
+ }
+
+ if (check_refname_format(refname->buf, 0)) {
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_REF_NAME,
+ "has bad refname '%s'", refname->buf);
+ }
+
+cleanup:
+ strbuf_release(&packed_entry);
+ return ret;
+}
+
+static int packed_fsck_ref_sorted(struct fsck_options *o,
+ struct ref_store *ref_store,
+ const char *start, const char *eof)
+{
+ size_t hexsz = ref_store->repo->hash_algo->hexsz;
+ struct strbuf packed_entry = STRBUF_INIT;
+ struct fsck_ref_report report = { 0 };
+ struct strbuf refname1 = STRBUF_INIT;
+ struct strbuf refname2 = STRBUF_INIT;
+ unsigned long line_number = 1;
+ const char *former = NULL;
+ const char *current;
+ const char *eol;
+ int ret = 0;
+
+ if (*start == '#') {
+ eol = memchr(start, '\n', eof - start);
+ start = eol + 1;
+ line_number++;
+ }
+
+ for (; start < eof; line_number++, start = eol + 1) {
+ eol = memchr(start, '\n', eof - start);
+
+ if (*start == '^')
+ continue;
+
+ if (!former) {
+ former = start + hexsz + 1;
+ continue;
+ }
+
+ current = start + hexsz + 1;
+ if (cmp_packed_refname(former, current) >= 0) {
+ const char *err_fmt =
+ "refname '%s' is less than previous refname '%s'";
+
+ eol = memchr(former, '\n', eof - former);
+ strbuf_add(&refname1, former, eol - former);
+ eol = memchr(current, '\n', eof - current);
+ strbuf_add(&refname2, current, eol - current);
+
+ strbuf_addf(&packed_entry, "packed-refs line %lu", line_number);
+ report.path = packed_entry.buf;
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_PACKED_REF_UNSORTED,
+ err_fmt, refname2.buf, refname1.buf);
+ goto cleanup;
+ }
+ former = current;
+ }
+
+cleanup:
+ strbuf_release(&packed_entry);
+ strbuf_release(&refname1);
+ strbuf_release(&refname2);
+ return ret;
+}
+
+static int packed_fsck_ref_content(struct fsck_options *o,
+ struct ref_store *ref_store,
+ unsigned int *sorted,
+ const char *start, const char *eof)
+{
+ struct strbuf refname = STRBUF_INIT;
+ unsigned long line_number = 1;
+ const char *eol;
+ int ret = 0;
+
+ ret |= packed_fsck_ref_next_line(o, line_number, start, eof, &eol);
+ if (*start == '#') {
+ ret |= packed_fsck_ref_header(o, start, eol, sorted);
+
+ start = eol + 1;
+ line_number++;
+ }
+
+ while (start < eof) {
+ ret |= packed_fsck_ref_next_line(o, line_number, start, eof, &eol);
+ ret |= packed_fsck_ref_main_line(o, ref_store, line_number, &refname, start, eol);
+ start = eol + 1;
+ line_number++;
+ if (start < eof && *start == '^') {
+ ret |= packed_fsck_ref_next_line(o, line_number, start, eof, &eol);
+ ret |= packed_fsck_ref_peeled_line(o, ref_store, line_number,
+ start, eol);
+ start = eol + 1;
+ line_number++;
+ }
+ }
+
+ strbuf_release(&refname);
+ return ret;
+}
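
For reference, a well-formed file in the layout these checks expect — the
header (note its trailing space, matching PACKED_REFS_HEADER), sorted refname
lines, and an optional peeled line prefixed with '^' after an annotated tag's
entry. The object IDs are illustrative:

    # pack-refs with: peeled fully-peeled sorted 
    3f786850e387550fdab836ed7e6dc881de23001b refs/heads/main
    89e6c98d92887913cadf06b2adb97f26cde4849b refs/tags/v1.0
    ^d670460b4b4aece5915caf5c68d12f560a9fe3e4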
+
+static int packed_fsck(struct ref_store *ref_store,
+ struct fsck_options *o,
+ struct worktree *wt)
+{
+ struct packed_ref_store *refs = packed_downcast(ref_store,
+ REF_STORE_READ, "fsck");
+ struct strbuf packed_ref_content = STRBUF_INIT;
+ unsigned int sorted = 0;
+ struct stat st;
+ int ret = 0;
+ int fd = -1;
+
+ if (!is_main_worktree(wt))
+ goto cleanup;
+
+ if (o->verbose)
+ fprintf_ln(stderr, "Checking packed-refs file %s", refs->path);
+
+ fd = open_nofollow(refs->path, O_RDONLY);
+ if (fd < 0) {
+ /*
+ * If the packed-refs file doesn't exist, there's nothing
+ * to check.
+ */
+ if (errno == ENOENT)
+ goto cleanup;
+
+ if (errno == ELOOP) {
+ struct fsck_ref_report report = { 0 };
+ report.path = "packed-refs";
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_REF_FILETYPE,
+ "not a regular file but a symlink");
+ goto cleanup;
+ }
+
+ ret = error_errno(_("unable to open '%s'"), refs->path);
+ goto cleanup;
+ } else if (fstat(fd, &st) < 0) {
+ ret = error_errno(_("unable to stat '%s'"), refs->path);
+ goto cleanup;
+ } else if (!S_ISREG(st.st_mode)) {
+ struct fsck_ref_report report = { 0 };
+ report.path = "packed-refs";
+ ret = fsck_report_ref(o, &report,
+ FSCK_MSG_BAD_REF_FILETYPE,
+ "not a regular file");
+ goto cleanup;
+ }
+
+ if (strbuf_read(&packed_ref_content, fd, 0) < 0) {
+ ret = error_errno(_("unable to read '%s'"), refs->path);
+ goto cleanup;
+ }
+
+ ret = packed_fsck_ref_content(o, ref_store, &sorted, packed_ref_content.buf,
+ packed_ref_content.buf + packed_ref_content.len);
+ if (!ret && sorted)
+ ret = packed_fsck_ref_sorted(o, ref_store, packed_ref_content.buf,
+ packed_ref_content.buf + packed_ref_content.len);
+
+cleanup:
+ if (fd >= 0)
+ close(fd);
+ strbuf_release(&packed_ref_content);
+ return ret;
+}
+
struct ref_storage_be refs_be_packed = {
.name = "packed",
- .init = packed_ref_store_create,
- .init_db = packed_init_db,
+ .init = packed_ref_store_init,
+ .release = packed_ref_store_release,
+ .create_on_disk = packed_ref_store_create_on_disk,
+ .remove_on_disk = packed_ref_store_remove_on_disk,
+
.transaction_prepare = packed_transaction_prepare,
.transaction_finish = packed_transaction_finish,
.transaction_abort = packed_transaction_abort,
- .initial_transaction_commit = packed_initial_transaction_commit,
.pack_refs = packed_pack_refs,
- .create_symref = NULL,
.rename_ref = NULL,
.copy_ref = NULL,
@@ -1729,4 +2142,6 @@ struct ref_storage_be refs_be_packed = {
.create_reflog = NULL,
.delete_reflog = NULL,
.reflog_expire = NULL,
+
+ .fsck = packed_fsck,
};
diff --git a/refs/packed-backend.h b/refs/packed-backend.h
index 9dd8a344c3..9481d5e7c2 100644
--- a/refs/packed-backend.h
+++ b/refs/packed-backend.h
@@ -13,9 +13,9 @@ struct ref_transaction;
* even among packed refs.
*/
-struct ref_store *packed_ref_store_create(struct repository *repo,
- const char *gitdir,
- unsigned int store_flags);
+struct ref_store *packed_ref_store_init(struct repository *repo,
+ const char *gitdir,
+ unsigned int store_flags);
/*
* Lock the packed-refs file for writing. Flags is passed to
@@ -28,6 +28,13 @@ void packed_refs_unlock(struct ref_store *ref_store);
int packed_refs_is_locked(struct ref_store *ref_store);
/*
+ * Obtain the size of the `packed-refs` file. Reports `0` as size in case there
+ * is no packed-refs file. Returns 0 on success, negative otherwise.
+ */
+int packed_refs_size(struct ref_store *ref_store,
+ size_t *out);
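
A hedged caller sketch (the error message is illustrative):

    size_t size;
    if (packed_refs_size(ref_store, &size) < 0)
        die("could not determine packed-refs size");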
+
+/*
* Return true if `transaction` really needs to be carried out against
* the specified packed_ref_store, or false if it can be skipped
* (i.e., because it is an obvious NOOP). `ref_store` must be locked
diff --git a/refs/ref-cache.c b/refs/ref-cache.c
index a372a00941..c1f1bab1d5 100644
--- a/refs/ref-cache.c
+++ b/refs/ref-cache.c
@@ -34,6 +34,7 @@ struct ref_dir *get_ref_dir(struct ref_entry *entry)
}
struct ref_entry *create_ref_entry(const char *refname,
+ const char *referent,
const struct object_id *oid, int flag)
{
struct ref_entry *ref;
@@ -41,6 +42,8 @@ struct ref_entry *create_ref_entry(const char *refname,
FLEX_ALLOC_STR(ref, name, refname);
oidcpy(&ref->u.value.oid, oid);
ref->flag = flag;
+ ref->u.value.referent = xstrdup_or_null(referent);
+
return ref;
}
@@ -65,12 +68,16 @@ static void free_ref_entry(struct ref_entry *entry)
* trigger the reading of loose refs.
*/
clear_ref_dir(&entry->u.subdir);
+ } else {
+ free(entry->u.value.referent);
}
free(entry);
}
void free_ref_cache(struct ref_cache *cache)
{
+ if (!cache)
+ return;
free_ref_entry(cache->root);
free(cache);
}
@@ -355,9 +362,7 @@ struct cache_ref_iterator {
struct ref_iterator base;
/*
- * The number of levels currently on the stack. This is always
- * at least 1, because when it becomes zero the iteration is
- * ended and this struct is freed.
+ * The number of levels currently on the stack.
*/
size_t levels_nr;
@@ -369,7 +374,7 @@ struct cache_ref_iterator {
* The prefix is matched textually, without regard for path
* component boundaries.
*/
- const char *prefix;
+ char *prefix;
/*
* A stack of levels. levels[0] is the uppermost level that is
@@ -382,6 +387,9 @@ struct cache_ref_iterator {
struct cache_ref_iterator_level *levels;
struct repository *repo;
+ struct ref_cache *cache;
+
+ int prime_dir;
};
static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
@@ -389,6 +397,9 @@ static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
struct cache_ref_iterator *iter =
(struct cache_ref_iterator *)ref_iterator;
+ if (!iter->levels_nr)
+ return ITER_DONE;
+
while (1) {
struct cache_ref_iterator_level *level =
&iter->levels[iter->levels_nr - 1];
@@ -402,7 +413,7 @@ static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
if (++level->index == level->dir->nr) {
/* This level is exhausted; pop up a level */
if (--iter->levels_nr == 0)
- return ref_iterator_abort(ref_iterator);
+ return ITER_DONE;
continue;
}
@@ -429,6 +440,7 @@ static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
level->index = -1;
} else {
iter->base.refname = entry->name;
+ iter->base.referent = entry->u.value.referent;
iter->base.oid = &entry->u.value.oid;
iter->base.flags = entry->flag;
return ITER_OK;
@@ -436,32 +448,62 @@ static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
}
}
-static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
- struct object_id *peeled)
+static int cache_ref_iterator_seek(struct ref_iterator *ref_iterator,
+ const char *prefix)
{
struct cache_ref_iterator *iter =
(struct cache_ref_iterator *)ref_iterator;
+ struct cache_ref_iterator_level *level;
+ struct ref_dir *dir;
+
+ dir = get_ref_dir(iter->cache->root);
+ if (prefix && *prefix)
+ dir = find_containing_dir(dir, prefix);
+ if (!dir) {
+ iter->levels_nr = 0;
+ return 0;
+ }
+
+ if (iter->prime_dir)
+ prime_ref_dir(dir, prefix);
+ iter->levels_nr = 1;
+ level = &iter->levels[0];
+ level->index = -1;
+ level->dir = dir;
+
+ if (prefix && *prefix) {
+ free(iter->prefix);
+ iter->prefix = xstrdup(prefix);
+ level->prefix_state = PREFIX_WITHIN_DIR;
+ } else {
+ FREE_AND_NULL(iter->prefix);
+ level->prefix_state = PREFIX_CONTAINS_DIR;
+ }
- if (iter->repo != the_repository)
- BUG("peeling for non-the_repository is not supported");
- return peel_object(ref_iterator->oid, peeled) ? -1 : 0;
+ return 0;
}
-static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
+static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
+ struct object_id *peeled)
{
struct cache_ref_iterator *iter =
(struct cache_ref_iterator *)ref_iterator;
+ return peel_object(iter->repo, ref_iterator->oid, peeled) ? -1 : 0;
+}
- free((char *)iter->prefix);
+static void cache_ref_iterator_release(struct ref_iterator *ref_iterator)
+{
+ struct cache_ref_iterator *iter =
+ (struct cache_ref_iterator *)ref_iterator;
+ free(iter->prefix);
free(iter->levels);
- base_ref_iterator_free(ref_iterator);
- return ITER_DONE;
}
static struct ref_iterator_vtable cache_ref_iterator_vtable = {
.advance = cache_ref_iterator_advance,
+ .seek = cache_ref_iterator_seek,
.peel = cache_ref_iterator_peel,
- .abort = cache_ref_iterator_abort
+ .release = cache_ref_iterator_release,
};
struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
@@ -469,39 +511,22 @@ struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
struct repository *repo,
int prime_dir)
{
- struct ref_dir *dir;
struct cache_ref_iterator *iter;
struct ref_iterator *ref_iterator;
- struct cache_ref_iterator_level *level;
-
- dir = get_ref_dir(cache->root);
- if (prefix && *prefix)
- dir = find_containing_dir(dir, prefix);
- if (!dir)
- /* There's nothing to iterate over. */
- return empty_ref_iterator_begin();
-
- if (prime_dir)
- prime_ref_dir(dir, prefix);
CALLOC_ARRAY(iter, 1);
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable, 1);
+ base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable);
ALLOC_GROW(iter->levels, 10, iter->levels_alloc);
- iter->levels_nr = 1;
- level = &iter->levels[0];
- level->index = -1;
- level->dir = dir;
+ iter->repo = repo;
+ iter->cache = cache;
+ iter->prime_dir = prime_dir;
- if (prefix && *prefix) {
- iter->prefix = xstrdup(prefix);
- level->prefix_state = PREFIX_WITHIN_DIR;
- } else {
- level->prefix_state = PREFIX_CONTAINS_DIR;
+ if (cache_ref_iterator_seek(&iter->base, prefix) < 0) {
+ ref_iterator_free(&iter->base);
+ return NULL;
}
- iter->repo = repo;
-
return ref_iterator;
}
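
With abort gone from the vtable, the iterator lifecycle becomes: advance
until the iterator is exhausted, then free unconditionally. A minimal
sketch, assuming an initialized `cache` and `repo`:

	struct ref_iterator *iter =
		cache_ref_iterator_begin(cache, "refs/heads/", repo, 0);
	int ok;

	if (!iter)
		die("failed to begin iteration");
	while ((ok = ref_iterator_advance(iter)) == ITER_OK)
		printf("%s\n", iter->refname);
	if (ok != ITER_DONE)
		die("iteration failed");
	ref_iterator_free(iter); /* replaces ref_iterator_abort() */
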
diff --git a/refs/ref-cache.h b/refs/ref-cache.h
index 95c76e27c8..5f04e518c3 100644
--- a/refs/ref-cache.h
+++ b/refs/ref-cache.h
@@ -1,7 +1,7 @@
#ifndef REFS_REF_CACHE_H
#define REFS_REF_CACHE_H
-#include "hash-ll.h"
+#include "hash.h"
struct ref_dir;
struct ref_store;
@@ -42,6 +42,7 @@ struct ref_value {
* referred to by the last reference in the symlink chain.
*/
struct object_id oid;
+ char *referent;
};
/*
@@ -173,6 +174,7 @@ struct ref_entry *create_dir_entry(struct ref_cache *cache,
const char *dirname, size_t len);
struct ref_entry *create_ref_entry(const char *refname,
+ const char *referent,
const struct object_id *oid, int flag);
/*
diff --git a/refs/refs-internal.h b/refs/refs-internal.h
index 82219829b0..e5862757a7 100644
--- a/refs/refs-internal.h
+++ b/refs/refs-internal.h
@@ -4,6 +4,7 @@
#include "refs.h"
#include "iterator.h"
+struct fsck_options;
struct ref_transaction;
/*
@@ -69,40 +70,6 @@ int ref_resolves_to_object(const char *refname,
const struct object_id *oid,
unsigned int flags);
-enum peel_status {
- /* object was peeled successfully: */
- PEEL_PEELED = 0,
-
- /*
- * object cannot be peeled because the named object (or an
- * object referred to by a tag in the peel chain), does not
- * exist.
- */
- PEEL_INVALID = -1,
-
- /* object cannot be peeled because it is not a tag: */
- PEEL_NON_TAG = -2,
-
- /* ref_entry contains no peeled value because it is a symref: */
- PEEL_IS_SYMREF = -3,
-
- /*
- * ref_entry cannot be peeled because it is broken (i.e., the
- * symbolic reference cannot even be resolved to an object
- * name):
- */
- PEEL_BROKEN = -4
-};
-
-/*
- * Peel the named object; i.e., if the object is a tag, resolve the
- * tag recursively until a non-tag is found. If successful, store the
- * result to oid and return PEEL_PEELED. If the object is not a tag
- * or is not valid, return PEEL_NON_TAG or PEEL_INVALID, respectively,
- * and leave oid unchanged.
- */
-enum peel_status peel_object(const struct object_id *name, struct object_id *oid);
-
/**
* Information needed for a single ref update. Set new_oid to the new
* value or to null_oid to delete the ref. To check the old value
@@ -125,6 +92,19 @@ struct ref_update {
struct object_id old_oid;
/*
+ * If set, point the reference to this value. This can also be
+ * used to convert regular references to become symbolic refs.
+ * Cannot be set together with `new_oid`.
+ */
+ const char *new_target;
+
+ /*
+ * If set, check that the reference previously pointed to this
+ * value. Cannot be set together with `old_oid`.
+ */
+ const char *old_target;
+
+ /*
* One or more of REF_NO_DEREF, REF_FORCE_CREATE_REFLOG,
* REF_HAVE_NEW, REF_HAVE_OLD, or backend-specific flags.
*/
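
A hedged sketch of how these fields are meant to be used together with
ref_transaction_add_update() (declared below); flag handling is elided and
the refnames are illustrative:

	/*
	 * Queue an update turning HEAD into a symref to refs/heads/topic,
	 * asserting that it previously pointed to refs/heads/main.
	 */
	ref_transaction_add_update(transaction, "HEAD", REF_NO_DEREF,
				   NULL, NULL,
				   "refs/heads/topic", "refs/heads/main",
				   NULL, "checkout: moving to topic");
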
@@ -133,6 +113,14 @@ struct ref_update {
void *backend_data;
unsigned int type;
char *msg;
+ char *committer_info;
+
+ /*
+	 * The index overrides the default sort order. This is needed when
+	 * migrating reflogs, where we want to ensure that entries are
+	 * carried over in their original order.
+ */
+ uint64_t index;
/*
* If this ref_update was split off of a symref update via
@@ -173,6 +161,8 @@ struct ref_update *ref_transaction_add_update(
const char *refname, unsigned int flags,
const struct object_id *new_oid,
const struct object_id *old_oid,
+ const char *new_target, const char *old_target,
+ const char *committer_info,
const char *msg);
/*
@@ -212,6 +202,8 @@ struct ref_transaction {
size_t nr;
enum ref_transaction_state state;
void *backend_data;
+ unsigned int flags;
+ uint64_t max_index;
};
/*
@@ -260,6 +252,12 @@ enum do_for_each_ref_flags {
* INCLUDE_BROKEN, since they are otherwise not included at all.
*/
DO_FOR_EACH_OMIT_DANGLING_SYMREFS = (1 << 2),
+
+ /*
+	 * Include root refs, i.e. HEAD and pseudorefs, along with the
+	 * regular refs.
+ */
+ DO_FOR_EACH_INCLUDE_ROOT_REFS = (1 << 3),
};
/*
@@ -275,11 +273,11 @@ enum do_for_each_ref_flags {
* the next reference and returns ITER_OK. The data pointed at by
* refname and oid belong to the iterator; if you want to retain them
* after calling ref_iterator_advance() again or calling
- * ref_iterator_abort(), you must make a copy. When the iteration has
+ * ref_iterator_free(), you must make a copy. When the iteration has
* been exhausted, ref_iterator_advance() releases any resources
* associated with the iteration, frees the ref_iterator object, and
* returns ITER_DONE. If you want to abort the iteration early, call
- * ref_iterator_abort(), which also frees the ref_iterator object and
+ * ref_iterator_free(), which also frees the ref_iterator object and
* any associated resources. If there was an internal error advancing
* to the next entry, ref_iterator_advance() aborts the iteration,
* frees the ref_iterator, and returns ITER_ERROR.
@@ -295,7 +293,7 @@ enum do_for_each_ref_flags {
*
* while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
* if (want_to_stop_iteration()) {
- * ok = ref_iterator_abort(iter);
+ * ok = ITER_DONE;
* break;
* }
*
@@ -309,17 +307,12 @@ enum do_for_each_ref_flags {
*
* if (ok != ITER_DONE)
* handle_error();
+ * ref_iterator_free(iter);
*/
struct ref_iterator {
struct ref_iterator_vtable *vtable;
-
- /*
- * Does this `ref_iterator` iterate over references in order
- * by refname?
- */
- unsigned int ordered : 1;
-
const char *refname;
+ const char *referent;
const struct object_id *oid;
unsigned int flags;
};
@@ -335,18 +328,30 @@ struct ref_iterator {
int ref_iterator_advance(struct ref_iterator *ref_iterator);
/*
+ * Seek the iterator to the first reference with the given prefix.
+ * The prefix is matched as a literal string, without regard for path
+ * separators. If prefix is NULL or the empty string, seek the iterator to the
+ * first reference again.
+ *
+ * This function is expected to behave as if a new ref iterator with the same
+ * prefix had been created, but allows reuse of iterators and thus may allow
+ * the backend to optimize. Parameters other than the prefix that have been
+ * passed when creating the iterator will remain unchanged.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int ref_iterator_seek(struct ref_iterator *ref_iterator,
+ const char *prefix);
+
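
A minimal sketch of the reuse pattern this enables, assuming `iter` was
created by some backend's iterator_begin and `handle_ref()` is a
hypothetical callback:

	static const char *prefixes[] = { "refs/heads/", "refs/tags/" };

	for (size_t i = 0; i < ARRAY_SIZE(prefixes); i++) {
		if (ref_iterator_seek(iter, prefixes[i]) < 0)
			die("seek failed");
		while (ref_iterator_advance(iter) == ITER_OK)
			handle_ref(iter->refname);
	}
	ref_iterator_free(iter);
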
+/*
* If possible, peel the reference currently being viewed by the
* iterator. Return 0 on success.
*/
int ref_iterator_peel(struct ref_iterator *ref_iterator,
struct object_id *peeled);
-/*
- * End the iteration before it has been exhausted, freeing the
- * reference iterator and any associated resources and returning
- * ITER_DONE. If the abort itself failed, return ITER_ERROR.
- */
-int ref_iterator_abort(struct ref_iterator *ref_iterator);
+/* Free the reference iterator and any associated resources. */
+void ref_iterator_free(struct ref_iterator *ref_iterator);
/*
* An iterator over nothing (its first ref_iterator_advance() call
@@ -387,14 +392,21 @@ typedef enum iterator_selection ref_iterator_select_fn(
void *cb_data);
/*
+ * An implementation of ref_iterator_select_fn that merges worktree and common
+ * refs. Per-worktree refs from the common iterator are ignored, and worktree
+ * refs override common refs. Refs are selected lexicographically.
+ */
+enum iterator_selection ref_iterator_select(struct ref_iterator *iter_worktree,
+ struct ref_iterator *iter_common,
+ void *cb_data);
+
+/*
* Iterate over the entries from iter0 and iter1, with the values
* interleaved as directed by the select function. The iterator takes
* ownership of iter0 and iter1 and frees them when the iteration is
- * over. A derived class should set `ordered` to 1 or 0 based on
- * whether it generates its output in order by reference name.
+ * over.
*/
struct ref_iterator *merge_ref_iterator_begin(
- int ordered,
struct ref_iterator *iter0, struct ref_iterator *iter1,
ref_iterator_select_fn *select, void *cb_data);
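
As a sketch, the reftable backend below combines per-worktree and common
refs exactly this way, with both sub-iterators ordered by refname:

	struct ref_iterator *iter =
		merge_ref_iterator_begin(&worktree_iter->base,
					 &main_iter->base,
					 ref_iterator_select, NULL);
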
@@ -423,8 +435,6 @@ struct ref_iterator *overlay_ref_iterator_begin(
 * As a convenience to callers, if prefix is the empty string and
* trim is zero, this function returns iter0 directly, without
* wrapping it.
- *
- * The resulting ref_iterator is ordered if iter0 is.
*/
struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
const char *prefix,
@@ -435,21 +445,11 @@ struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
/*
* Base class constructor for ref_iterators. Initialize the
* ref_iterator part of iter, setting its vtable pointer as specified.
- * `ordered` should be set to 1 if the iterator will iterate over
- * references in order by refname; otherwise it should be set to 0.
* This is meant to be called only by the initializers of derived
* classes.
*/
void base_ref_iterator_init(struct ref_iterator *iter,
- struct ref_iterator_vtable *vtable,
- int ordered);
-
-/*
- * Base class destructor for ref_iterators. Destroy the ref_iterator
- * part of iter and shallow-free the object. This is meant to be
- * called only by the destructors of derived classes.
- */
-void base_ref_iterator_free(struct ref_iterator *iter);
+ struct ref_iterator_vtable *vtable);
/* Virtual function declarations for ref_iterators: */
@@ -462,6 +462,13 @@ void base_ref_iterator_free(struct ref_iterator *iter);
typedef int ref_iterator_advance_fn(struct ref_iterator *ref_iterator);
/*
+ * Seek the iterator to the first reference matching the given prefix. Should
+ * behave the same as if a new iterator was created with the same prefix.
+ */
+typedef int ref_iterator_seek_fn(struct ref_iterator *ref_iterator,
+ const char *prefix);
+
+/*
* Peels the current ref, returning 0 for success or -1 for failure.
*/
typedef int ref_iterator_peel_fn(struct ref_iterator *ref_iterator,
@@ -469,15 +476,15 @@ typedef int ref_iterator_peel_fn(struct ref_iterator *ref_iterator,
/*
* Implementations of this function should free any resources specific
- * to the derived class, then call base_ref_iterator_free() to clean
- * up and free the ref_iterator object.
+ * to the derived class.
*/
-typedef int ref_iterator_abort_fn(struct ref_iterator *ref_iterator);
+typedef void ref_iterator_release_fn(struct ref_iterator *ref_iterator);
struct ref_iterator_vtable {
ref_iterator_advance_fn *advance;
+ ref_iterator_seek_fn *seek;
ref_iterator_peel_fn *peel;
- ref_iterator_abort_fn *abort;
+ ref_iterator_release_fn *release;
};
/*
@@ -502,9 +509,8 @@ extern struct ref_iterator *current_ref_iter;
* adapter between the callback style of reference iteration and the
* iterator style.
*/
-int do_for_each_repo_ref_iterator(struct repository *r,
- struct ref_iterator *iter,
- each_repo_ref_fn fn, void *cb_data);
+int do_for_each_ref_iterator(struct ref_iterator *iter,
+ each_ref_fn fn, void *cb_data);
struct ref_store;
@@ -528,10 +534,20 @@ struct ref_store;
typedef struct ref_store *ref_store_init_fn(struct repository *repo,
const char *gitdir,
unsigned int flags);
+/*
+ * Release all memory and resources associated with the ref store.
+ */
+typedef void ref_store_release_fn(struct ref_store *refs);
+
+typedef int ref_store_create_on_disk_fn(struct ref_store *refs,
+ int flags,
+ struct strbuf *err);
-typedef int ref_init_db_fn(struct ref_store *refs,
- int flags,
- struct strbuf *err);
+/*
+ * Remove the reference store from disk.
+ */
+typedef int ref_store_remove_on_disk_fn(struct ref_store *refs,
+ struct strbuf *err);
typedef int ref_transaction_prepare_fn(struct ref_store *refs,
struct ref_transaction *transaction,
@@ -551,10 +567,6 @@ typedef int ref_transaction_commit_fn(struct ref_store *refs,
typedef int pack_refs_fn(struct ref_store *ref_store,
struct pack_refs_opts *opts);
-typedef int create_symref_fn(struct ref_store *ref_store,
- const char *ref_target,
- const char *refs_heads_master,
- const char *logmsg);
typedef int rename_ref_fn(struct ref_store *ref_store,
const char *oldref, const char *newref,
const char *logmsg);
@@ -664,23 +676,32 @@ typedef int read_raw_ref_fn(struct ref_store *ref_store, const char *refname,
typedef int read_symbolic_ref_fn(struct ref_store *ref_store, const char *refname,
struct strbuf *referent);
+typedef int fsck_fn(struct ref_store *ref_store,
+ struct fsck_options *o,
+ struct worktree *wt);
+
struct ref_storage_be {
const char *name;
ref_store_init_fn *init;
- ref_init_db_fn *init_db;
+ ref_store_release_fn *release;
+ ref_store_create_on_disk_fn *create_on_disk;
+ ref_store_remove_on_disk_fn *remove_on_disk;
ref_transaction_prepare_fn *transaction_prepare;
ref_transaction_finish_fn *transaction_finish;
ref_transaction_abort_fn *transaction_abort;
- ref_transaction_commit_fn *initial_transaction_commit;
pack_refs_fn *pack_refs;
- create_symref_fn *create_symref;
rename_ref_fn *rename_ref;
copy_ref_fn *copy_ref;
ref_iterator_begin_fn *iterator_begin;
read_raw_ref_fn *read_raw_ref;
+
+ /*
+ * Please refer to `refs_read_symbolic_ref()` for the expected
+ * behaviour.
+ */
read_symbolic_ref_fn *read_symbolic_ref;
reflog_iterator_begin_fn *reflog_iterator_begin;
@@ -690,15 +711,18 @@ struct ref_storage_be {
create_reflog_fn *create_reflog;
delete_reflog_fn *delete_reflog;
reflog_expire_fn *reflog_expire;
+
+ fsck_fn *fsck;
};
extern struct ref_storage_be refs_be_files;
+extern struct ref_storage_be refs_be_reftable;
extern struct ref_storage_be refs_be_packed;
/*
* A representation of the reference store for the main repository or
* a submodule. The ref_store instances for submodules are kept in a
- * hash map; see get_submodule_ref_store() for more info.
+ * hash map; see repo_get_submodule_ref_store() for more info.
*/
struct ref_store {
/* The backend describing this ref_store's storage scheme: */
@@ -717,9 +741,10 @@ struct ref_store {
 * Parse contents of a loose ref file. *failure_errno may be set to EINVAL for
* invalid contents.
*/
-int parse_loose_ref_contents(const char *buf, struct object_id *oid,
+int parse_loose_ref_contents(const struct git_hash_algo *algop,
+ const char *buf, struct object_id *oid,
struct strbuf *referent, unsigned int *type,
- int *failure_errno);
+ const char **trailing, int *failure_errno);
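
A minimal sketch of a caller, assuming `buf` holds the raw contents of a
loose ref file such as "ref: refs/heads/main\n":

	struct object_id oid;
	struct strbuf referent = STRBUF_INIT;
	unsigned int type = 0;
	const char *trailing = NULL;
	int failure_errno = 0;

	if (parse_loose_ref_contents(the_repository->hash_algo, buf, &oid,
				     &referent, &type, &trailing,
				     &failure_errno) < 0)
		error("invalid loose ref: %s", strerror(failure_errno));
	else if (type & REF_ISSYMREF)
		printf("symref pointing to %s\n", referent.buf);
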
/*
* Fill in the generic part of refs and add it to our collection of
@@ -733,4 +758,31 @@ void base_ref_store_init(struct ref_store *refs, struct repository *repo,
*/
struct ref_store *maybe_debug_wrap_ref_store(const char *gitdir, struct ref_store *store);
+/*
+ * Return the refname under which update was originally requested.
+ */
+const char *ref_update_original_update_refname(struct ref_update *update);
+
+/*
+ * Helper function to check whether the new value is null; it takes into
+ * consideration that the update could be for a regular ref or a symbolic
+ * ref.
+ */
+int ref_update_has_null_new_value(struct ref_update *update);
+
+/*
+ * Check whether the old_target values stored in update are consistent
+ * with the referent, which is the symbolic reference's current value.
+ * If everything is OK, return 0; otherwise, write an error message to
+ * err and return -1.
+ */
+int ref_update_check_old_target(const char *referent, struct ref_update *update,
+ struct strbuf *err);
+
+/*
+ * Check whether the ref must exist, which is the case when either the
+ * old_oid or the old_target is non-NULL.
+ */
+int ref_update_expects_existing_old_ref(struct ref_update *update);
+
#endif /* REFS_REFS_INTERNAL_H */
diff --git a/refs/reftable-backend.c b/refs/reftable-backend.c
new file mode 100644
index 0000000000..ae434cd248
--- /dev/null
+++ b/refs/reftable-backend.c
@@ -0,0 +1,2700 @@
+#define USE_THE_REPOSITORY_VARIABLE
+
+#include "../git-compat-util.h"
+#include "../abspath.h"
+#include "../chdir-notify.h"
+#include "../config.h"
+#include "../dir.h"
+#include "../environment.h"
+#include "../gettext.h"
+#include "../hash.h"
+#include "../hex.h"
+#include "../iterator.h"
+#include "../ident.h"
+#include "../lockfile.h"
+#include "../object.h"
+#include "../path.h"
+#include "../refs.h"
+#include "../reftable/reftable-basics.h"
+#include "../reftable/reftable-stack.h"
+#include "../reftable/reftable-record.h"
+#include "../reftable/reftable-error.h"
+#include "../reftable/reftable-iterator.h"
+#include "../repo-settings.h"
+#include "../setup.h"
+#include "../strmap.h"
+#include "../trace2.h"
+#include "../write-or-die.h"
+#include "parse.h"
+#include "refs-internal.h"
+
+/*
+ * Used as a flag in ref_update::flags when the ref_update was via an
+ * update to HEAD.
+ */
+#define REF_UPDATE_VIA_HEAD (1 << 8)
+
+struct reftable_backend {
+ struct reftable_stack *stack;
+ struct reftable_iterator it;
+};
+
+static void reftable_backend_on_reload(void *payload)
+{
+ struct reftable_backend *be = payload;
+ reftable_iterator_destroy(&be->it);
+}
+
+static int reftable_backend_init(struct reftable_backend *be,
+ const char *path,
+ const struct reftable_write_options *_opts)
+{
+ struct reftable_write_options opts = *_opts;
+ opts.on_reload = reftable_backend_on_reload;
+ opts.on_reload_payload = be;
+ return reftable_new_stack(&be->stack, path, &opts);
+}
+
+static void reftable_backend_release(struct reftable_backend *be)
+{
+ reftable_stack_destroy(be->stack);
+ be->stack = NULL;
+ reftable_iterator_destroy(&be->it);
+}
+
+static int reftable_backend_read_ref(struct reftable_backend *be,
+ const char *refname,
+ struct object_id *oid,
+ struct strbuf *referent,
+ unsigned int *type)
+{
+ struct reftable_ref_record ref = {0};
+ int ret;
+
+ if (!be->it.ops) {
+ ret = reftable_stack_init_ref_iterator(be->stack, &be->it);
+ if (ret)
+ goto done;
+ }
+
+ ret = reftable_iterator_seek_ref(&be->it, refname);
+ if (ret)
+ goto done;
+
+ ret = reftable_iterator_next_ref(&be->it, &ref);
+ if (ret)
+ goto done;
+
+ if (strcmp(ref.refname, refname)) {
+ ret = 1;
+ goto done;
+ }
+
+ if (ref.value_type == REFTABLE_REF_SYMREF) {
+ strbuf_reset(referent);
+ strbuf_addstr(referent, ref.value.symref);
+ *type |= REF_ISSYMREF;
+ } else if (reftable_ref_record_val1(&ref)) {
+ unsigned int hash_id;
+
+ switch (reftable_stack_hash_id(be->stack)) {
+ case REFTABLE_HASH_SHA1:
+ hash_id = GIT_HASH_SHA1;
+ break;
+ case REFTABLE_HASH_SHA256:
+ hash_id = GIT_HASH_SHA256;
+ break;
+ default:
+ BUG("unhandled hash ID %d", reftable_stack_hash_id(be->stack));
+ }
+
+ oidread(oid, reftable_ref_record_val1(&ref),
+ &hash_algos[hash_id]);
+ } else {
+ /* We got a tombstone, which should not happen. */
+ BUG("unhandled reference value type %d", ref.value_type);
+ }
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ reftable_ref_record_release(&ref);
+ return ret;
+}
+
+struct reftable_ref_store {
+ struct ref_store base;
+
+ /*
+ * The main backend refers to the common dir and thus contains common
+ * refs as well as refs of the main repository.
+ */
+ struct reftable_backend main_backend;
+ /*
+ * The worktree backend refers to the gitdir in case the refdb is opened
+ * via a worktree. It thus contains the per-worktree refs.
+ */
+ struct reftable_backend worktree_backend;
+ /*
+ * Map of worktree backends by their respective worktree names. The map
+ * is populated lazily when we try to resolve `worktrees/$worktree` refs.
+ */
+ struct strmap worktree_backends;
+ struct reftable_write_options write_options;
+
+ unsigned int store_flags;
+ enum log_refs_config log_all_ref_updates;
+ int err;
+};
+
+/*
+ * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
+ * reftable_ref_store. required_flags is compared with ref_store's store_flags
+ * to ensure the ref_store has all required capabilities. "caller" is used in
+ * any necessary error messages.
+ */
+static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
+ unsigned int required_flags,
+ const char *caller)
+{
+ struct reftable_ref_store *refs;
+
+ if (ref_store->be != &refs_be_reftable)
+		BUG("ref_store is type \"%s\" not \"reftable\" in %s",
+ ref_store->be->name, caller);
+
+ refs = (struct reftable_ref_store *)ref_store;
+
+ if ((refs->store_flags & required_flags) != required_flags)
+ BUG("operation %s requires abilities 0x%x, but only have 0x%x",
+ caller, required_flags, refs->store_flags);
+
+ return refs;
+}
+
+/*
+ * Some refs are global to the repository (refs/heads/{*}), while others are
+ * local to the worktree (e.g. HEAD, refs/bisect/{*}). We solve this by having
+ * multiple separate databases (i.e. multiple reftable/ directories), one for
+ * the shared refs, one for the current worktree refs, and one for each
+ * additional worktree. For reading, we merge the view of both the shared and
+ * the current worktree's refs, when necessary.
+ *
+ * This function also optionally assigns the rewritten reference name that is
+ * local to the stack. This translation is required when using worktree refs
+ * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
+ * those references in their normalized form.
+ */
+static int backend_for(struct reftable_backend **out,
+ struct reftable_ref_store *store,
+ const char *refname,
+ const char **rewritten_ref,
+ int reload)
+{
+ struct reftable_backend *be;
+ const char *wtname;
+ int wtname_len;
+
+ if (!refname) {
+ be = &store->main_backend;
+ goto out;
+ }
+
+ switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
+ case REF_WORKTREE_OTHER: {
+ static struct strbuf wtname_buf = STRBUF_INIT;
+ struct strbuf wt_dir = STRBUF_INIT;
+
+ /*
+ * We're using a static buffer here so that we don't need to
+ * allocate the worktree name whenever we look up a reference.
+ * This could be avoided if the strmap interface knew how to
+ * handle keys with a length.
+ */
+ strbuf_reset(&wtname_buf);
+ strbuf_add(&wtname_buf, wtname, wtname_len);
+
+ /*
+ * There is an edge case here: when the worktree references the
+ * current worktree, then we set up the stack once via
+ * `worktree_backends` and once via `worktree_backend`. This is
+ * wasteful, but in the reading case it shouldn't matter. And
+ * in the writing case we would notice that the stack is locked
+ * already and error out when trying to write a reference via
+ * both stacks.
+ */
+ be = strmap_get(&store->worktree_backends, wtname_buf.buf);
+ if (!be) {
+ strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
+ store->base.repo->commondir, wtname_buf.buf);
+
+ CALLOC_ARRAY(be, 1);
+ store->err = reftable_backend_init(be, wt_dir.buf,
+ &store->write_options);
+ assert(store->err != REFTABLE_API_ERROR);
+
+ strmap_put(&store->worktree_backends, wtname_buf.buf, be);
+ }
+
+ strbuf_release(&wt_dir);
+ goto out;
+ }
+ case REF_WORKTREE_CURRENT:
+ /*
+ * If there is no worktree stack then we're currently in the
+ * main worktree. We thus return the main stack in that case.
+ */
+ if (!store->worktree_backend.stack)
+ be = &store->main_backend;
+ else
+ be = &store->worktree_backend;
+ goto out;
+ case REF_WORKTREE_MAIN:
+ case REF_WORKTREE_SHARED:
+ be = &store->main_backend;
+ goto out;
+ default:
+ BUG("unhandled worktree reference type");
+ }
+
+out:
+ if (reload) {
+ int ret = reftable_stack_reload(be->stack);
+ if (ret)
+ return ret;
+ }
+ *out = be;
+
+ return 0;
+}
+
+static int should_write_log(struct reftable_ref_store *refs, const char *refname)
+{
+ enum log_refs_config log_refs_cfg = refs->log_all_ref_updates;
+ if (log_refs_cfg == LOG_REFS_UNSET)
+ log_refs_cfg = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
+
+ switch (log_refs_cfg) {
+ case LOG_REFS_NONE:
+ return refs_reflog_exists(&refs->base, refname);
+ case LOG_REFS_ALWAYS:
+ return 1;
+ case LOG_REFS_NORMAL:
+ if (should_autocreate_reflog(log_refs_cfg, refname))
+ return 1;
+ return refs_reflog_exists(&refs->base, refname);
+ default:
+ BUG("unhandled core.logAllRefUpdates value %d", log_refs_cfg);
+ }
+}
+
+static void fill_reftable_log_record(struct reftable_log_record *log, const struct ident_split *split)
+{
+ const char *tz_begin;
+ int sign = 1;
+
+ reftable_log_record_release(log);
+ log->value_type = REFTABLE_LOG_UPDATE;
+ log->value.update.name =
+ xstrndup(split->name_begin, split->name_end - split->name_begin);
+ log->value.update.email =
+ xstrndup(split->mail_begin, split->mail_end - split->mail_begin);
+ log->value.update.time = atol(split->date_begin);
+
+ tz_begin = split->tz_begin;
+ if (*tz_begin == '-') {
+ sign = -1;
+ tz_begin++;
+ }
+ if (*tz_begin == '+') {
+ sign = 1;
+ tz_begin++;
+ }
+
+ log->value.update.tz_offset = sign * atoi(tz_begin);
+}
+
+static int reftable_be_config(const char *var, const char *value,
+ const struct config_context *ctx,
+ void *_opts)
+{
+ struct reftable_write_options *opts = _opts;
+
+ if (!strcmp(var, "reftable.blocksize")) {
+ unsigned long block_size = git_config_ulong(var, value, ctx->kvi);
+ if (block_size > 16777215)
+ die("reftable block size cannot exceed 16MB");
+ opts->block_size = block_size;
+ } else if (!strcmp(var, "reftable.restartinterval")) {
+ unsigned long restart_interval = git_config_ulong(var, value, ctx->kvi);
+ if (restart_interval > UINT16_MAX)
+			die("reftable restart interval cannot exceed %u", (unsigned)UINT16_MAX);
+ opts->restart_interval = restart_interval;
+ } else if (!strcmp(var, "reftable.indexobjects")) {
+ opts->skip_index_objects = !git_config_bool(var, value);
+ } else if (!strcmp(var, "reftable.geometricfactor")) {
+ unsigned long factor = git_config_ulong(var, value, ctx->kvi);
+ if (factor > UINT8_MAX)
+ die("reftable geometric factor cannot exceed %u", (unsigned)UINT8_MAX);
+ opts->auto_compaction_factor = factor;
+ } else if (!strcmp(var, "reftable.locktimeout")) {
+ int64_t lock_timeout = git_config_int64(var, value, ctx->kvi);
+ if (lock_timeout > LONG_MAX)
+ die("reftable lock timeout cannot exceed %"PRIdMAX, (intmax_t)LONG_MAX);
+ if (lock_timeout < 0 && lock_timeout != -1)
+ die("reftable lock timeout does not support negative values other than -1");
+ opts->lock_timeout_ms = lock_timeout;
+ }
+
+ return 0;
+}
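
These keys map onto the `reftable.*` configuration. A sketch of a
`.git/config` section exercising all of them; the values are purely
illustrative:

	[reftable]
		blockSize = 8192
		restartInterval = 16
		indexObjects = true
		geometricFactor = 2
		lockTimeout = 100
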
+
+static int reftable_be_fsync(int fd)
+{
+ return fsync_component(FSYNC_COMPONENT_REFERENCE, fd);
+}
+
+static struct ref_store *reftable_be_init(struct repository *repo,
+ const char *gitdir,
+ unsigned int store_flags)
+{
+ struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
+ struct strbuf path = STRBUF_INIT;
+ int is_worktree;
+ mode_t mask;
+
+ mask = umask(0);
+ umask(mask);
+
+ base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
+ strmap_init(&refs->worktree_backends);
+ refs->store_flags = store_flags;
+ refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo);
+
+ switch (repo->hash_algo->format_id) {
+ case GIT_SHA1_FORMAT_ID:
+ refs->write_options.hash_id = REFTABLE_HASH_SHA1;
+ break;
+ case GIT_SHA256_FORMAT_ID:
+ refs->write_options.hash_id = REFTABLE_HASH_SHA256;
+ break;
+ default:
+ BUG("unknown hash algorithm %d", repo->hash_algo->format_id);
+ }
+ refs->write_options.default_permissions = calc_shared_perm(the_repository, 0666 & ~mask);
+ refs->write_options.disable_auto_compact =
+ !git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
+ refs->write_options.lock_timeout_ms = 100;
+ refs->write_options.fsync = reftable_be_fsync;
+
+ git_config(reftable_be_config, &refs->write_options);
+
+ /*
+ * It is somewhat unfortunate that we have to mirror the default block
+	 * size of the reftable library here. But given that the library does
+	 * not update our write options, and given that we require the proper
+	 * block size to trim reflog messages so that they fit, we must set
+	 * up a proper value here.
+ */
+ if (!refs->write_options.block_size)
+ refs->write_options.block_size = 4096;
+
+ /*
+ * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
+ * This stack contains both the shared and the main worktree refs.
+ *
+ * Note that we don't try to resolve the path in case we have a
+ * worktree because `get_common_dir_noenv()` already does it for us.
+ */
+ is_worktree = get_common_dir_noenv(&path, gitdir);
+ if (!is_worktree) {
+ strbuf_reset(&path);
+ strbuf_realpath(&path, gitdir, 0);
+ }
+ strbuf_addstr(&path, "/reftable");
+ refs->err = reftable_backend_init(&refs->main_backend, path.buf,
+ &refs->write_options);
+ if (refs->err)
+ goto done;
+
+ /*
+ * If we're in a worktree we also need to set up the worktree reftable
+ * stack that is contained in the per-worktree GIT_DIR.
+ *
+ * Ideally, we would also add the stack to our worktree stack map. But
+ * we have no way to figure out the worktree name here and thus can't
+ * do it efficiently.
+ */
+ if (is_worktree) {
+ strbuf_reset(&path);
+ strbuf_addf(&path, "%s/reftable", gitdir);
+
+ refs->err = reftable_backend_init(&refs->worktree_backend, path.buf,
+ &refs->write_options);
+ if (refs->err)
+ goto done;
+ }
+
+ chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
+
+done:
+ assert(refs->err != REFTABLE_API_ERROR);
+ strbuf_release(&path);
+ return &refs->base;
+}
+
+static void reftable_be_release(struct ref_store *ref_store)
+{
+ struct reftable_ref_store *refs = reftable_be_downcast(ref_store, 0, "release");
+ struct strmap_entry *entry;
+ struct hashmap_iter iter;
+
+ if (refs->main_backend.stack)
+ reftable_backend_release(&refs->main_backend);
+ if (refs->worktree_backend.stack)
+ reftable_backend_release(&refs->worktree_backend);
+
+ strmap_for_each_entry(&refs->worktree_backends, &iter, entry) {
+ struct reftable_backend *be = entry->value;
+ reftable_backend_release(be);
+ free(be);
+ }
+ strmap_clear(&refs->worktree_backends, 0);
+}
+
+static int reftable_be_create_on_disk(struct ref_store *ref_store,
+ int flags UNUSED,
+ struct strbuf *err UNUSED)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "create");
+ struct strbuf sb = STRBUF_INIT;
+
+ strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
+ safe_create_dir(the_repository, sb.buf, 1);
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
+ write_file(sb.buf, "ref: refs/heads/.invalid");
+ adjust_shared_perm(the_repository, sb.buf);
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
+ safe_create_dir(the_repository, sb.buf, 1);
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
+ write_file(sb.buf, "this repository uses the reftable format");
+ adjust_shared_perm(the_repository, sb.buf);
+
+ strbuf_release(&sb);
+ return 0;
+}
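
The result is a stub layout that keeps non-reftable tooling from
misreading the repository; a sketch of the files created above:

	$GIT_DIR/reftable/     the actual ref database
	$GIT_DIR/HEAD          contains "ref: refs/heads/.invalid"
	$GIT_DIR/refs/heads    a regular file, so the directory cannot be
	                       mistaken for a loose-ref hierarchy
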
+
+static int reftable_be_remove_on_disk(struct ref_store *ref_store,
+ struct strbuf *err)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "remove");
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 0;
+
+ /*
+ * Release the ref store such that all stacks are closed. This is
+ * required so that the "tables.list" file is not open anymore, which
+ * would otherwise make it impossible to remove the file on Windows.
+ */
+ reftable_be_release(ref_store);
+
+ strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
+ if (remove_dir_recursively(&sb, 0) < 0) {
+ strbuf_addf(err, "could not delete reftables: %s",
+ strerror(errno));
+ ret = -1;
+ }
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
+ if (unlink(sb.buf) < 0) {
+ strbuf_addf(err, "could not delete stub HEAD: %s",
+ strerror(errno));
+ ret = -1;
+ }
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
+ if (unlink(sb.buf) < 0) {
+ strbuf_addf(err, "could not delete stub heads: %s",
+ strerror(errno));
+ ret = -1;
+ }
+ strbuf_reset(&sb);
+
+ strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
+ if (rmdir(sb.buf) < 0) {
+ strbuf_addf(err, "could not delete refs directory: %s",
+ strerror(errno));
+ ret = -1;
+ }
+
+ strbuf_release(&sb);
+ return ret;
+}
+
+struct reftable_ref_iterator {
+ struct ref_iterator base;
+ struct reftable_ref_store *refs;
+ struct reftable_iterator iter;
+ struct reftable_ref_record ref;
+ struct object_id oid;
+
+ char *prefix;
+ size_t prefix_len;
+ char **exclude_patterns;
+ size_t exclude_patterns_index;
+ size_t exclude_patterns_strlen;
+ unsigned int flags;
+ int err;
+};
+
+/*
+ * Handle exclude patterns. Returns `1` to tell the caller that the current
+ * reference shall not be shown, or `0` to indicate that it should be shown.
+ */
+static int should_exclude_current_ref(struct reftable_ref_iterator *iter)
+{
+ while (iter->exclude_patterns[iter->exclude_patterns_index]) {
+ const char *pattern = iter->exclude_patterns[iter->exclude_patterns_index];
+ char *ref_after_pattern;
+ int cmp;
+
+ /*
+ * Lazily cache the pattern length so that we don't have to
+ * recompute it every time this function is called.
+ */
+ if (!iter->exclude_patterns_strlen)
+ iter->exclude_patterns_strlen = strlen(pattern);
+
+ /*
+		 * When the reference name is lexicographically bigger than the
+		 * current exclude pattern we know that the pattern won't ever
+		 * match any of the following references, either. We thus
+		 * advance to the next pattern and re-check whether it matches.
+ *
+ * Otherwise, if it's smaller, then we do not have a match and
+ * thus want to show the current reference.
+ */
+ cmp = strncmp(iter->ref.refname, pattern,
+ iter->exclude_patterns_strlen);
+ if (cmp > 0) {
+ iter->exclude_patterns_index++;
+ iter->exclude_patterns_strlen = 0;
+ continue;
+ }
+ if (cmp < 0)
+ return 0;
+
+ /*
+ * The reference shares a prefix with the exclude pattern and
+ * shall thus be omitted. We skip all references that match the
+ * pattern by seeking to the first reference after the block of
+ * matches.
+ *
+ * This is done by appending the highest possible character to
+ * the pattern. Consequently, all references that have the
+ * pattern as prefix and whose suffix starts with anything in
+ * the range [0x00, 0xfe] are skipped. And given that 0xff is a
+ * non-printable character that shouldn't ever be in a ref name,
+ * we'd not yield any such record, either.
+ *
+ * Note that the seeked-to reference may also be excluded. This
+ * is not handled here though, but the caller is expected to
+ * loop and re-verify the next reference for us.
+ */
+ ref_after_pattern = xstrfmt("%s%c", pattern, 0xff);
+ iter->err = reftable_iterator_seek_ref(&iter->iter, ref_after_pattern);
+ iter->exclude_patterns_index++;
+ iter->exclude_patterns_strlen = 0;
+ trace2_counter_add(TRACE2_COUNTER_ID_REFTABLE_RESEEKS, 1);
+
+ free(ref_after_pattern);
+ return 1;
+ }
+
+ return 0;
+}
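
A worked example of the re-seek trick, assuming the single exclude
pattern "refs/hidden/":

	pattern:       "refs/hidden/"
	seek target:   "refs/hidden/\xff"
	skipped:       "refs/hidden/a", "refs/hidden/zzz"
	yielded next:  "refs/tags/v1.0"
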
+
+static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
+{
+ struct reftable_ref_iterator *iter =
+ (struct reftable_ref_iterator *)ref_iterator;
+ struct reftable_ref_store *refs = iter->refs;
+ const char *referent = NULL;
+
+ while (!iter->err) {
+ int flags = 0;
+
+ iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
+ if (iter->err)
+ break;
+
+ /*
+ * The files backend only lists references contained in "refs/" unless
+ * the root refs are to be included. We emulate the same behaviour here.
+ */
+ if (!starts_with(iter->ref.refname, "refs/") &&
+ !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
+ is_root_ref(iter->ref.refname))) {
+ continue;
+ }
+
+ if (iter->prefix_len &&
+ strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) {
+ iter->err = 1;
+ break;
+ }
+
+ if (iter->exclude_patterns && should_exclude_current_ref(iter))
+ continue;
+
+ if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
+ parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
+ REF_WORKTREE_CURRENT)
+ continue;
+
+ switch (iter->ref.value_type) {
+ case REFTABLE_REF_VAL1:
+ oidread(&iter->oid, iter->ref.value.val1,
+ refs->base.repo->hash_algo);
+ break;
+ case REFTABLE_REF_VAL2:
+ oidread(&iter->oid, iter->ref.value.val2.value,
+ refs->base.repo->hash_algo);
+ break;
+ case REFTABLE_REF_SYMREF:
+ referent = refs_resolve_ref_unsafe(&iter->refs->base,
+ iter->ref.refname,
+ RESOLVE_REF_READING,
+ &iter->oid, &flags);
+ if (!referent)
+ oidclr(&iter->oid, refs->base.repo->hash_algo);
+ break;
+ default:
+ BUG("unhandled reference value type %d", iter->ref.value_type);
+ }
+
+ if (is_null_oid(&iter->oid))
+ flags |= REF_ISBROKEN;
+
+ if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
+ if (!refname_is_safe(iter->ref.refname))
+ die(_("refname is dangerous: %s"), iter->ref.refname);
+ oidclr(&iter->oid, refs->base.repo->hash_algo);
+ flags |= REF_BAD_NAME | REF_ISBROKEN;
+ }
+
+ if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
+ flags & REF_ISSYMREF &&
+ flags & REF_ISBROKEN)
+ continue;
+
+ if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
+ !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
+ &iter->oid, flags))
+ continue;
+
+ iter->base.refname = iter->ref.refname;
+ iter->base.referent = referent;
+ iter->base.oid = &iter->oid;
+ iter->base.flags = flags;
+
+ break;
+ }
+
+ if (iter->err > 0)
+ return ITER_DONE;
+ if (iter->err < 0)
+ return ITER_ERROR;
+ return ITER_OK;
+}
+
+static int reftable_ref_iterator_seek(struct ref_iterator *ref_iterator,
+ const char *prefix)
+{
+ struct reftable_ref_iterator *iter =
+ (struct reftable_ref_iterator *)ref_iterator;
+
+ free(iter->prefix);
+ iter->prefix = xstrdup_or_null(prefix);
+ iter->prefix_len = prefix ? strlen(prefix) : 0;
+ iter->err = reftable_iterator_seek_ref(&iter->iter, prefix);
+
+ return iter->err;
+}
+
+static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
+ struct object_id *peeled)
+{
+ struct reftable_ref_iterator *iter =
+ (struct reftable_ref_iterator *)ref_iterator;
+
+ if (iter->ref.value_type == REFTABLE_REF_VAL2) {
+ oidread(peeled, iter->ref.value.val2.target_value,
+ iter->refs->base.repo->hash_algo);
+ return 0;
+ }
+
+ return -1;
+}
+
+static void reftable_ref_iterator_release(struct ref_iterator *ref_iterator)
+{
+ struct reftable_ref_iterator *iter =
+ (struct reftable_ref_iterator *)ref_iterator;
+ reftable_ref_record_release(&iter->ref);
+ reftable_iterator_destroy(&iter->iter);
+ if (iter->exclude_patterns) {
+ for (size_t i = 0; iter->exclude_patterns[i]; i++)
+ free(iter->exclude_patterns[i]);
+ free(iter->exclude_patterns);
+ }
+ free(iter->prefix);
+}
+
+static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
+ .advance = reftable_ref_iterator_advance,
+ .seek = reftable_ref_iterator_seek,
+ .peel = reftable_ref_iterator_peel,
+ .release = reftable_ref_iterator_release,
+};
+
+static int qsort_strcmp(const void *va, const void *vb)
+{
+ const char *a = *(const char **)va;
+ const char *b = *(const char **)vb;
+ return strcmp(a, b);
+}
+
+static char **filter_exclude_patterns(const char **exclude_patterns)
+{
+ size_t filtered_size = 0, filtered_alloc = 0;
+ char **filtered = NULL;
+
+ if (!exclude_patterns)
+ return NULL;
+
+ for (size_t i = 0; ; i++) {
+ const char *exclude_pattern = exclude_patterns[i];
+ int has_glob = 0;
+
+ if (!exclude_pattern)
+ break;
+
+ for (const char *p = exclude_pattern; *p; p++) {
+ has_glob = is_glob_special(*p);
+ if (has_glob)
+ break;
+ }
+ if (has_glob)
+ continue;
+
+ ALLOC_GROW(filtered, filtered_size + 1, filtered_alloc);
+ filtered[filtered_size++] = xstrdup(exclude_pattern);
+ }
+
+ if (filtered_size) {
+ QSORT(filtered, filtered_size, qsort_strcmp);
+ ALLOC_GROW(filtered, filtered_size + 1, filtered_alloc);
+ filtered[filtered_size++] = NULL;
+ }
+
+ return filtered;
+}
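
A worked example with hypothetical input patterns; the glob-containing
pattern is dropped and the remainder is sorted:

	input:    { "refs/heads/*", "refs/odd/", "refs/hidden/", NULL }
	filtered: { "refs/hidden/", "refs/odd/", NULL }
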
+
+static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
+ struct reftable_stack *stack,
+ const char *prefix,
+ const char **exclude_patterns,
+ int flags)
+{
+ struct reftable_ref_iterator *iter;
+ int ret;
+
+ iter = xcalloc(1, sizeof(*iter));
+ base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
+ iter->base.oid = &iter->oid;
+ iter->flags = flags;
+ iter->refs = refs;
+ iter->exclude_patterns = filter_exclude_patterns(exclude_patterns);
+
+ ret = refs->err;
+ if (ret)
+ goto done;
+
+ ret = reftable_stack_reload(stack);
+ if (ret)
+ goto done;
+
+ ret = reftable_stack_init_ref_iterator(stack, &iter->iter);
+ if (ret)
+ goto done;
+
+ ret = reftable_ref_iterator_seek(&iter->base, prefix);
+ if (ret)
+ goto done;
+
+done:
+ iter->err = ret;
+ return iter;
+}
+
+static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
+ const char *prefix,
+ const char **exclude_patterns,
+ unsigned int flags)
+{
+ struct reftable_ref_iterator *main_iter, *worktree_iter;
+ struct reftable_ref_store *refs;
+ unsigned int required_flags = REF_STORE_READ;
+
+ if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
+ required_flags |= REF_STORE_ODB;
+ refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
+
+ main_iter = ref_iterator_for_stack(refs, refs->main_backend.stack, prefix,
+ exclude_patterns, flags);
+
+ /*
+ * The worktree stack is only set when we're in an actual worktree
+ * right now. If we aren't, then we return the common reftable
+ * iterator, only.
+ */
+ if (!refs->worktree_backend.stack)
+ return &main_iter->base;
+
+ /*
+ * Otherwise we merge both the common and the per-worktree refs into a
+ * single iterator.
+ */
+ worktree_iter = ref_iterator_for_stack(refs, refs->worktree_backend.stack, prefix,
+ exclude_patterns, flags);
+ return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
+ ref_iterator_select, NULL);
+}
+
+static int reftable_be_read_raw_ref(struct ref_store *ref_store,
+ const char *refname,
+ struct object_id *oid,
+ struct strbuf *referent,
+ unsigned int *type,
+ int *failure_errno)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
+ struct reftable_backend *be;
+ int ret;
+
+ if (refs->err < 0)
+ return refs->err;
+
+ ret = backend_for(&be, refs, refname, &refname, 1);
+ if (ret)
+ return ret;
+
+ ret = reftable_backend_read_ref(be, refname, oid, referent, type);
+ if (ret < 0)
+ return ret;
+ if (ret > 0) {
+ *failure_errno = ENOENT;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
+ const char *refname,
+ struct strbuf *referent)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
+ struct reftable_backend *be;
+ struct object_id oid;
+ unsigned int type = 0;
+ int ret;
+
+ ret = backend_for(&be, refs, refname, &refname, 1);
+ if (ret)
+ return ret;
+
+ ret = reftable_backend_read_ref(be, refname, &oid, referent, &type);
+ if (ret)
+ ret = -1;
+ else if (type == REF_ISSYMREF)
+ ; /* happy */
+ else
+ ret = NOT_A_SYMREF;
+ return ret;
+}
+
+struct reftable_transaction_update {
+ struct ref_update *update;
+ struct object_id current_oid;
+};
+
+struct write_transaction_table_arg {
+ struct reftable_ref_store *refs;
+ struct reftable_backend *be;
+ struct reftable_addition *addition;
+ struct reftable_transaction_update *updates;
+ size_t updates_nr;
+ size_t updates_alloc;
+ size_t updates_expected;
+ uint64_t max_index;
+};
+
+struct reftable_transaction_data {
+ struct write_transaction_table_arg *args;
+ size_t args_nr, args_alloc;
+};
+
+static void free_transaction_data(struct reftable_transaction_data *tx_data)
+{
+ if (!tx_data)
+ return;
+ for (size_t i = 0; i < tx_data->args_nr; i++) {
+ reftable_addition_destroy(tx_data->args[i].addition);
+ free(tx_data->args[i].updates);
+ }
+ free(tx_data->args);
+ free(tx_data);
+}
+
+/*
+ * Prepare transaction update for the given reference update. This will cause
+ * us to lock the corresponding reftable stack against concurrent modification.
+ */
+static int prepare_transaction_update(struct write_transaction_table_arg **out,
+ struct reftable_ref_store *refs,
+ struct reftable_transaction_data *tx_data,
+ struct ref_update *update,
+ struct strbuf *err)
+{
+ struct write_transaction_table_arg *arg = NULL;
+ struct reftable_backend *be;
+ size_t i;
+ int ret;
+
+ /*
+ * This function gets called in a loop, and we don't want to repeatedly
+ * reload the stack for every single ref update. Instead, we manually
+ * reload further down in the case where we haven't yet prepared the
+ * specific `reftable_backend`.
+ */
+ ret = backend_for(&be, refs, update->refname, NULL, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * Search for a preexisting stack update. If there is one then we add
+ * the update to it, otherwise we set up a new stack update.
+ */
+ for (i = 0; !arg && i < tx_data->args_nr; i++)
+ if (tx_data->args[i].be == be)
+ arg = &tx_data->args[i];
+
+ if (!arg) {
+ struct reftable_addition *addition;
+
+ ret = reftable_stack_reload(be->stack);
+ if (ret)
+ return ret;
+
+ ret = reftable_stack_new_addition(&addition, be->stack,
+ REFTABLE_STACK_NEW_ADDITION_RELOAD);
+ if (ret) {
+ if (ret == REFTABLE_LOCK_ERROR)
+ strbuf_addstr(err, "cannot lock references");
+ return ret;
+ }
+
+ ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
+ tx_data->args_alloc);
+ arg = &tx_data->args[tx_data->args_nr++];
+ arg->refs = refs;
+ arg->be = be;
+ arg->addition = addition;
+ arg->updates = NULL;
+ arg->updates_nr = 0;
+ arg->updates_alloc = 0;
+ arg->updates_expected = 0;
+ arg->max_index = 0;
+ }
+
+ arg->updates_expected++;
+
+ if (out)
+ *out = arg;
+
+ return 0;
+}
+
+/*
+ * Queue a reference update for the correct stack. We potentially need to
+ * handle multiple stack updates in a single transaction when it spans
+ * multiple worktrees.
+ */
+static int queue_transaction_update(struct reftable_ref_store *refs,
+ struct reftable_transaction_data *tx_data,
+ struct ref_update *update,
+ struct object_id *current_oid,
+ struct strbuf *err)
+{
+ struct write_transaction_table_arg *arg = NULL;
+ int ret;
+
+ if (update->backend_data)
+ BUG("reference update queued more than once");
+
+ ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
+ if (ret < 0)
+ return ret;
+
+ ALLOC_GROW(arg->updates, arg->updates_nr + 1,
+ arg->updates_alloc);
+ arg->updates[arg->updates_nr].update = update;
+ oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
+ update->backend_data = &arg->updates[arg->updates_nr++];
+
+ return 0;
+}
+
+static int reftable_be_transaction_prepare(struct ref_store *ref_store,
+ struct ref_transaction *transaction,
+ struct strbuf *err)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
+ struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
+ struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
+ struct string_list refnames_to_check = STRING_LIST_INIT_NODUP;
+ struct reftable_transaction_data *tx_data = NULL;
+ struct reftable_backend *be;
+ struct object_id head_oid;
+ unsigned int head_type = 0;
+ size_t i;
+ int ret;
+
+ ret = refs->err;
+ if (ret < 0)
+ goto done;
+
+ tx_data = xcalloc(1, sizeof(*tx_data));
+
+ /*
+	 * Preprocess all updates. First, we check that there are no duplicate
+	 * reference updates in this transaction. Second, we lock all stacks
+ * that will be modified during the transaction.
+ */
+ for (i = 0; i < transaction->nr; i++) {
+ ret = prepare_transaction_update(NULL, refs, tx_data,
+ transaction->updates[i], err);
+ if (ret)
+ goto done;
+
+ if (!(transaction->updates[i]->flags & REF_LOG_ONLY))
+ string_list_append(&affected_refnames,
+ transaction->updates[i]->refname);
+ }
+
+ /*
+ * Now that we have counted updates per stack we can preallocate their
+ * arrays. This avoids having to reallocate many times.
+ */
+ for (i = 0; i < tx_data->args_nr; i++) {
+ CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
+ tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
+ }
+
+ /*
+ * Fail if a refname appears more than once in the transaction.
+ * This code is taken from the files backend and is a good candidate to
+ * be moved into the generic layer.
+ */
+ string_list_sort(&affected_refnames);
+ if (ref_update_reject_duplicates(&affected_refnames, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto done;
+ }
+
+ /*
+	 * TODO: it is dubious whether we should reload the stack that "HEAD"
+	 * belongs to. In theory, it may happen that we only modify
+ * stacks which are _not_ part of the "HEAD" stack. In that case we
+ * wouldn't have prepared any transaction for its stack and would not
+ * have reloaded it, which may mean that it is stale.
+ *
+ * On the other hand, reloading that stack without locking it feels
+ * wrong, too, as the value of "HEAD" could be modified concurrently at
+ * any point in time.
+ */
+ ret = backend_for(&be, refs, "HEAD", NULL, 0);
+ if (ret)
+ goto done;
+
+ ret = reftable_backend_read_ref(be, "HEAD", &head_oid,
+ &head_referent, &head_type);
+ if (ret < 0)
+ goto done;
+ ret = 0;
+
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *u = transaction->updates[i];
+ struct object_id current_oid = {0};
+ const char *rewritten_ref;
+
+ /*
+ * There is no need to reload the respective backends here as
+ * we have already reloaded them when preparing the transaction
+ * update. And given that the stacks have been locked there
+ * shouldn't have been any concurrent modifications of the
+ * stack.
+ */
+ ret = backend_for(&be, refs, u->refname, &rewritten_ref, 0);
+ if (ret)
+ goto done;
+
+ /* Verify that the new object ID is valid. */
+ if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
+ !(u->flags & REF_SKIP_OID_VERIFICATION) &&
+ !(u->flags & REF_LOG_ONLY)) {
+ struct object *o = parse_object(refs->base.repo, &u->new_oid);
+ if (!o) {
+ strbuf_addf(err,
+ _("trying to write ref '%s' with nonexistent object %s"),
+ u->refname, oid_to_hex(&u->new_oid));
+ ret = -1;
+ goto done;
+ }
+
+ if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
+ strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
+ oid_to_hex(&u->new_oid), u->refname);
+ ret = -1;
+ goto done;
+ }
+ }
+
+ /*
+ * When we update the reference that HEAD points to we enqueue
+ * a second log-only update for HEAD so that its reflog is
+ * updated accordingly.
+ */
+ if (head_type == REF_ISSYMREF &&
+ !(u->flags & REF_LOG_ONLY) &&
+ !(u->flags & REF_UPDATE_VIA_HEAD) &&
+ !strcmp(rewritten_ref, head_referent.buf)) {
+ struct ref_update *new_update;
+
+ /*
+ * First make sure that HEAD is not already in the
+ * transaction. This check is O(lg N) in the transaction
+ * size, but it happens at most once per transaction.
+ */
+ if (string_list_has_string(&affected_refnames, "HEAD")) {
+ /* An entry already existed */
+ strbuf_addf(err,
+ _("multiple updates for 'HEAD' (including one "
+ "via its referent '%s') are not allowed"),
+ u->refname);
+ ret = TRANSACTION_NAME_CONFLICT;
+ goto done;
+ }
+
+ new_update = ref_transaction_add_update(
+ transaction, "HEAD",
+ u->flags | REF_LOG_ONLY | REF_NO_DEREF,
+ &u->new_oid, &u->old_oid, NULL, NULL, NULL,
+ u->msg);
+ string_list_insert(&affected_refnames, new_update->refname);
+ }
+
+ ret = reftable_backend_read_ref(be, rewritten_ref,
+ &current_oid, &referent, &u->type);
+ if (ret < 0)
+ goto done;
+ if (ret > 0 && !ref_update_expects_existing_old_ref(u)) {
+ /*
+ * The reference does not exist, and we either have no
+ * old object ID or expect the reference to not exist.
+ * We can thus skip below safety checks as well as the
+ * symref splitting. But we do want to verify that
+ * there is no conflicting reference here so that we
+ * can output a proper error message instead of failing
+ * at a later point.
+ */
+ string_list_append(&refnames_to_check, u->refname);
+
+ /*
+ * There is no need to write the reference deletion
+ * when the reference in question doesn't exist.
+ */
+ if ((u->flags & REF_HAVE_NEW) && !ref_update_has_null_new_value(u)) {
+ ret = queue_transaction_update(refs, tx_data, u,
+ &current_oid, err);
+ if (ret)
+ goto done;
+ }
+
+ continue;
+ }
+ if (ret > 0) {
+ /* The reference does not exist, but we expected it to. */
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "unable to resolve reference '%s'"),
+ ref_update_original_update_refname(u), u->refname);
+ ret = -1;
+ goto done;
+ }
+
+ if (u->type & REF_ISSYMREF) {
+ /*
+ * The reftable stack is locked at this point already,
+ * so it is safe to call `refs_resolve_ref_unsafe()`
+ * here without causing races.
+ */
+ const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
+ &current_oid, NULL);
+
+ if (u->flags & REF_NO_DEREF) {
+ if (u->flags & REF_HAVE_OLD && !resolved) {
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "error reading reference"), u->refname);
+ ret = -1;
+ goto done;
+ }
+ } else {
+ struct ref_update *new_update;
+ int new_flags;
+
+ new_flags = u->flags;
+ if (!strcmp(rewritten_ref, "HEAD"))
+ new_flags |= REF_UPDATE_VIA_HEAD;
+
+ /*
+ * If we are updating a symref (eg. HEAD), we should also
+ * update the branch that the symref points to.
+ *
+ * This is generic functionality, and would be better
+ * done in refs.c, but the current implementation is
+ * intertwined with the locking in files-backend.c.
+ */
+ new_update = ref_transaction_add_update(
+ transaction, referent.buf, new_flags,
+ u->new_target ? NULL : &u->new_oid,
+ u->old_target ? NULL : &u->old_oid,
+ u->new_target, u->old_target,
+ u->committer_info, u->msg);
+
+ new_update->parent_update = u;
+
+ /*
+ * Change the symbolic ref update to log only. Also, it
+ * doesn't need to check its old OID value, as that will be
+ * done when new_update is processed.
+ */
+ u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
+ u->flags &= ~REF_HAVE_OLD;
+
+ if (string_list_has_string(&affected_refnames, new_update->refname)) {
+ strbuf_addf(err,
+ _("multiple updates for '%s' (including one "
+ "via symref '%s') are not allowed"),
+ referent.buf, u->refname);
+ ret = TRANSACTION_NAME_CONFLICT;
+ goto done;
+ }
+ string_list_insert(&affected_refnames, new_update->refname);
+ }
+ }
+
+ /*
+ * Verify that the old object matches our expectations. Note
+ * that the error messages here do not make a lot of sense in
+ * the context of the reftable backend as we never lock
+ * individual refs. But the error messages match what the files
+ * backend returns, which keeps our tests happy.
+ */
+ if (u->old_target) {
+ if (!(u->type & REF_ISSYMREF)) {
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "expected symref with target '%s': "
+ "but is a regular ref"),
+ ref_update_original_update_refname(u),
+ u->old_target);
+ ret = -1;
+ goto done;
+ }
+
+ if (ref_update_check_old_target(referent.buf, u, err)) {
+ ret = -1;
+ goto done;
+ }
+ } else if ((u->flags & REF_HAVE_OLD) && !oideq(&current_oid, &u->old_oid)) {
+ ret = TRANSACTION_NAME_CONFLICT;
+ if (is_null_oid(&u->old_oid)) {
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "reference already exists"),
+ ref_update_original_update_refname(u));
+ ret = TRANSACTION_CREATE_EXISTS;
+ } else if (is_null_oid(&current_oid))
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "reference is missing but expected %s"),
+ ref_update_original_update_refname(u),
+ oid_to_hex(&u->old_oid));
+ else
+ strbuf_addf(err, _("cannot lock ref '%s': "
+ "is at %s but expected %s"),
+ ref_update_original_update_refname(u),
+ oid_to_hex(&current_oid),
+ oid_to_hex(&u->old_oid));
+ goto done;
+ }
+
+ /*
+ * If all of the following conditions are true:
+ *
+ * - We're not about to write a symref.
+ * - We're not about to write a log-only entry.
+ * - Old and new object ID are the same.
+ *
+ * Then we're essentially doing a no-op update that can be
+ * skipped. This is not only more efficient, but also avoids
+ * writing unneeded reflog entries.
+ */
+ if ((u->type & REF_ISSYMREF) ||
+ (u->flags & REF_LOG_ONLY) ||
+ (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
+ ret = queue_transaction_update(refs, tx_data, u,
+ &current_oid, err);
+ if (ret)
+ goto done;
+ }
+ }
+
+ ret = refs_verify_refnames_available(ref_store, &refnames_to_check, &affected_refnames, NULL,
+ transaction->flags & REF_TRANSACTION_FLAG_INITIAL,
+ err);
+ if (ret < 0)
+ goto done;
+
+ transaction->backend_data = tx_data;
+ transaction->state = REF_TRANSACTION_PREPARED;
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ if (ret < 0) {
+ free_transaction_data(tx_data);
+ transaction->state = REF_TRANSACTION_CLOSED;
+ if (!err->len)
+ strbuf_addf(err, _("reftable: transaction prepare: %s"),
+ reftable_error_str(ret));
+ }
+ string_list_clear(&affected_refnames, 0);
+ strbuf_release(&referent);
+ strbuf_release(&head_referent);
+ string_list_clear(&refnames_to_check, 0);
+
+ return ret;
+}
+
+static int reftable_be_transaction_abort(struct ref_store *ref_store UNUSED,
+ struct ref_transaction *transaction,
+ struct strbuf *err UNUSED)
+{
+ struct reftable_transaction_data *tx_data = transaction->backend_data;
+ free_transaction_data(tx_data);
+ transaction->state = REF_TRANSACTION_CLOSED;
+ return 0;
+}
+
+static int transaction_update_cmp(const void *a, const void *b)
+{
+ struct reftable_transaction_update *update_a = (struct reftable_transaction_update *)a;
+ struct reftable_transaction_update *update_b = (struct reftable_transaction_update *)b;
+
+ /*
+ * If either update has an index set, it takes precedence (the
+ * default index is 0). This ensures that updates carrying indices
+ * are sorted amongst themselves.
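+ *
+ * For example, during reflog migration multiple entries for the same
+ * refname are queued with increasing indices and must retain exactly
+ * that order.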
+ */
+ if (update_a->update->index || update_b->update->index)
+ return update_a->update->index - update_b->update->index;
+
+ return strcmp(update_a->update->refname, update_b->update->refname);
+}
+
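+/*
+ * Callback for the reftable addition machinery: given a writer for a new
+ * table, write all queued ref updates together with their reflog entries
+ * into it. This is hooked up via reftable_addition_add() in
+ * reftable_be_transaction_finish() below.
+ */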
+static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
+{
+ struct write_transaction_table_arg *arg = cb_data;
+ uint64_t ts = reftable_stack_next_update_index(arg->be->stack);
+ struct reftable_log_record *logs = NULL;
+ struct ident_split committer_ident = {0};
+ size_t logs_nr = 0, logs_alloc = 0, i;
+ const char *committer_info;
+ int ret = 0;
+
+ committer_info = git_committer_info(0);
+ if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
+ BUG("failed splitting committer info");
+
+ QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
+
+ /*
+ * During reflog migration, we add indexes for a single reflog with
+ * multiple entries. Each entry will contain a different update_index,
+ * so set the limits accordingly.
+ */
+ ret = reftable_writer_set_limits(writer, ts, ts + arg->max_index);
+ if (ret < 0)
+ goto done;
+
+ for (i = 0; i < arg->updates_nr; i++) {
+ struct reftable_transaction_update *tx_update = &arg->updates[i];
+ struct ref_update *u = tx_update->update;
+
+ /*
+ * Write a reflog entry when updating a ref to point to
+ * something new in any of the following cases:
+ *
+ * - The reference is about to be deleted. We always want to
+ * delete the reflog in that case.
+ * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
+ * the reflog entry.
+ * - `core.logAllRefUpdates` tells us to create the reflog for
+ * the given ref.
+ */
+ if ((u->flags & REF_HAVE_NEW) &&
+ !(u->type & REF_ISSYMREF) &&
+ ref_update_has_null_new_value(u)) {
+ struct reftable_log_record log = {0};
+ struct reftable_iterator it = {0};
+
+ ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * When deleting refs we also delete all reflog entries
+ * with them. While it is not strictly required to
+ * delete reflogs together with their refs, this
+ * matches the behaviour of the files backend.
+ *
+ * Unfortunately, we have no better way than to delete
+ * all reflog entries one by one.
+ */
+ ret = reftable_iterator_seek_log(&it, u->refname);
+ while (ret == 0) {
+ struct reftable_log_record *tombstone;
+
+ ret = reftable_iterator_next_log(&it, &log);
+ if (ret < 0)
+ break;
+ if (ret > 0 || strcmp(log.refname, u->refname)) {
+ ret = 0;
+ break;
+ }
+
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ tombstone = &logs[logs_nr++];
+ tombstone->refname = xstrdup(u->refname);
+ tombstone->value_type = REFTABLE_LOG_DELETION;
+ tombstone->update_index = log.update_index;
+ }
+
+ reftable_log_record_release(&log);
+ reftable_iterator_destroy(&it);
+
+ if (ret)
+ goto done;
+ } else if (!(u->flags & REF_SKIP_CREATE_REFLOG) &&
+ (u->flags & REF_HAVE_NEW) &&
+ (u->flags & REF_FORCE_CREATE_REFLOG ||
+ should_write_log(arg->refs, u->refname))) {
+ struct reftable_log_record *log;
+ int create_reflog = 1;
+
+ if (u->new_target) {
+ if (!refs_resolve_ref_unsafe(&arg->refs->base, u->new_target,
+ RESOLVE_REF_READING, &u->new_oid, NULL)) {
+ /*
+ * TODO: currently we skip creating reflogs for dangling
+ * symref updates. It would be nice to capture these as
+ * zero-OID updates, however.
+ */
+ create_reflog = 0;
+ }
+ }
+
+ if (create_reflog) {
+ struct ident_split c;
+
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ log = &logs[logs_nr++];
+ memset(log, 0, sizeof(*log));
+
+ if (u->committer_info) {
+ if (split_ident_line(&c, u->committer_info,
+ strlen(u->committer_info)))
+ BUG("failed splitting committer info");
+ } else {
+ c = committer_ident;
+ }
+
+ fill_reftable_log_record(log, &c);
+
+ /*
+ * The writer sorts updates, so updates for the same refname
+ * need to carry distinct update indices.
+ */
+ log->update_index = ts + u->index;
+
+ log->refname = xstrdup(u->refname);
+ memcpy(log->value.update.new_hash,
+ u->new_oid.hash, GIT_MAX_RAWSZ);
+ memcpy(log->value.update.old_hash,
+ tx_update->current_oid.hash, GIT_MAX_RAWSZ);
+ log->value.update.message =
+ xstrndup(u->msg, arg->refs->write_options.block_size / 2);
+ }
+ }
+
+ if (u->flags & REF_LOG_ONLY)
+ continue;
+
+ if (u->new_target) {
+ struct reftable_ref_record ref = {
+ .refname = (char *)u->refname,
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = (char *)u->new_target,
+ .update_index = ts,
+ };
+
+ ret = reftable_writer_add_ref(writer, &ref);
+ if (ret < 0)
+ goto done;
+ } else if ((u->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(u)) {
+ struct reftable_ref_record ref = {
+ .refname = (char *)u->refname,
+ .update_index = ts,
+ .value_type = REFTABLE_REF_DELETION,
+ };
+
+ ret = reftable_writer_add_ref(writer, &ref);
+ if (ret < 0)
+ goto done;
+ } else if (u->flags & REF_HAVE_NEW) {
+ struct reftable_ref_record ref = {0};
+ struct object_id peeled;
+ int peel_error;
+
+ ref.refname = (char *)u->refname;
+ ref.update_index = ts;
+
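+ /*
+ * Peelable objects (i.e. annotated tags) are stored as "val2"
+ * records that carry both the tag's own object ID and the peeled
+ * object ID; everything else becomes a plain "val1" record.
+ */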
+ peel_error = peel_object(arg->refs->base.repo, &u->new_oid, &peeled);
+ if (!peel_error) {
+ ref.value_type = REFTABLE_REF_VAL2;
+ memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
+ memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
+ } else if (!is_null_oid(&u->new_oid)) {
+ ref.value_type = REFTABLE_REF_VAL1;
+ memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
+ }
+
+ ret = reftable_writer_add_ref(writer, &ref);
+ if (ret < 0)
+ goto done;
+ }
+ }
+
+ /*
+ * Logs are written at the end so that we do not have intermixed ref
+ * and log blocks.
+ */
+ if (logs) {
+ ret = reftable_writer_add_logs(writer, logs, logs_nr);
+ if (ret < 0)
+ goto done;
+ }
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ for (i = 0; i < logs_nr; i++)
+ reftable_log_record_release(&logs[i]);
+ free(logs);
+ return ret;
+}
+
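+/*
+ * Commit the prepared transaction: for each backend that has queued updates
+ * we write a new table via write_transaction_table() and then commit the
+ * pending addition. The main and worktree stacks are committed one after the
+ * other.
+ */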
+static int reftable_be_transaction_finish(struct ref_store *ref_store UNUSED,
+ struct ref_transaction *transaction,
+ struct strbuf *err)
+{
+ struct reftable_transaction_data *tx_data = transaction->backend_data;
+ int ret = 0;
+
+ for (size_t i = 0; i < tx_data->args_nr; i++) {
+ tx_data->args[i].max_index = transaction->max_index;
+
+ ret = reftable_addition_add(tx_data->args[i].addition,
+ write_transaction_table, &tx_data->args[i]);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_addition_commit(tx_data->args[i].addition);
+ if (ret < 0)
+ goto done;
+ }
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ free_transaction_data(tx_data);
+ transaction->state = REF_TRANSACTION_CLOSED;
+
+ if (ret) {
+ strbuf_addf(err, _("reftable: transaction failure: %s"),
+ reftable_error_str(ret));
+ return -1;
+ }
+ return ret;
+}
+
+static int reftable_be_pack_refs(struct ref_store *ref_store,
+ struct pack_refs_opts *opts)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
+ struct reftable_stack *stack;
+ int ret;
+
+ if (refs->err)
+ return refs->err;
+
+ stack = refs->worktree_backend.stack;
+ if (!stack)
+ stack = refs->main_backend.stack;
+
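+ /*
+ * Auto-compaction heuristically decides whether and which tables to
+ * merge, whereas a full compaction squashes the whole stack into a
+ * single table.
+ */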
+ if (opts->flags & PACK_REFS_AUTO)
+ ret = reftable_stack_auto_compact(stack);
+ else
+ ret = reftable_stack_compact_all(stack, NULL);
+ if (ret < 0) {
+ ret = error(_("unable to compact stack: %s"),
+ reftable_error_str(ret));
+ goto out;
+ }
+
+ ret = reftable_stack_clean(stack);
+
+out:
+ return ret;
+}
+
+struct write_create_symref_arg {
+ struct reftable_ref_store *refs;
+ struct reftable_stack *stack;
+ struct strbuf *err;
+ const char *refname;
+ const char *target;
+ const char *logmsg;
+};
+
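+/*
+ * Argument for copying or renaming a reference: `delete_old` distinguishes
+ * the two cases, with renames additionally deleting the old name together
+ * with its reflog entries.
+ */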
+struct write_copy_arg {
+ struct reftable_ref_store *refs;
+ struct reftable_backend *be;
+ const char *oldname;
+ const char *newname;
+ const char *logmsg;
+ int delete_old;
+};
+
+static int write_copy_table(struct reftable_writer *writer, void *cb_data)
+{
+ struct write_copy_arg *arg = cb_data;
+ uint64_t deletion_ts, creation_ts;
+ struct reftable_ref_record old_ref = {0}, refs[2] = {0};
+ struct reftable_log_record old_log = {0}, *logs = NULL;
+ struct reftable_iterator it = {0};
+ struct string_list skip = STRING_LIST_INIT_NODUP;
+ struct ident_split committer_ident = {0};
+ struct strbuf errbuf = STRBUF_INIT;
+ size_t logs_nr = 0, logs_alloc = 0, i;
+ const char *committer_info;
+ int ret;
+
+ committer_info = git_committer_info(0);
+ if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
+ BUG("failed splitting committer info");
+
+ if (reftable_stack_read_ref(arg->be->stack, arg->oldname, &old_ref)) {
+ ret = error(_("refname %s not found"), arg->oldname);
+ goto done;
+ }
+ if (old_ref.value_type == REFTABLE_REF_SYMREF) {
+ ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
+ arg->oldname);
+ goto done;
+ }
+
+ /*
+ * There's nothing to do in case the old and new name are the same, so
+ * we exit early in that case.
+ */
+ if (!strcmp(arg->oldname, arg->newname)) {
+ ret = 0;
+ goto done;
+ }
+
+ /*
+ * Verify that the new refname is available.
+ */
+ if (arg->delete_old)
+ string_list_insert(&skip, arg->oldname);
+ ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
+ NULL, &skip, 0, &errbuf);
+ if (ret < 0) {
+ error("%s", errbuf.buf);
+ goto done;
+ }
+
+ /*
+ * When deleting the old reference we have to use two update indices:
+ * one to delete the old ref and its reflog, and one to create the
+ * new ref and its reflog. They need to be staged with two separate
+ * indices because the new reflog needs to encode both the deletion of
+ * the old branch and the creation of the new branch, and we cannot do
+ * two changes to a reflog in a single update.
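+ *
+ * For example, with a next update index of N, a rename stages the
+ * deletion of the old name at index N and the creation of the new
+ * name at index N + 1.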
+ */
+ deletion_ts = creation_ts = reftable_stack_next_update_index(arg->be->stack);
+ if (arg->delete_old)
+ creation_ts++;
+ ret = reftable_writer_set_limits(writer, deletion_ts, creation_ts);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * Add the new reference. If this is a rename then we also delete the
+ * old reference.
+ */
+ refs[0] = old_ref;
+ refs[0].refname = xstrdup(arg->newname);
+ refs[0].update_index = creation_ts;
+ if (arg->delete_old) {
+ refs[1].refname = xstrdup(arg->oldname);
+ refs[1].value_type = REFTABLE_REF_DELETION;
+ refs[1].update_index = deletion_ts;
+ }
+ ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * When deleting the old branch we need to create a reflog entry on the
+ * new branch name that indicates that the old branch has been deleted
+ * and then recreated. This is a tad weird, but matches what the files
+ * backend does.
+ */
+ if (arg->delete_old) {
+ struct strbuf head_referent = STRBUF_INIT;
+ struct object_id head_oid;
+ int append_head_reflog;
+ unsigned head_type = 0;
+
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
+ fill_reftable_log_record(&logs[logs_nr], &committer_ident);
+ logs[logs_nr].refname = xstrdup(arg->newname);
+ logs[logs_nr].update_index = deletion_ts;
+ logs[logs_nr].value.update.message =
+ xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
+ memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
+ logs_nr++;
+
+ ret = reftable_backend_read_ref(arg->be, "HEAD", &head_oid,
+ &head_referent, &head_type);
+ if (ret < 0)
+ goto done;
+ append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
+ strbuf_release(&head_referent);
+
+ /*
+ * The files backend uses `refs_delete_ref()` to delete the old
+ * branch name, which will append a reflog entry for HEAD in
+ * case it points to the old branch.
+ */
+ if (append_head_reflog) {
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ logs[logs_nr] = logs[logs_nr - 1];
+ logs[logs_nr].refname = xstrdup("HEAD");
+ logs[logs_nr].value.update.name =
+ xstrdup(logs[logs_nr].value.update.name);
+ logs[logs_nr].value.update.email =
+ xstrdup(logs[logs_nr].value.update.email);
+ logs[logs_nr].value.update.message =
+ xstrdup(logs[logs_nr].value.update.message);
+ logs_nr++;
+ }
+ }
+
+ /*
+ * Create the reflog entry for the newly created branch.
+ */
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
+ fill_reftable_log_record(&logs[logs_nr], &committer_ident);
+ logs[logs_nr].refname = xstrdup(arg->newname);
+ logs[logs_nr].update_index = creation_ts;
+ logs[logs_nr].value.update.message =
+ xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
+ memcpy(logs[logs_nr].value.update.new_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
+ logs_nr++;
+
+ /*
+ * In addition to writing the reflog entry for the new branch, we also
+ * copy over all log entries from the old reflog. Last but not least,
+ * when renaming we also have to delete all the old reflog entries.
+ */
+ ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_iterator_seek_log(&it, arg->oldname);
+ if (ret < 0)
+ goto done;
+
+ while (1) {
+ ret = reftable_iterator_next_log(&it, &old_log);
+ if (ret < 0)
+ goto done;
+ if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
+ ret = 0;
+ break;
+ }
+
+ free(old_log.refname);
+
+ /*
+ * Copy over the old reflog entry with the new refname.
+ */
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ logs[logs_nr] = old_log;
+ logs[logs_nr].refname = xstrdup(arg->newname);
+ logs_nr++;
+
+ /*
+ * Delete the old reflog entry in case we are renaming.
+ */
+ if (arg->delete_old) {
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
+ logs[logs_nr].refname = xstrdup(arg->oldname);
+ logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
+ logs[logs_nr].update_index = old_log.update_index;
+ logs_nr++;
+ }
+
+ /*
+ * Transfer ownership of the log record we're iterating over to
+ * the array of log records. Otherwise, the pointers would get
+ * free'd or reallocated by the iterator.
+ */
+ memset(&old_log, 0, sizeof(old_log));
+ }
+
+ ret = reftable_writer_add_logs(writer, logs, logs_nr);
+ if (ret < 0)
+ goto done;
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ reftable_iterator_destroy(&it);
+ string_list_clear(&skip, 0);
+ strbuf_release(&errbuf);
+ for (i = 0; i < logs_nr; i++)
+ reftable_log_record_release(&logs[i]);
+ free(logs);
+ for (i = 0; i < ARRAY_SIZE(refs); i++)
+ reftable_ref_record_release(&refs[i]);
+ reftable_ref_record_release(&old_ref);
+ reftable_log_record_release(&old_log);
+ return ret;
+}
+
+static int reftable_be_rename_ref(struct ref_store *ref_store,
+ const char *oldrefname,
+ const char *newrefname,
+ const char *logmsg)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
+ struct write_copy_arg arg = {
+ .refs = refs,
+ .oldname = oldrefname,
+ .newname = newrefname,
+ .logmsg = logmsg,
+ .delete_old = 1,
+ };
+ int ret;
+
+ ret = refs->err;
+ if (ret < 0)
+ goto done;
+
+ ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
+ if (ret)
+ goto done;
+ ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ return ret;
+}
+
+static int reftable_be_copy_ref(struct ref_store *ref_store,
+ const char *oldrefname,
+ const char *newrefname,
+ const char *logmsg)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
+ struct write_copy_arg arg = {
+ .refs = refs,
+ .oldname = oldrefname,
+ .newname = newrefname,
+ .logmsg = logmsg,
+ };
+ int ret;
+
+ ret = refs->err;
+ if (ret < 0)
+ goto done;
+
+ ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
+ if (ret)
+ goto done;
+ ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ return ret;
+}
+
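+/*
+ * Iterator that yields each refname that has a reflog exactly once. The
+ * `last_name` buffer remembers the most recently yielded refname so that
+ * consecutive log records for the same ref can be deduplicated.
+ */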
+struct reftable_reflog_iterator {
+ struct ref_iterator base;
+ struct reftable_ref_store *refs;
+ struct reftable_iterator iter;
+ struct reftable_log_record log;
+ struct strbuf last_name;
+ int err;
+};
+
+static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
+{
+ struct reftable_reflog_iterator *iter =
+ (struct reftable_reflog_iterator *)ref_iterator;
+
+ while (!iter->err) {
+ iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
+ if (iter->err)
+ break;
+
+ /*
+ * We want the refnames that we have reflogs for, so we skip if
+ * we've already produced this name. This could be faster by
+ * seeking directly to reflog@update_index==0.
+ */
+ if (!strcmp(iter->log.refname, iter->last_name.buf))
+ continue;
+
+ if (check_refname_format(iter->log.refname,
+ REFNAME_ALLOW_ONELEVEL))
+ continue;
+
+ strbuf_reset(&iter->last_name);
+ strbuf_addstr(&iter->last_name, iter->log.refname);
+ iter->base.refname = iter->log.refname;
+
+ break;
+ }
+
+ if (iter->err > 0)
+ return ITER_DONE;
+ if (iter->err < 0)
+ return ITER_ERROR;
+ return ITER_OK;
+}
+
+static int reftable_reflog_iterator_seek(struct ref_iterator *ref_iterator UNUSED,
+ const char *prefix UNUSED)
+{
+ BUG("reftable reflog iterator cannot be seeked");
+ return -1;
+}
+
+static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
+ struct object_id *peeled UNUSED)
+{
+ BUG("reftable reflog iterator cannot be peeled");
+ return -1;
+}
+
+static void reftable_reflog_iterator_release(struct ref_iterator *ref_iterator)
+{
+ struct reftable_reflog_iterator *iter =
+ (struct reftable_reflog_iterator *)ref_iterator;
+ reftable_log_record_release(&iter->log);
+ reftable_iterator_destroy(&iter->iter);
+ strbuf_release(&iter->last_name);
+}
+
+static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
+ .advance = reftable_reflog_iterator_advance,
+ .seek = reftable_reflog_iterator_seek,
+ .peel = reftable_reflog_iterator_peel,
+ .release = reftable_reflog_iterator_release,
+};
+
+static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
+ struct reftable_stack *stack)
+{
+ struct reftable_reflog_iterator *iter;
+ int ret;
+
+ iter = xcalloc(1, sizeof(*iter));
+ base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
+ strbuf_init(&iter->last_name, 0);
+ iter->refs = refs;
+
+ ret = refs->err;
+ if (ret)
+ goto done;
+
+ ret = reftable_stack_reload(stack);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_stack_init_log_iterator(stack, &iter->iter);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_iterator_seek_log(&iter->iter, "");
+ if (ret < 0)
+ goto done;
+
+done:
+ iter->err = ret;
+ return iter;
+}
+
+static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
+ struct reftable_reflog_iterator *main_iter, *worktree_iter;
+
+ main_iter = reflog_iterator_for_stack(refs, refs->main_backend.stack);
+ if (!refs->worktree_backend.stack)
+ return &main_iter->base;
+
+ worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_backend.stack);
+
+ return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
+ ref_iterator_select, NULL);
+}
+
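+/*
+ * Convert a single reftable log record into the arguments expected by an
+ * each_reflog_ent_fn callback and invoke the callback. The reflog existence
+ * marker is filtered out here.
+ */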
+static int yield_log_record(struct reftable_ref_store *refs,
+ struct reftable_log_record *log,
+ each_reflog_ent_fn fn,
+ void *cb_data)
+{
+ struct object_id old_oid, new_oid;
+ const char *full_committer;
+
+ oidread(&old_oid, log->value.update.old_hash, refs->base.repo->hash_algo);
+ oidread(&new_oid, log->value.update.new_hash, refs->base.repo->hash_algo);
+
+ /*
+ * When both the old object ID and the new object ID are null
+ * then this is the reflog existence marker, which must not be
+ * exposed to the caller.
+ */
+ if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
+ return 0;
+
+ full_committer = fmt_ident(log->value.update.name, log->value.update.email,
+ WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
+ return fn(&old_oid, &new_oid, full_committer,
+ log->value.update.time, log->value.update.tz_offset,
+ log->value.update.message, cb_data);
+}
+
+static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
+ const char *refname,
+ each_reflog_ent_fn fn,
+ void *cb_data)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
+ struct reftable_log_record log = {0};
+ struct reftable_iterator it = {0};
+ struct reftable_backend *be;
+ int ret;
+
+ if (refs->err < 0)
+ return refs->err;
+
+ /*
+ * TODO: we should adapt this callsite to reload the stack. There is no
+ * obvious reason why we shouldn't.
+ */
+ ret = backend_for(&be, refs, refname, &refname, 0);
+ if (ret)
+ goto done;
+
+ ret = reftable_stack_init_log_iterator(be->stack, &it);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_iterator_seek_log(&it, refname);
+ while (!ret) {
+ ret = reftable_iterator_next_log(&it, &log);
+ if (ret < 0)
+ break;
+ if (ret > 0 || strcmp(log.refname, refname)) {
+ ret = 0;
+ break;
+ }
+
+ ret = yield_log_record(refs, &log, fn, cb_data);
+ if (ret)
+ break;
+ }
+
+done:
+ reftable_log_record_release(&log);
+ reftable_iterator_destroy(&it);
+ return ret;
+}
+
+static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
+ const char *refname,
+ each_reflog_ent_fn fn,
+ void *cb_data)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
+ struct reftable_log_record *logs = NULL;
+ struct reftable_iterator it = {0};
+ struct reftable_backend *be;
+ size_t logs_alloc = 0, logs_nr = 0, i;
+ int ret;
+
+ if (refs->err < 0)
+ return refs->err;
+
+ /*
+ * TODO: we should adapt this callsite to reload the stack. There is no
+ * obvious reason why we shouldn't.
+ */
+ ret = backend_for(&be, refs, refname, &refname, 0);
+ if (ret)
+ goto done;
+
+ ret = reftable_stack_init_log_iterator(be->stack, &it);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_iterator_seek_log(&it, refname);
+ while (!ret) {
+ struct reftable_log_record log = {0};
+
+ ret = reftable_iterator_next_log(&it, &log);
+ if (ret < 0)
+ goto done;
+ if (ret > 0 || strcmp(log.refname, refname)) {
+ reftable_log_record_release(&log);
+ ret = 0;
+ break;
+ }
+
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ logs[logs_nr++] = log;
+ }
+
+ for (i = logs_nr; i--;) {
+ ret = yield_log_record(refs, &logs[i], fn, cb_data);
+ if (ret)
+ goto done;
+ }
+
+done:
+ reftable_iterator_destroy(&it);
+ for (i = 0; i < logs_nr; i++)
+ reftable_log_record_release(&logs[i]);
+ free(logs);
+ return ret;
+}
+
+static int reftable_be_reflog_exists(struct ref_store *ref_store,
+ const char *refname)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
+ struct reftable_log_record log = {0};
+ struct reftable_iterator it = {0};
+ struct reftable_backend *be;
+ int ret;
+
+ ret = refs->err;
+ if (ret < 0)
+ goto done;
+
+ ret = backend_for(&be, refs, refname, &refname, 1);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_stack_init_log_iterator(be->stack, &it);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_iterator_seek_log(&it, refname);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * Check whether we get at least one log record for the given ref name.
+ * If so, the reflog exists, otherwise it doesn't.
+ */
+ ret = reftable_iterator_next_log(&it, &log);
+ if (ret < 0)
+ goto done;
+ if (ret > 0) {
+ ret = 0;
+ goto done;
+ }
+
+ ret = strcmp(log.refname, refname) == 0;
+
+done:
+ reftable_iterator_destroy(&it);
+ reftable_log_record_release(&log);
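+ /*
+ * Read errors are swallowed here and reported as "the reflog does
+ * not exist".
+ */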
+ if (ret < 0)
+ ret = 0;
+ return ret;
+}
+
+struct write_reflog_existence_arg {
+ struct reftable_ref_store *refs;
+ const char *refname;
+ struct reftable_stack *stack;
+};
+
+static int write_reflog_existence_table(struct reftable_writer *writer,
+ void *cb_data)
+{
+ struct write_reflog_existence_arg *arg = cb_data;
+ uint64_t ts = reftable_stack_next_update_index(arg->stack);
+ struct reftable_log_record log = {0};
+ int ret;
+
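+ /*
+ * If a log record already exists (ret == 0) or reading it failed
+ * (ret < 0), there is nothing for us to write and we bail out early.
+ */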
+ ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
+ if (ret <= 0)
+ goto done;
+
+ ret = reftable_writer_set_limits(writer, ts, ts);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * The existence entry has both old and new object ID set to the
+ * null object ID. Our iterators are aware of this and will not present
+ * it to their callers.
+ */
+ log.refname = xstrdup(arg->refname);
+ log.update_index = ts;
+ log.value_type = REFTABLE_LOG_UPDATE;
+ ret = reftable_writer_add_log(writer, &log);
+
+done:
+ assert(ret != REFTABLE_API_ERROR);
+ reftable_log_record_release(&log);
+ return ret;
+}
+
+static int reftable_be_create_reflog(struct ref_store *ref_store,
+ const char *refname,
+ struct strbuf *errmsg UNUSED)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
+ struct reftable_backend *be;
+ struct write_reflog_existence_arg arg = {
+ .refs = refs,
+ .refname = refname,
+ };
+ int ret;
+
+ ret = refs->err;
+ if (ret < 0)
+ goto done;
+
+ ret = backend_for(&be, refs, refname, &refname, 1);
+ if (ret)
+ goto done;
+ arg.stack = be->stack;
+
+ ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg);
+
+done:
+ return ret;
+}
+
+struct write_reflog_delete_arg {
+ struct reftable_stack *stack;
+ const char *refname;
+};
+
+static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
+{
+ struct write_reflog_delete_arg *arg = cb_data;
+ struct reftable_log_record log = {0}, tombstone = {0};
+ struct reftable_iterator it = {0};
+ uint64_t ts = reftable_stack_next_update_index(arg->stack);
+ int ret;
+
+ ret = reftable_writer_set_limits(writer, ts, ts);
+ if (ret < 0)
+ goto out;
+
+ ret = reftable_stack_init_log_iterator(arg->stack, &it);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * In order to delete a reflog we need to delete all of its entries one
+ * by one. This is inefficient, but the reftable format currently has no
+ * marker for deleting a whole reflog in one go.
+ */
+ ret = reftable_iterator_seek_log(&it, arg->refname);
+ while (ret == 0) {
+ ret = reftable_iterator_next_log(&it, &log);
+ if (ret < 0)
+ break;
+ if (ret > 0 || strcmp(log.refname, arg->refname)) {
+ ret = 0;
+ break;
+ }
+
+ tombstone.refname = (char *)arg->refname;
+ tombstone.value_type = REFTABLE_LOG_DELETION;
+ tombstone.update_index = log.update_index;
+
+ ret = reftable_writer_add_log(writer, &tombstone);
+ }
+
+out:
+ reftable_log_record_release(&log);
+ reftable_iterator_destroy(&it);
+ return ret;
+}
+
+static int reftable_be_delete_reflog(struct ref_store *ref_store,
+ const char *refname)
+{
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
+ struct reftable_backend *be;
+ struct write_reflog_delete_arg arg = {
+ .refname = refname,
+ };
+ int ret;
+
+ ret = backend_for(&be, refs, refname, &refname, 1);
+ if (ret)
+ return ret;
+ arg.stack = be->stack;
+
+ ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg);
+
+ assert(ret != REFTABLE_API_ERROR);
+ return ret;
+}
+
+struct reflog_expiry_arg {
+ struct reftable_ref_store *refs;
+ struct reftable_stack *stack;
+ struct reftable_log_record *records;
+ struct object_id update_oid;
+ const char *refname;
+ size_t len;
+};
+
+static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
+{
+ struct reflog_expiry_arg *arg = cb_data;
+ uint64_t ts = reftable_stack_next_update_index(arg->stack);
+ uint64_t live_records = 0;
+ size_t i;
+ int ret;
+
+ for (i = 0; i < arg->len; i++)
+ if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
+ live_records++;
+
+ ret = reftable_writer_set_limits(writer, ts, ts);
+ if (ret < 0)
+ return ret;
+
+ if (!is_null_oid(&arg->update_oid)) {
+ struct reftable_ref_record ref = {0};
+ struct object_id peeled;
+
+ ref.refname = (char *)arg->refname;
+ ref.update_index = ts;
+
+ if (!peel_object(arg->refs->base.repo, &arg->update_oid, &peeled)) {
+ ref.value_type = REFTABLE_REF_VAL2;
+ memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
+ memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
+ } else {
+ ref.value_type = REFTABLE_REF_VAL1;
+ memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
+ }
+
+ ret = reftable_writer_add_ref(writer, &ref);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * When no live entries remain in the reflog we write a placeholder
+ * entry to indicate that the reflog itself still exists.
+ */
+ if (!live_records) {
+ struct reftable_log_record log = {
+ .refname = (char *)arg->refname,
+ .value_type = REFTABLE_LOG_UPDATE,
+ .update_index = ts,
+ };
+
+ ret = reftable_writer_add_log(writer, &log);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < arg->len; i++) {
+ ret = reftable_writer_add_log(writer, &arg->records[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int reftable_be_reflog_expire(struct ref_store *ref_store,
+ const char *refname,
+ unsigned int flags,
+ reflog_expiry_prepare_fn prepare_fn,
+ reflog_expiry_should_prune_fn should_prune_fn,
+ reflog_expiry_cleanup_fn cleanup_fn,
+ void *policy_cb_data)
+{
+ /*
+ * For log expiry, we write tombstones for every single reflog entry
+ * that is to be expired. This means that the entries are still
+ * retrievable by delving into the stack, and expiring entries
+ * paradoxically takes extra space in the stack. This space is only
+ * reclaimed when the reftable stack is compacted.
+ *
+ * It would be better if the refs backend supported an API that sets a
+ * criterion for all refs, passing the criterion to pack_refs().
+ *
+ * On the plus side, because we do the expiration per ref, we can easily
+ * insert the reflog existence dummies.
+ */
+ struct reftable_ref_store *refs =
+ reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
+ struct reftable_log_record *logs = NULL;
+ struct reftable_log_record *rewritten = NULL;
+ struct reftable_iterator it = {0};
+ struct reftable_addition *add = NULL;
+ struct reflog_expiry_arg arg = {0};
+ struct reftable_backend *be;
+ struct object_id oid = {0};
+ struct strbuf referent = STRBUF_INIT;
+ uint8_t *last_hash = NULL;
+ size_t logs_nr = 0, logs_alloc = 0, i;
+ unsigned int type = 0;
+ int ret;
+
+ if (refs->err < 0)
+ return refs->err;
+
+ ret = backend_for(&be, refs, refname, &refname, 1);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_stack_init_log_iterator(be->stack, &it);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_iterator_seek_log(&it, refname);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_stack_new_addition(&add, be->stack, 0);
+ if (ret < 0)
+ goto done;
+
+ ret = reftable_backend_read_ref(be, refname, &oid, &referent, &type);
+ if (ret < 0)
+ goto done;
+ prepare_fn(refname, &oid, policy_cb_data);
+
+ while (1) {
+ struct reftable_log_record log = {0};
+ struct object_id old_oid, new_oid;
+
+ ret = reftable_iterator_next_log(&it, &log);
+ if (ret < 0)
+ goto done;
+ if (ret > 0 || strcmp(log.refname, refname)) {
+ reftable_log_record_release(&log);
+ break;
+ }
+
+ oidread(&old_oid, log.value.update.old_hash,
+ ref_store->repo->hash_algo);
+ oidread(&new_oid, log.value.update.new_hash,
+ ref_store->repo->hash_algo);
+
+ /*
+ * Skip over the reflog existence marker. We will add it back
+ * in when there are no live reflog records.
+ */
+ if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
+ reftable_log_record_release(&log);
+ continue;
+ }
+
+ ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
+ logs[logs_nr++] = log;
+ }
+
+ /*
+ * We need to rewrite all reflog entries according to the pruning
+ * callback function:
+ *
+ * - If a reflog entry shall be pruned we mark the record for
+ * deletion.
+ *
+ * - Otherwise we may have to rewrite the chain of reflog entries so
+ * that gaps created by just-deleted records get backfilled.
+ */
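+ /*
+ * As an illustration: given the chain X -> A, A -> B, B -> C, pruning
+ * the entry A -> B under EXPIRE_REFLOGS_REWRITE rewrites the remaining
+ * entry B -> C to A -> C so that the chain stays connected.
+ */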
+ CALLOC_ARRAY(rewritten, logs_nr);
+ for (i = logs_nr; i--;) {
+ struct reftable_log_record *dest = &rewritten[i];
+ struct object_id old_oid, new_oid;
+
+ *dest = logs[i];
+ oidread(&old_oid, logs[i].value.update.old_hash,
+ ref_store->repo->hash_algo);
+ oidread(&new_oid, logs[i].value.update.new_hash,
+ ref_store->repo->hash_algo);
+
+ if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
+ (timestamp_t)logs[i].value.update.time,
+ logs[i].value.update.tz_offset,
+ logs[i].value.update.message,
+ policy_cb_data)) {
+ dest->value_type = REFTABLE_LOG_DELETION;
+ } else {
+ if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
+ memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ);
+ last_hash = logs[i].value.update.new_hash;
+ }
+ }
+
+ if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash && !is_null_oid(&oid))
+ oidread(&arg.update_oid, last_hash, ref_store->repo->hash_algo);
+
+ arg.refs = refs;
+ arg.records = rewritten;
+ arg.len = logs_nr;
+ arg.stack = be->stack;
+ arg.refname = refname;
+
+ ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
+ if (ret < 0)
+ goto done;
+
+ /*
+ * Future improvement: we could skip writing records that were
+ * not changed.
+ */
+ if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
+ ret = reftable_addition_commit(add);
+
+done:
+ if (add)
+ cleanup_fn(policy_cb_data);
+ assert(ret != REFTABLE_API_ERROR);
+
+ reftable_iterator_destroy(&it);
+ reftable_addition_destroy(add);
+ for (i = 0; i < logs_nr; i++)
+ reftable_log_record_release(&logs[i]);
+ strbuf_release(&referent);
+ free(logs);
+ free(rewritten);
+ return ret;
+}
+
+static int reftable_be_fsck(struct ref_store *ref_store UNUSED,
+ struct fsck_options *o UNUSED,
+ struct worktree *wt UNUSED)
+{
+ return 0;
+}
+
+struct ref_storage_be refs_be_reftable = {
+ .name = "reftable",
+ .init = reftable_be_init,
+ .release = reftable_be_release,
+ .create_on_disk = reftable_be_create_on_disk,
+ .remove_on_disk = reftable_be_remove_on_disk,
+
+ .transaction_prepare = reftable_be_transaction_prepare,
+ .transaction_finish = reftable_be_transaction_finish,
+ .transaction_abort = reftable_be_transaction_abort,
+
+ .pack_refs = reftable_be_pack_refs,
+ .rename_ref = reftable_be_rename_ref,
+ .copy_ref = reftable_be_copy_ref,
+
+ .iterator_begin = reftable_be_iterator_begin,
+ .read_raw_ref = reftable_be_read_raw_ref,
+ .read_symbolic_ref = reftable_be_read_symbolic_ref,
+
+ .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
+ .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
+ .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
+ .reflog_exists = reftable_be_reflog_exists,
+ .create_reflog = reftable_be_create_reflog,
+ .delete_reflog = reftable_be_delete_reflog,
+ .reflog_expire = reftable_be_reflog_expire,
+
+ .fsck = reftable_be_fsck,
+};